/*
 * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/net_tstamp.h>
26 #include "enic_clsf.h"
28 #include "vnic_stats.h"
31 char name
[ETH_GSTRING_LEN
];
/* Build a struct enic_stat initializer for one counter: the stringized
 * field name plus its u64 index within the matching vnic_*_stats layout.
 */
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
50 static const struct enic_stat enic_tx_stats
[] = {
51 ENIC_TX_STAT(tx_frames_ok
),
52 ENIC_TX_STAT(tx_unicast_frames_ok
),
53 ENIC_TX_STAT(tx_multicast_frames_ok
),
54 ENIC_TX_STAT(tx_broadcast_frames_ok
),
55 ENIC_TX_STAT(tx_bytes_ok
),
56 ENIC_TX_STAT(tx_unicast_bytes_ok
),
57 ENIC_TX_STAT(tx_multicast_bytes_ok
),
58 ENIC_TX_STAT(tx_broadcast_bytes_ok
),
59 ENIC_TX_STAT(tx_drops
),
60 ENIC_TX_STAT(tx_errors
),
64 static const struct enic_stat enic_rx_stats
[] = {
65 ENIC_RX_STAT(rx_frames_ok
),
66 ENIC_RX_STAT(rx_frames_total
),
67 ENIC_RX_STAT(rx_unicast_frames_ok
),
68 ENIC_RX_STAT(rx_multicast_frames_ok
),
69 ENIC_RX_STAT(rx_broadcast_frames_ok
),
70 ENIC_RX_STAT(rx_bytes_ok
),
71 ENIC_RX_STAT(rx_unicast_bytes_ok
),
72 ENIC_RX_STAT(rx_multicast_bytes_ok
),
73 ENIC_RX_STAT(rx_broadcast_bytes_ok
),
74 ENIC_RX_STAT(rx_drop
),
75 ENIC_RX_STAT(rx_no_bufs
),
76 ENIC_RX_STAT(rx_errors
),
78 ENIC_RX_STAT(rx_crc_errors
),
79 ENIC_RX_STAT(rx_frames_64
),
80 ENIC_RX_STAT(rx_frames_127
),
81 ENIC_RX_STAT(rx_frames_255
),
82 ENIC_RX_STAT(rx_frames_511
),
83 ENIC_RX_STAT(rx_frames_1023
),
84 ENIC_RX_STAT(rx_frames_1518
),
85 ENIC_RX_STAT(rx_frames_to_max
),
88 static const struct enic_stat enic_gen_stats
[] = {
89 ENIC_GEN_STAT(dma_map_error
),
92 static const unsigned int enic_n_tx_stats
= ARRAY_SIZE(enic_tx_stats
);
93 static const unsigned int enic_n_rx_stats
= ARRAY_SIZE(enic_rx_stats
);
94 static const unsigned int enic_n_gen_stats
= ARRAY_SIZE(enic_gen_stats
);
96 static void enic_intr_coal_set_rx(struct enic
*enic
, u32 timer
)
101 for (i
= 0; i
< enic
->rq_count
; i
++) {
102 intr
= enic_msix_rq_intr(enic
, i
);
103 vnic_intr_coalescing_timer_set(&enic
->intr
[intr
], timer
);
107 static int enic_get_ksettings(struct net_device
*netdev
,
108 struct ethtool_link_ksettings
*ecmd
)
110 struct enic
*enic
= netdev_priv(netdev
);
111 struct ethtool_link_settings
*base
= &ecmd
->base
;
113 ethtool_link_ksettings_add_link_mode(ecmd
, supported
,
115 ethtool_link_ksettings_add_link_mode(ecmd
, supported
, FIBRE
);
116 ethtool_link_ksettings_add_link_mode(ecmd
, advertising
,
118 ethtool_link_ksettings_add_link_mode(ecmd
, advertising
, FIBRE
);
119 base
->port
= PORT_FIBRE
;
121 if (netif_carrier_ok(netdev
)) {
122 base
->speed
= vnic_dev_port_speed(enic
->vdev
);
123 base
->duplex
= DUPLEX_FULL
;
125 base
->speed
= SPEED_UNKNOWN
;
126 base
->duplex
= DUPLEX_UNKNOWN
;
129 base
->autoneg
= AUTONEG_DISABLE
;
134 static void enic_get_drvinfo(struct net_device
*netdev
,
135 struct ethtool_drvinfo
*drvinfo
)
137 struct enic
*enic
= netdev_priv(netdev
);
138 struct vnic_devcmd_fw_info
*fw_info
;
141 err
= enic_dev_fw_info(enic
, &fw_info
);
142 /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
143 * For other failures, like devcmd failure, we return previously
149 strlcpy(drvinfo
->driver
, DRV_NAME
, sizeof(drvinfo
->driver
));
150 strlcpy(drvinfo
->fw_version
, fw_info
->fw_version
,
151 sizeof(drvinfo
->fw_version
));
152 strlcpy(drvinfo
->bus_info
, pci_name(enic
->pdev
),
153 sizeof(drvinfo
->bus_info
));
156 static void enic_get_strings(struct net_device
*netdev
, u32 stringset
,
163 for (i
= 0; i
< enic_n_tx_stats
; i
++) {
164 memcpy(data
, enic_tx_stats
[i
].name
, ETH_GSTRING_LEN
);
165 data
+= ETH_GSTRING_LEN
;
167 for (i
= 0; i
< enic_n_rx_stats
; i
++) {
168 memcpy(data
, enic_rx_stats
[i
].name
, ETH_GSTRING_LEN
);
169 data
+= ETH_GSTRING_LEN
;
171 for (i
= 0; i
< enic_n_gen_stats
; i
++) {
172 memcpy(data
, enic_gen_stats
[i
].name
, ETH_GSTRING_LEN
);
173 data
+= ETH_GSTRING_LEN
;
179 static void enic_get_ringparam(struct net_device
*netdev
,
180 struct ethtool_ringparam
*ring
)
182 struct enic
*enic
= netdev_priv(netdev
);
183 struct vnic_enet_config
*c
= &enic
->config
;
185 ring
->rx_max_pending
= ENIC_MAX_RQ_DESCS
;
186 ring
->rx_pending
= c
->rq_desc_count
;
187 ring
->tx_max_pending
= ENIC_MAX_WQ_DESCS
;
188 ring
->tx_pending
= c
->wq_desc_count
;
191 static int enic_set_ringparam(struct net_device
*netdev
,
192 struct ethtool_ringparam
*ring
)
194 struct enic
*enic
= netdev_priv(netdev
);
195 struct vnic_enet_config
*c
= &enic
->config
;
196 int running
= netif_running(netdev
);
197 unsigned int rx_pending
;
198 unsigned int tx_pending
;
201 if (ring
->rx_mini_max_pending
|| ring
->rx_mini_pending
) {
203 "modifying mini ring params is not supported");
206 if (ring
->rx_jumbo_max_pending
|| ring
->rx_jumbo_pending
) {
208 "modifying jumbo ring params is not supported");
211 rx_pending
= c
->rq_desc_count
;
212 tx_pending
= c
->wq_desc_count
;
213 if (ring
->rx_pending
> ENIC_MAX_RQ_DESCS
||
214 ring
->rx_pending
< ENIC_MIN_RQ_DESCS
) {
215 netdev_info(netdev
, "rx pending (%u) not in range [%u,%u]",
216 ring
->rx_pending
, ENIC_MIN_RQ_DESCS
,
220 if (ring
->tx_pending
> ENIC_MAX_WQ_DESCS
||
221 ring
->tx_pending
< ENIC_MIN_WQ_DESCS
) {
222 netdev_info(netdev
, "tx pending (%u) not in range [%u,%u]",
223 ring
->tx_pending
, ENIC_MIN_WQ_DESCS
,
230 ring
->rx_pending
& 0xffffffe0; /* must be aligned to groups of 32 */
232 ring
->tx_pending
& 0xffffffe0; /* must be aligned to groups of 32 */
233 enic_free_vnic_resources(enic
);
234 err
= enic_alloc_vnic_resources(enic
);
237 "Failed to alloc vNIC resources, aborting\n");
238 enic_free_vnic_resources(enic
);
241 enic_init_vnic_resources(enic
);
243 err
= dev_open(netdev
, NULL
);
249 c
->rq_desc_count
= rx_pending
;
250 c
->wq_desc_count
= tx_pending
;
254 static int enic_get_sset_count(struct net_device
*netdev
, int sset
)
258 return enic_n_tx_stats
+ enic_n_rx_stats
+ enic_n_gen_stats
;
264 static void enic_get_ethtool_stats(struct net_device
*netdev
,
265 struct ethtool_stats
*stats
, u64
*data
)
267 struct enic
*enic
= netdev_priv(netdev
);
268 struct vnic_stats
*vstats
;
272 err
= enic_dev_stats_dump(enic
, &vstats
);
273 /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
274 * For other failures, like devcmd failure, we return previously
280 for (i
= 0; i
< enic_n_tx_stats
; i
++)
281 *(data
++) = ((u64
*)&vstats
->tx
)[enic_tx_stats
[i
].index
];
282 for (i
= 0; i
< enic_n_rx_stats
; i
++)
283 *(data
++) = ((u64
*)&vstats
->rx
)[enic_rx_stats
[i
].index
];
284 for (i
= 0; i
< enic_n_gen_stats
; i
++)
285 *(data
++) = ((u64
*)&enic
->gen_stats
)[enic_gen_stats
[i
].index
];
288 static u32
enic_get_msglevel(struct net_device
*netdev
)
290 struct enic
*enic
= netdev_priv(netdev
);
291 return enic
->msg_enable
;
294 static void enic_set_msglevel(struct net_device
*netdev
, u32 value
)
296 struct enic
*enic
= netdev_priv(netdev
);
297 enic
->msg_enable
= value
;
300 static int enic_get_coalesce(struct net_device
*netdev
,
301 struct ethtool_coalesce
*ecmd
)
303 struct enic
*enic
= netdev_priv(netdev
);
304 struct enic_rx_coal
*rxcoal
= &enic
->rx_coalesce_setting
;
306 if (vnic_dev_get_intr_mode(enic
->vdev
) == VNIC_DEV_INTR_MODE_MSIX
)
307 ecmd
->tx_coalesce_usecs
= enic
->tx_coalesce_usecs
;
308 ecmd
->rx_coalesce_usecs
= enic
->rx_coalesce_usecs
;
309 if (rxcoal
->use_adaptive_rx_coalesce
)
310 ecmd
->use_adaptive_rx_coalesce
= 1;
311 ecmd
->rx_coalesce_usecs_low
= rxcoal
->small_pkt_range_start
;
312 ecmd
->rx_coalesce_usecs_high
= rxcoal
->range_end
;
317 static int enic_coalesce_valid(struct enic
*enic
,
318 struct ethtool_coalesce
*ec
)
320 u32 coalesce_usecs_max
= vnic_dev_get_intr_coal_timer_max(enic
->vdev
);
321 u32 rx_coalesce_usecs_high
= min_t(u32
, coalesce_usecs_max
,
322 ec
->rx_coalesce_usecs_high
);
323 u32 rx_coalesce_usecs_low
= min_t(u32
, coalesce_usecs_max
,
324 ec
->rx_coalesce_usecs_low
);
326 if ((vnic_dev_get_intr_mode(enic
->vdev
) != VNIC_DEV_INTR_MODE_MSIX
) &&
327 ec
->tx_coalesce_usecs
)
330 if ((ec
->tx_coalesce_usecs
> coalesce_usecs_max
) ||
331 (ec
->rx_coalesce_usecs
> coalesce_usecs_max
) ||
332 (ec
->rx_coalesce_usecs_low
> coalesce_usecs_max
) ||
333 (ec
->rx_coalesce_usecs_high
> coalesce_usecs_max
))
334 netdev_info(enic
->netdev
, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
337 if (ec
->rx_coalesce_usecs_high
&&
338 (rx_coalesce_usecs_high
<
339 rx_coalesce_usecs_low
+ ENIC_AIC_LARGE_PKT_DIFF
))
345 static int enic_set_coalesce(struct net_device
*netdev
,
346 struct ethtool_coalesce
*ecmd
)
348 struct enic
*enic
= netdev_priv(netdev
);
349 u32 tx_coalesce_usecs
;
350 u32 rx_coalesce_usecs
;
351 u32 rx_coalesce_usecs_low
;
352 u32 rx_coalesce_usecs_high
;
353 u32 coalesce_usecs_max
;
354 unsigned int i
, intr
;
356 struct enic_rx_coal
*rxcoal
= &enic
->rx_coalesce_setting
;
358 ret
= enic_coalesce_valid(enic
, ecmd
);
361 coalesce_usecs_max
= vnic_dev_get_intr_coal_timer_max(enic
->vdev
);
362 tx_coalesce_usecs
= min_t(u32
, ecmd
->tx_coalesce_usecs
,
364 rx_coalesce_usecs
= min_t(u32
, ecmd
->rx_coalesce_usecs
,
367 rx_coalesce_usecs_low
= min_t(u32
, ecmd
->rx_coalesce_usecs_low
,
369 rx_coalesce_usecs_high
= min_t(u32
, ecmd
->rx_coalesce_usecs_high
,
372 if (vnic_dev_get_intr_mode(enic
->vdev
) == VNIC_DEV_INTR_MODE_MSIX
) {
373 for (i
= 0; i
< enic
->wq_count
; i
++) {
374 intr
= enic_msix_wq_intr(enic
, i
);
375 vnic_intr_coalescing_timer_set(&enic
->intr
[intr
],
378 enic
->tx_coalesce_usecs
= tx_coalesce_usecs
;
380 rxcoal
->use_adaptive_rx_coalesce
= !!ecmd
->use_adaptive_rx_coalesce
;
381 if (!rxcoal
->use_adaptive_rx_coalesce
)
382 enic_intr_coal_set_rx(enic
, rx_coalesce_usecs
);
383 if (ecmd
->rx_coalesce_usecs_high
) {
384 rxcoal
->range_end
= rx_coalesce_usecs_high
;
385 rxcoal
->small_pkt_range_start
= rx_coalesce_usecs_low
;
386 rxcoal
->large_pkt_range_start
= rx_coalesce_usecs_low
+
387 ENIC_AIC_LARGE_PKT_DIFF
;
390 enic
->rx_coalesce_usecs
= rx_coalesce_usecs
;
395 static int enic_grxclsrlall(struct enic
*enic
, struct ethtool_rxnfc
*cmd
,
398 int j
, ret
= 0, cnt
= 0;
400 cmd
->data
= enic
->rfs_h
.max
- enic
->rfs_h
.free
;
401 for (j
= 0; j
< (1 << ENIC_RFS_FLW_BITSHIFT
); j
++) {
402 struct hlist_head
*hhead
;
403 struct hlist_node
*tmp
;
404 struct enic_rfs_fltr_node
*n
;
406 hhead
= &enic
->rfs_h
.ht_head
[j
];
407 hlist_for_each_entry_safe(n
, tmp
, hhead
, node
) {
408 if (cnt
== cmd
->rule_cnt
)
410 rule_locs
[cnt
] = n
->fltr_id
;
419 static int enic_grxclsrule(struct enic
*enic
, struct ethtool_rxnfc
*cmd
)
421 struct ethtool_rx_flow_spec
*fsp
=
422 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
423 struct enic_rfs_fltr_node
*n
;
425 n
= htbl_fltr_search(enic
, (u16
)fsp
->location
);
428 switch (n
->keys
.basic
.ip_proto
) {
430 fsp
->flow_type
= TCP_V4_FLOW
;
433 fsp
->flow_type
= UDP_V4_FLOW
;
439 fsp
->h_u
.tcp_ip4_spec
.ip4src
= flow_get_u32_src(&n
->keys
);
440 fsp
->m_u
.tcp_ip4_spec
.ip4src
= (__u32
)~0;
442 fsp
->h_u
.tcp_ip4_spec
.ip4dst
= flow_get_u32_dst(&n
->keys
);
443 fsp
->m_u
.tcp_ip4_spec
.ip4dst
= (__u32
)~0;
445 fsp
->h_u
.tcp_ip4_spec
.psrc
= n
->keys
.ports
.src
;
446 fsp
->m_u
.tcp_ip4_spec
.psrc
= (__u16
)~0;
448 fsp
->h_u
.tcp_ip4_spec
.pdst
= n
->keys
.ports
.dst
;
449 fsp
->m_u
.tcp_ip4_spec
.pdst
= (__u16
)~0;
451 fsp
->ring_cookie
= n
->rq_id
;
456 static int enic_get_rx_flow_hash(struct enic
*enic
, struct ethtool_rxnfc
*cmd
)
458 u8 rss_hash_type
= 0;
461 spin_lock_bh(&enic
->devcmd_lock
);
462 (void)vnic_dev_capable_rss_hash_type(enic
->vdev
, &rss_hash_type
);
463 spin_unlock_bh(&enic
->devcmd_lock
);
464 switch (cmd
->flow_type
) {
467 cmd
->data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
|
468 RXH_IP_SRC
| RXH_IP_DST
;
471 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
;
472 if (rss_hash_type
& NIC_CFG_RSS_HASH_TYPE_UDP_IPV6
)
473 cmd
->data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
476 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
;
477 if (rss_hash_type
& NIC_CFG_RSS_HASH_TYPE_UDP_IPV4
)
478 cmd
->data
|= RXH_L4_B_0_1
| RXH_L4_B_2_3
;
490 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
;
499 static int enic_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
502 struct enic
*enic
= netdev_priv(dev
);
506 case ETHTOOL_GRXRINGS
:
507 cmd
->data
= enic
->rq_count
;
509 case ETHTOOL_GRXCLSRLCNT
:
510 spin_lock_bh(&enic
->rfs_h
.lock
);
511 cmd
->rule_cnt
= enic
->rfs_h
.max
- enic
->rfs_h
.free
;
512 cmd
->data
= enic
->rfs_h
.max
;
513 spin_unlock_bh(&enic
->rfs_h
.lock
);
515 case ETHTOOL_GRXCLSRLALL
:
516 spin_lock_bh(&enic
->rfs_h
.lock
);
517 ret
= enic_grxclsrlall(enic
, cmd
, rule_locs
);
518 spin_unlock_bh(&enic
->rfs_h
.lock
);
520 case ETHTOOL_GRXCLSRULE
:
521 spin_lock_bh(&enic
->rfs_h
.lock
);
522 ret
= enic_grxclsrule(enic
, cmd
);
523 spin_unlock_bh(&enic
->rfs_h
.lock
);
526 ret
= enic_get_rx_flow_hash(enic
, cmd
);
536 static int enic_get_tunable(struct net_device
*dev
,
537 const struct ethtool_tunable
*tuna
, void *data
)
539 struct enic
*enic
= netdev_priv(dev
);
543 case ETHTOOL_RX_COPYBREAK
:
544 *(u32
*)data
= enic
->rx_copybreak
;
554 static int enic_set_tunable(struct net_device
*dev
,
555 const struct ethtool_tunable
*tuna
,
558 struct enic
*enic
= netdev_priv(dev
);
562 case ETHTOOL_RX_COPYBREAK
:
563 enic
->rx_copybreak
= *(u32
*)data
;
573 static u32
enic_get_rxfh_key_size(struct net_device
*netdev
)
578 static int enic_get_rxfh(struct net_device
*netdev
, u32
*indir
, u8
*hkey
,
581 struct enic
*enic
= netdev_priv(netdev
);
584 memcpy(hkey
, enic
->rss_key
, ENIC_RSS_LEN
);
587 *hfunc
= ETH_RSS_HASH_TOP
;
592 static int enic_set_rxfh(struct net_device
*netdev
, const u32
*indir
,
593 const u8
*hkey
, const u8 hfunc
)
595 struct enic
*enic
= netdev_priv(netdev
);
597 if ((hfunc
!= ETH_RSS_HASH_NO_CHANGE
&& hfunc
!= ETH_RSS_HASH_TOP
) ||
602 memcpy(enic
->rss_key
, hkey
, ENIC_RSS_LEN
);
604 return __enic_set_rsskey(enic
);
607 static int enic_get_ts_info(struct net_device
*netdev
,
608 struct ethtool_ts_info
*info
)
610 info
->so_timestamping
= SOF_TIMESTAMPING_TX_SOFTWARE
|
611 SOF_TIMESTAMPING_RX_SOFTWARE
|
612 SOF_TIMESTAMPING_SOFTWARE
;
617 static const struct ethtool_ops enic_ethtool_ops
= {
618 .supported_coalesce_params
= ETHTOOL_COALESCE_USECS
|
619 ETHTOOL_COALESCE_USE_ADAPTIVE_RX
|
620 ETHTOOL_COALESCE_RX_USECS_LOW
|
621 ETHTOOL_COALESCE_RX_USECS_HIGH
,
622 .get_drvinfo
= enic_get_drvinfo
,
623 .get_msglevel
= enic_get_msglevel
,
624 .set_msglevel
= enic_set_msglevel
,
625 .get_link
= ethtool_op_get_link
,
626 .get_strings
= enic_get_strings
,
627 .get_ringparam
= enic_get_ringparam
,
628 .set_ringparam
= enic_set_ringparam
,
629 .get_sset_count
= enic_get_sset_count
,
630 .get_ethtool_stats
= enic_get_ethtool_stats
,
631 .get_coalesce
= enic_get_coalesce
,
632 .set_coalesce
= enic_set_coalesce
,
633 .get_rxnfc
= enic_get_rxnfc
,
634 .get_tunable
= enic_get_tunable
,
635 .set_tunable
= enic_set_tunable
,
636 .get_rxfh_key_size
= enic_get_rxfh_key_size
,
637 .get_rxfh
= enic_get_rxfh
,
638 .set_rxfh
= enic_set_rxfh
,
639 .get_link_ksettings
= enic_get_ksettings
,
640 .get_ts_info
= enic_get_ts_info
,
643 void enic_set_ethtool_ops(struct net_device
*netdev
)
645 netdev
->ethtool_ops
= &enic_ethtool_ops
;