/*
 * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>

#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_stats.h"
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64) \
}
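
/* Each enic_stat pairs an ethtool string with the u64 slot of that counter
 * inside the corresponding vnic_*_stats structure, so enic_get_ethtool_stats()
 * can treat the firmware stats block as a flat array of u64 values.
 */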
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
};
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};
static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
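
/* Program the RX interrupt coalescing timer on the MSI-X interrupt of
 * every receive queue.
 */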
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	unsigned int i;
	unsigned int intr;

	for (i = 0; i < enic->rq_count; i++) {
		intr = enic_msix_rq_intr(enic, i);
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
	}
}
static int enic_get_ksettings(struct net_device *netdev,
			      struct ethtool_link_ksettings *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct ethtool_link_settings *base = &ecmd->base;

	ethtool_link_ksettings_add_link_mode(ecmd, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
	base->port = PORT_FIBRE;

	if (netif_carrier_ok(netdev)) {
		base->speed = vnic_dev_port_speed(enic->vdev);
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = AUTONEG_DISABLE;

	return 0;
}
static void enic_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
	 * For other failures, like devcmd failure, we return previously
	 * recorded info.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset,
			     u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_gen_stats; i++) {
			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}
static void enic_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;

	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
	ring->rx_pending = c->rq_desc_count;
	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
	ring->tx_pending = c->wq_desc_count;
}
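
/* Changing ring sizes requires freeing and re-allocating the vNIC queue
 * resources, so the interface is closed and re-opened around the change.
 * Descriptor counts are rounded down to a multiple of 32, the alignment
 * the adapter expects.
 */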
static int enic_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;
	int running = netif_running(netdev);
	unsigned int rx_pending;
	unsigned int tx_pending;
	int err = 0;

	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
		netdev_info(netdev,
			    "modifying mini ring params is not supported");
		return -EINVAL;
	}
	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev,
			    "modifying jumbo ring params is not supported");
		return -EINVAL;
	}
	rx_pending = c->rq_desc_count;
	tx_pending = c->wq_desc_count;
	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
			    ENIC_MAX_RQ_DESCS);
		return -EINVAL;
	}
	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
			    ENIC_MAX_WQ_DESCS);
		return -EINVAL;
	}
	if (running)
		dev_close(netdev);
	c->rq_desc_count =
		ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	c->wq_desc_count =
		ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	enic_free_vnic_resources(enic);
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc vNIC resources, aborting\n");
		enic_free_vnic_resources(enic);
		goto err_out;
	}
	enic_init_vnic_resources(enic);
	if (running) {
		err = dev_open(netdev);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	c->rq_desc_count = rx_pending;
	c->wq_desc_count = tx_pending;
	return err;
}
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
	default:
		return -EOPNOTSUPP;
	}
}
static void enic_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
	 * For other failures, like devcmd failure, we return previously
	 * recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}
static int enic_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}
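
/* Only rx/tx_coalesce_usecs, adaptive RX coalescing and the
 * rx_coalesce_usecs_low/high range bounds are supported; any other non-zero
 * coalescing parameter is rejected.  Values above the adapter's maximum
 * coalescing timer are clamped by enic_set_coalesce() rather than refused.
 */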
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if (ec->rx_max_coalesced_frames ||
	    ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_max_coalesced_frames ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -EINVAL;

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}
static int enic_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
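
/* Report the location (filter ID) of every installed RX flow-steering
 * filter.  Called from enic_get_rxnfc() with rfs_h.lock held.
 */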
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}
static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
				(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct enic_rfs_fltr_node *n;

	n = htbl_fltr_search(enic, (u16)fsp->location);
	if (!n)
		return -EINVAL;
	switch (n->keys.basic.ip_proto) {
	case IPPROTO_TCP:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;

	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;

	fsp->ring_cookie = n->rq_id;

	return 0;
}
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = enic->rq_count;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
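
/* The only tunable exposed is ETHTOOL_RX_COPYBREAK, the length threshold
 * the driver's RX path uses when deciding whether to copy a small received
 * packet into a fresh skb instead of handing up the original receive buffer.
 */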
static int enic_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = enic->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int enic_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		enic->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}
static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
			 u8 *hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if (hkey)
		memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}
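
/* Only the Toeplitz hash function is supported and the RSS indirection
 * table cannot be changed here, so anything other than a new hash key is
 * rejected.
 */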
static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
	    indir)
		return -EINVAL;

	if (hkey)
		memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}
static int enic_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	return 0;
}
static const struct ethtool_ops enic_ethtool_ops = {
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_ringparam = enic_get_ringparam,
	.set_ringparam = enic_set_ringparam,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
	.get_ts_info = enic_get_ts_info,
};
void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}