// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/string.h>
struct hns3_stats {
	char stats_string[ETH_GSTRING_LEN];
	int stats_offset;
};

struct hns3_sfp_type {
	u8 type;
	u8 ext_type;
};

struct hns3_pflag_desc {
	char name[ETH_GSTRING_LEN];
	void (*handler)(struct net_device *netdev, bool enable);
};
/* tqp related stats */
#define HNS3_TQP_STAT(_string, _member) {			\
	.stats_string = _string,				\
	.stats_offset = offsetof(struct hns3_enet_ring, stats) +\
			offsetof(struct ring_stats, _member),	\
}
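
/*
 * Illustrative note (added commentary, not from the original driver): the
 * offset recorded by HNS3_TQP_STAT() is a byte offset into struct
 * hns3_enet_ring, so a counter can later be read back with plain pointer
 * arithmetic, e.g.
 *
 *	u64 val = *(u64 *)((u8 *)ring + hns3_txq_stats[j].stats_offset);
 *
 * which is exactly how hns3_get_stats_tqps() below walks these tables.
 */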
static const struct hns3_stats hns3_txq_stats[] = {
	/* Tx per-queue statistics */
	HNS3_TQP_STAT("dropped", sw_err_cnt),
	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
	HNS3_TQP_STAT("packets", tx_pkts),
	HNS3_TQP_STAT("bytes", tx_bytes),
	HNS3_TQP_STAT("more", tx_more),
	HNS3_TQP_STAT("wake", restart_queue),
	HNS3_TQP_STAT("busy", tx_busy),
	HNS3_TQP_STAT("copy", tx_copy),
	HNS3_TQP_STAT("vlan_err", tx_vlan_err),
	HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
	HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
	HNS3_TQP_STAT("tso_err", tx_tso_err),
};

#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
static const struct hns3_stats hns3_rxq_stats[] = {
	/* Rx per-queue statistics */
	HNS3_TQP_STAT("dropped", sw_err_cnt),
	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
	HNS3_TQP_STAT("packets", rx_pkts),
	HNS3_TQP_STAT("bytes", rx_bytes),
	HNS3_TQP_STAT("errors", rx_err_cnt),
	HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
	HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
	HNS3_TQP_STAT("err_bd_num", err_bd_num),
	HNS3_TQP_STAT("l2_err", l2_err),
	HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
	HNS3_TQP_STAT("csum_complete", csum_complete),
	HNS3_TQP_STAT("multicast", rx_multicast),
	HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
};

#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags)

#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)

#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
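
/*
 * Added note: with the tables above, "ethtool -S <dev>" reports
 * HNS3_TQP_STATS_COUNT counters per TQP pair (see hns3_get_sset_count()),
 * i.e. HNS3_TQP_STATS_COUNT * num_tqps per-queue entries, followed by
 * whatever additional statistics the ae_algo ops provide.
 */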
#define HNS3_SELF_TEST_TYPE_NUM		4
#define HNS3_NIC_LB_TEST_PKT_NUM	1
#define HNS3_NIC_LB_TEST_RING_ID	0
#define HNS3_NIC_LB_TEST_PACKET_SIZE	128
#define HNS3_NIC_LB_SETUP_USEC		10000

/* Nic loopback test err  */
#define HNS3_NIC_LB_TEST_NO_MEM_ERR	1
#define HNS3_NIC_LB_TEST_TX_CNT_ERR	2
#define HNS3_NIC_LB_TEST_RX_CNT_ERR	3
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
	bool vlan_filter_enable;
	int ret;

	if (!h->ae_algo->ops->set_loopback ||
	    !h->ae_algo->ops->set_promisc_mode)
		return -EOPNOTSUPP;

	switch (loop) {
	case HNAE3_LOOP_SERIAL_SERDES:
	case HNAE3_LOOP_PARALLEL_SERDES:
		ret = h->ae_algo->ops->set_loopback(h, loop, en);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}

	if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		return ret;

	if (en) {
		h->ae_algo->ops->set_promisc_mode(h, true, true);
	} else {
		/* recover promisc mode before loopback test */
		hns3_request_update_promisc_mode(h);
		vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
		hns3_enable_vlan_filter(ndev, vlan_filter_enable);
	}

	return ret;
}
static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	int ret;

	ret = hns3_nic_reset_all_ring(h);
	if (ret)
		return ret;

	ret = hns3_lp_setup(ndev, loop_mode, true);
	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);

	return ret;
}
static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
{
	int ret;

	ret = hns3_lp_setup(ndev, loop_mode, false);
	if (ret) {
		netdev_err(ndev, "lb_setup return error: %d\n", ret);
		return ret;
	}

	usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);

	return 0;
}
static void hns3_lp_setup_skb(struct sk_buff *skb)
{
#define	HNS3_NIC_LB_DST_MAC_ADDR	0x1f

	struct net_device *ndev = skb->dev;
	struct hnae3_handle *handle;
	struct hnae3_ae_dev *ae_dev;
	unsigned char *packet;
	struct ethhdr *ethh;
	unsigned int i;

	skb_reserve(skb, NET_IP_ALIGN);
	ethh = skb_put(skb, sizeof(struct ethhdr));
	packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);

	memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);

	/* The dst mac addr of loopback packet is the same as the host's
	 * mac addr, the SSU component may loop back the packet to host
	 * before the packet reaches mac or serdes, which will defect
	 * the purpose of mac or serdes selftest.
	 */
	handle = hns3_get_handle(ndev);
	ae_dev = pci_get_drvdata(handle->pdev);
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
	eth_zero_addr(ethh->h_source);
	ethh->h_proto = htons(ETH_P_ARP);
	skb_reset_mac_header(skb);

	for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++)
		packet[i] = (unsigned char)(i & 0xff);
}
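
/*
 * Added note: the loopback test frame built above is NET_IP_ALIGN bytes of
 * headroom, an Ethernet header carrying an ARP ethertype, and
 * HNS3_NIC_LB_TEST_PACKET_SIZE payload bytes filled with the pattern
 * 0x00, 0x01, ... (i & 0xff). hns3_lb_check_skb_data() below verifies the
 * same pattern byte-for-byte on the Rx side.
 */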
static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
				   struct sk_buff *skb)
{
	struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
	unsigned char *packet = skb->data;
	u32 len = skb_headlen(skb);
	u32 i;

	len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);

	for (i = 0; i < len; i++)
		if (packet[i] != (unsigned char)(i & 0xff))
			break;

	/* The packet is correctly received */
	if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
		tqp_vector->rx_group.total_packets++;
	else
		print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, len, true);

	dev_kfree_skb_any(skb);
}
static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hnae3_knic_private_info *kinfo;
	u32 i, rcv_good_pkt_total = 0;

	kinfo = &h->kinfo;
	for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
		struct hns3_enet_ring *ring = &priv->ring[i];
		struct hns3_enet_ring_group *rx_group;
		u64 pre_rx_pkt;

		rx_group = &ring->tqp_vector->rx_group;
		pre_rx_pkt = rx_group->total_packets;

		hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);

		rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
		rx_group->total_packets = pre_rx_pkt;
	}

	return rcv_good_pkt_total;
}
static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
				  u32 end_ringid, u32 budget)
{
	u32 i;

	for (i = start_ringid; i <= end_ringid; i++) {
		struct hns3_enet_ring *ring = &priv->ring[i];

		hns3_clean_tx_ring(ring, 0);
	}
}
/**
 * hns3_lp_run_test - run loopback test
 * @ndev: net device
 * @mode: loopback type
 */
static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 i, good_cnt;
	int ret_val = 0;

	skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
			GFP_KERNEL);
	if (!skb)
		return HNS3_NIC_LB_TEST_NO_MEM_ERR;

	skb->dev = ndev;
	hns3_lp_setup_skb(skb);
	skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;

	good_cnt = 0;
	for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) {
		netdev_tx_t tx_ret;

		skb_get(skb);
		tx_ret = hns3_nic_net_xmit(skb, ndev);
		if (tx_ret == NETDEV_TX_OK)
			good_cnt++;
		else
			netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
				   tx_ret);
	}
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
		netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
		goto out;
	}

	/* Allow 200 milliseconds for packets to go from Tx to Rx */
	msleep(200);

	good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
		netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
	}

out:
	hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_PKT_NUM);

	kfree_skb(skb);
	return ret_val;
}
/**
 * hns3_nic_self_test - self test
 * @ndev: net device
 * @eth_test: test cmd
 * @data: test result
 */
static void hns3_self_test(struct net_device *ndev,
			   struct ethtool_test *eth_test, u64 *data)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
	bool if_running = netif_running(ndev);
	int test_index = 0;
	u32 i;

	if (hns3_nic_resetting(ndev)) {
		netdev_err(ndev, "dev resetting!");
		return;
	}

	/* Only do offline selftest, or pass by default */
	if (eth_test->flags != ETH_TEST_FL_OFFLINE)
		return;

	netif_dbg(h, drv, ndev, "self test start");

	st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
	st_param[HNAE3_LOOP_APP][1] =
			h->flags & HNAE3_SUPPORT_APP_LOOPBACK;

	st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
	st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
			h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;

	st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
			HNAE3_LOOP_PARALLEL_SERDES;
	st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
			h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

	st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
	st_param[HNAE3_LOOP_PHY][1] =
			h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
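
	/*
	 * Added note: st_param[loop][0] holds the loopback type itself and
	 * st_param[loop][1] is non-zero only when the handle advertises the
	 * matching HNAE3_SUPPORT_*_LOOPBACK flag, so unsupported tests are
	 * simply skipped in the loop below.
	 */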

	if (if_running)
		ndev->netdev_ops->ndo_stop(ndev);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	/* Disable the vlan filter for selftest does not support it */
	if (h->ae_algo->ops->enable_vlan_filter)
		h->ae_algo->ops->enable_vlan_filter(h, false);
#endif

	/* Tell firmware to stop mac autoneg before loopback test start,
	 * otherwise loopback test may be failed when the port is still
	 * negotiating.
	 */
	if (h->ae_algo->ops->halt_autoneg)
		h->ae_algo->ops->halt_autoneg(h, true);

	set_bit(HNS3_NIC_STATE_TESTING, &priv->state);

	for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
		enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];

		if (!st_param[i][1])
			continue;

		data[test_index] = hns3_lp_up(ndev, loop_type);
		if (!data[test_index])
			data[test_index] = hns3_lp_run_test(ndev, loop_type);

		hns3_lp_down(ndev, loop_type);

		if (data[test_index])
			eth_test->flags |= ETH_TEST_FL_FAILED;

		test_index++;
	}

	clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);

	if (h->ae_algo->ops->halt_autoneg)
		h->ae_algo->ops->halt_autoneg(h, false);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (h->ae_algo->ops->enable_vlan_filter)
		h->ae_algo->ops->enable_vlan_filter(h, true);
#endif

	if (if_running)
		ndev->netdev_ops->ndo_open(ndev);

	netif_dbg(h, drv, ndev, "self test end\n");
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
					   bool enable)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);

	if (enable)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
	else
		clear_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);

	hns3_request_update_promisc_mode(handle);
}

static const struct hns3_pflag_desc hns3_priv_flags[HNAE3_PFLAG_MAX] = {
	{ "limit_promisc",	hns3_update_limit_promisc_mode }
};
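
/*
 * Added note: the index of each entry in hns3_priv_flags[] is the
 * corresponding HNAE3_PFLAG_* bit, and .handler is invoked by
 * hns3_set_priv_flags() whenever that bit changes via
 * "ethtool --set-priv-flags"; "limit_promisc" therefore toggles
 * HNAE3_PFLAG_LIMIT_PROMISC through hns3_update_limit_promisc_mode().
 */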
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;

	if (!ops->get_sset_count)
		return -EOPNOTSUPP;

	switch (stringset) {
	case ETH_SS_STATS:
		return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
			ops->get_sset_count(h, stringset));

	case ETH_SS_TEST:
		return ops->get_sset_count(h, stringset);

	case ETH_SS_PRIV_FLAGS:
		return HNAE3_PFLAG_MAX;

	default:
		return -EOPNOTSUPP;
	}
}
static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
				 u32 stat_count, u32 num_tqps,
				 const char *prefix)
{
#define MAX_PREFIX_SIZE (6 + 4)
	u32 size_left;
	u32 i, j;
	u32 n1;

	for (i = 0; i < num_tqps; i++) {
		for (j = 0; j < stat_count; j++) {
			data[ETH_GSTRING_LEN - 1] = '\0';

			/* first, prepend the prefix string */
			n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
				       prefix, i);
			size_left = (ETH_GSTRING_LEN - 1) - n1;

			/* now, concatenate the stats string to it */
			strncat(data, stats[j].stats_string, size_left);
			data += ETH_GSTRING_LEN;
		}
	}

	return data;
}
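
/*
 * Added note: the strings produced above are "<prefix><queue>_<stat>",
 * e.g. "txq0_packets" or "rxq3_l3l4_csum_err", and they are emitted in the
 * same order that hns3_get_stats_tqps() emits the values, which is what
 * keeps "ethtool -S" names and numbers aligned.
 */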
static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	const char tx_prefix[] = "txq";
	const char rx_prefix[] = "rxq";

	/* get strings for Tx */
	data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
				   kinfo->num_tqps, tx_prefix);

	/* get strings for Rx */
	data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
				   kinfo->num_tqps, rx_prefix);

	return data;
}
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;
	char *buff = (char *)data;
	int i;

	if (!ops->get_strings)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		buff = hns3_get_strings_tqps(h, buff);
		ops->get_strings(h, stringset, (u8 *)buff);
		break;
	case ETH_SS_TEST:
		ops->get_strings(h, stringset, data);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < HNS3_PRIV_FLAGS_LEN; i++) {
			snprintf(buff, ETH_GSTRING_LEN, "%s",
				 hns3_priv_flags[i].name);
			buff += ETH_GSTRING_LEN;
		}
		break;
	default:
		break;
	}
}
static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
{
	struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hns3_enet_ring *ring;
	u8 *stat;
	int i, j;

	/* get stats for Tx */
	for (i = 0; i < kinfo->num_tqps; i++) {
		ring = &nic_priv->ring[i];
		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
			*data++ = *(u64 *)stat;
		}
	}

	/* get stats for Rx */
	for (i = 0; i < kinfo->num_tqps; i++) {
		ring = &nic_priv->ring[i + kinfo->num_tqps];
		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
			*data++ = *(u64 *)stat;
		}
	}

	return data;
}
/* hns3_get_stats - get detail statistics.
 * @netdev: net device
 * @stats: statistics info.
 * @data: statistics data.
 */
static void hns3_get_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u64 *p = data;

	if (hns3_nic_resetting(netdev)) {
		netdev_err(netdev, "dev resetting, could not get stats\n");
		return;
	}

	if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
		netdev_err(netdev, "could not get any statistics\n");
		return;
	}

	h->ae_algo->ops->update_stats(h, &netdev->stats);

	/* get per-queue stats */
	p = hns3_get_stats_tqps(h, p);

	/* get MAC & other misc hardware stats */
	h->ae_algo->ops->get_stats(h, p);
}
static void hns3_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u32 fw_version;

	if (!h->ae_algo->ops->get_fw_version) {
		netdev_err(netdev, "could not get fw version!\n");
		return;
	}

	strncpy(drvinfo->driver, h->pdev->driver->name,
		sizeof(drvinfo->driver));
	drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';

	strncpy(drvinfo->bus_info, pci_name(h->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';

	fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%lu.%lu.%lu.%lu",
		 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));
}
static u32 hns3_get_link(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->get_status)
		return h->ae_algo->ops->get_status(h);
	else
		return 0;
}
static void hns3_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *param)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int queue_num = h->kinfo.num_tqps;

	if (hns3_nic_resetting(netdev)) {
		netdev_err(netdev, "dev resetting!");
		return;
	}

	param->tx_max_pending = HNS3_RING_MAX_PENDING;
	param->rx_max_pending = HNS3_RING_MAX_PENDING;

	param->tx_pending = priv->ring[0].desc_num;
	param->rx_pending = priv->ring[queue_num].desc_num;
}
static void hns3_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *param)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->get_pauseparam)
		h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
			&param->rx_pause, &param->tx_pause);
}
static int hns3_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *param)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	netif_dbg(h, drv, netdev,
		  "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
		  param->autoneg, param->rx_pause, param->tx_pause);

	if (h->ae_algo->ops->set_pauseparam)
		return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
						       param->rx_pause,
						       param->tx_pause);
	return -EOPNOTSUPP;
}
static void hns3_get_ksettings(struct hnae3_handle *h,
			       struct ethtool_link_ksettings *cmd)
{
	const struct hnae3_ae_ops *ops = h->ae_algo->ops;

	/* 1.auto_neg & speed & duplex from cmd */
	if (ops->get_ksettings_an_result)
		ops->get_ksettings_an_result(h,
					     &cmd->base.autoneg,
					     &cmd->base.speed,
					     &cmd->base.duplex);

	/* 2.get link mode */
	if (ops->get_link_mode)
		ops->get_link_mode(h,
				   cmd->link_modes.supported,
				   cmd->link_modes.advertising);

	/* 3.mdix_ctrl&mdix get from phy reg */
	if (ops->get_mdix_mode)
		ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
				   &cmd->base.eth_tp_mdix);
}
static int hns3_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops;
	u8 module_type;
	u8 media_type;
	u8 link_stat;

	ops = h->ae_algo->ops;
	if (ops->get_media_type)
		ops->get_media_type(h, &media_type, &module_type);
	else
		return -EOPNOTSUPP;

	switch (media_type) {
	case HNAE3_MEDIA_TYPE_NONE:
		cmd->base.port = PORT_NONE;
		hns3_get_ksettings(h, cmd);
		break;
	case HNAE3_MEDIA_TYPE_FIBER:
		if (module_type == HNAE3_MODULE_TYPE_CR)
			cmd->base.port = PORT_DA;
		else
			cmd->base.port = PORT_FIBRE;

		hns3_get_ksettings(h, cmd);
		break;
	case HNAE3_MEDIA_TYPE_BACKPLANE:
		cmd->base.port = PORT_NONE;
		hns3_get_ksettings(h, cmd);
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		cmd->base.port = PORT_TP;
		if (!netdev->phydev)
			hns3_get_ksettings(h, cmd);
		else
			phy_ethtool_ksettings_get(netdev->phydev, cmd);
		break;
	default:
		netdev_warn(netdev, "Unknown media type");
		return 0;
	}

	/* mdio_support */
	cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;

	link_stat = hns3_get_link(netdev);
	if (!link_stat) {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
static int hns3_check_ksettings_param(const struct net_device *netdev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
	u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
	u8 autoneg;
	u32 speed;
	u8 duplex;
	int ret;

	/* hw doesn't support use specified speed and duplex to negotiate,
	 * unnecessary to check them when autoneg on.
	 */
	if (cmd->base.autoneg)
		return 0;

	if (ops->get_ksettings_an_result) {
		ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
		if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
		    cmd->base.duplex == duplex)
			return 0;
	}

	if (ops->get_media_type)
		ops->get_media_type(handle, &media_type, &module_type);

	if (cmd->base.duplex == DUPLEX_HALF &&
	    media_type != HNAE3_MEDIA_TYPE_COPPER) {
		netdev_err(netdev,
			   "only copper port supports half duplex!");
		return -EINVAL;
	}

	if (ops->check_port_speed) {
		ret = ops->check_port_speed(handle, cmd->base.speed);
		if (ret) {
			netdev_err(netdev, "unsupported speed\n");
			return ret;
		}
	}

	return 0;
}
static int hns3_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	int ret;

	/* Chip don't support this mode. */
	if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
		return -EINVAL;

	netif_dbg(handle, drv, netdev,
		  "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
		  netdev->phydev ? "phy" : "mac",
		  cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);

	/* Only support ksettings_set for netdev with phy attached for now */
	if (netdev->phydev) {
		if (cmd->base.speed == SPEED_1000 &&
		    cmd->base.autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		return phy_ethtool_ksettings_set(netdev->phydev, cmd);
	}

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	ret = hns3_check_ksettings_param(netdev, cmd);
	if (ret)
		return ret;

	if (ops->set_autoneg) {
		ret = ops->set_autoneg(handle, cmd->base.autoneg);
		if (ret)
			return ret;
	}

	/* hw doesn't support use specified speed and duplex to negotiate,
	 * ignore them when autoneg on.
	 */
	if (cmd->base.autoneg) {
		netdev_info(netdev,
			    "autoneg is on, ignore the speed and duplex\n");
		return 0;
	}

	if (ops->cfg_mac_speed_dup_h)
		ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
					       cmd->base.duplex);

	return ret;
}
static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_rss_key_size)
		return 0;

	return h->ae_algo->ops->get_rss_key_size(h);
}

static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_rss_indir_size)
		return 0;

	return h->ae_algo->ops->get_rss_indir_size(h);
}
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_rss)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
}
static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);

	if (!h->ae_algo->ops->set_rss)
		return -EOPNOTSUPP;

	if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
	     hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
	     hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
		netdev_err(netdev, "hash func not supported\n");
		return -EOPNOTSUPP;
	}

	if (!indir) {
		netdev_err(netdev,
			   "set rss failed for indir is empty\n");
		return -EOPNOTSUPP;
	}

	return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
}
static int hns3_get_rxnfc(struct net_device *netdev,
			  struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = h->kinfo.num_tqps;
		return 0;
	case ETHTOOL_GRXFH:
		if (h->ae_algo->ops->get_rss_tuple)
			return h->ae_algo->ops->get_rss_tuple(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRLCNT:
		if (h->ae_algo->ops->get_fd_rule_cnt)
			return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRULE:
		if (h->ae_algo->ops->get_fd_rule_info)
			return h->ae_algo->ops->get_fd_rule_info(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_GRXCLSRLALL:
		if (h->ae_algo->ops->get_fd_all_rules)
			return h->ae_algo->ops->get_fd_all_rules(h, cmd,
								 rule_locs);
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}
static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
					u32 tx_desc_num, u32 rx_desc_num)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	h->kinfo.num_tx_desc = tx_desc_num;
	h->kinfo.num_rx_desc = rx_desc_num;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		priv->ring[i].desc_num = tx_desc_num;
		priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
	}
}
static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *tmp_rings;
	int i;

	tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
			    sizeof(struct hns3_enet_ring), GFP_KERNEL);
	if (!tmp_rings)
		return NULL;

	for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
		memcpy(&tmp_rings[i], &priv->ring[i],
		       sizeof(struct hns3_enet_ring));
		tmp_rings[i].skb = NULL;
	}

	return tmp_rings;
}
static int hns3_check_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *param)
{
	if (hns3_nic_resetting(ndev))
		return -EBUSY;

	if (param->rx_mini_pending || param->rx_jumbo_pending)
		return -EINVAL;

	if (param->tx_pending > HNS3_RING_MAX_PENDING ||
	    param->tx_pending < HNS3_RING_MIN_PENDING ||
	    param->rx_pending > HNS3_RING_MAX_PENDING ||
	    param->rx_pending < HNS3_RING_MIN_PENDING) {
		netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
			   HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
		return -EINVAL;
	}

	return 0;
}
static int hns3_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *param)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_ring *tmp_rings;
	bool if_running = netif_running(ndev);
	u32 old_tx_desc_num, new_tx_desc_num;
	u32 old_rx_desc_num, new_rx_desc_num;
	u16 queue_num = h->kinfo.num_tqps;
	int ret, i;

	ret = hns3_check_ringparam(ndev, param);
	if (ret)
		return ret;

	/* Hardware requires that its descriptors must be multiple of eight */
	new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
	new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
= priv
->ring
[0].desc_num
;
1013 old_rx_desc_num
= priv
->ring
[queue_num
].desc_num
;
1014 if (old_tx_desc_num
== new_tx_desc_num
&&
1015 old_rx_desc_num
== new_rx_desc_num
)
1018 tmp_rings
= hns3_backup_ringparam(priv
);
1021 "backup ring param failed by allocating memory fail\n");
1026 "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
1027 old_tx_desc_num
, old_rx_desc_num
,
1028 new_tx_desc_num
, new_rx_desc_num
);
1031 ndev
->netdev_ops
->ndo_stop(ndev
);
1033 hns3_change_all_ring_bd_num(priv
, new_tx_desc_num
, new_rx_desc_num
);
1034 ret
= hns3_init_all_ring(priv
);
1036 netdev_err(ndev
, "Change bd num fail, revert to old value(%d)\n",
1039 hns3_change_all_ring_bd_num(priv
, old_tx_desc_num
,
1041 for (i
= 0; i
< h
->kinfo
.num_tqps
* 2; i
++)
1042 memcpy(&priv
->ring
[i
], &tmp_rings
[i
],
1043 sizeof(struct hns3_enet_ring
));
1045 for (i
= 0; i
< h
->kinfo
.num_tqps
* 2; i
++)
1046 hns3_fini_ring(&tmp_rings
[i
]);
1052 ret
= ndev
->netdev_ops
->ndo_open(ndev
);
static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		if (h->ae_algo->ops->set_rss_tuple)
			return h->ae_algo->ops->set_rss_tuple(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_SRXCLSRLINS:
		if (h->ae_algo->ops->add_fd_entry)
			return h->ae_algo->ops->add_fd_entry(h, cmd);
		return -EOPNOTSUPP;
	case ETHTOOL_SRXCLSRLDEL:
		if (h->ae_algo->ops->del_fd_entry)
			return h->ae_algo->ops->del_fd_entry(h, cmd);
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}
static int hns3_nway_reset(struct net_device *netdev)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	struct phy_device *phy = netdev->phydev;
	int autoneg;

	if (!netif_running(netdev))
		return 0;

	if (hns3_nic_resetting(netdev)) {
		netdev_err(netdev, "dev resetting!");
		return -EBUSY;
	}

	if (!ops->get_autoneg || !ops->restart_autoneg)
		return -EOPNOTSUPP;

	autoneg = ops->get_autoneg(handle);
	if (autoneg != AUTONEG_ENABLE) {
		netdev_err(netdev,
			   "Autoneg is off, don't support to restart it\n");
		return -EINVAL;
	}

	netif_dbg(handle, drv, netdev,
		  "nway reset (using %s)\n", phy ? "phy" : "mac");

	if (phy)
		return genphy_restart_aneg(phy);

	return ops->restart_autoneg(handle);
}
static void hns3_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->get_channels)
		h->ae_algo->ops->get_channels(h, ch);
}
static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
				       struct ethtool_coalesce *cmd)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u16 queue_num = h->kinfo.num_tqps;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	if (queue >= queue_num) {
		netdev_err(netdev,
			   "Invalid queue value %u! Queue max id=%u\n",
			   queue, queue_num - 1);
		return -EINVAL;
	}

	tx_vector = priv->ring[queue].tqp_vector;
	rx_vector = priv->ring[queue_num + queue].tqp_vector;

	cmd->use_adaptive_tx_coalesce =
			tx_vector->tx_group.coal.adapt_enable;
	cmd->use_adaptive_rx_coalesce =
			rx_vector->rx_group.coal.adapt_enable;

	cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
	cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;

	cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
	cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;

	cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
	cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;

	return 0;
}
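
/*
 * Added note (a reading of the fields above, not authoritative hardware
 * documentation): tx/rx_coalesce_usecs map to the per-vector GL (interrupt
 * gap/time) setting, *_coalesce_usecs_high to the RL (rate limit) value
 * shared by the whole handle, and *_max_coalesced_frames to the QL
 * (quantity limit) setting, mirroring how hns3_set_coalesce_per_queue()
 * writes them back.
 */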
static int hns3_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	return hns3_get_coalesce_per_queue(netdev, 0, cmd);
}
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	u32 rx_gl, tx_gl;

	if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
		netdev_err(netdev,
			   "invalid rx-usecs value, rx-usecs range is 0-%u\n",
			   ae_dev->dev_specs.max_int_gl);
		return -EINVAL;
	}

	if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
		netdev_err(netdev,
			   "invalid tx-usecs value, tx-usecs range is 0-%u\n",
			   ae_dev->dev_specs.max_int_gl);
		return -EINVAL;
	}

	/* device version above V3(include V3), GL uses 1us unit,
	 * so the round down is not needed.
	 */
	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		return 0;

	rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
	if (rx_gl != cmd->rx_coalesce_usecs) {
		netdev_info(netdev,
			    "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
			    cmd->rx_coalesce_usecs, rx_gl);
	}

	tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
	if (tx_gl != cmd->tx_coalesce_usecs) {
		netdev_info(netdev,
			    "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
			    cmd->tx_coalesce_usecs, tx_gl);
	}

	return 0;
}
static int hns3_check_rl_coalesce_para(struct net_device *netdev,
				       struct ethtool_coalesce *cmd)
{
	u32 rl;

	if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
		netdev_err(netdev,
			   "tx_usecs_high must be same as rx_usecs_high.\n");
		return -EINVAL;
	}

	if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
		netdev_err(netdev,
			   "Invalid usecs_high value, usecs_high range is 0-%d\n",
			   HNS3_INT_RL_MAX);
		return -EINVAL;
	}

	rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
	if (rl != cmd->rx_coalesce_usecs_high) {
		netdev_info(netdev,
			    "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
			    cmd->rx_coalesce_usecs_high, rl);
	}

	return 0;
}
static int hns3_check_ql_coalesce_param(struct net_device *netdev,
					struct ethtool_coalesce *cmd)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);

	if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
	    !ae_dev->dev_specs.int_ql_max) {
		netdev_err(netdev, "coalesced frames is not supported\n");
		return -EOPNOTSUPP;
	}

	if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max ||
	    cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) {
		netdev_err(netdev,
			   "invalid coalesced_frames value, range is 0-%u\n",
			   ae_dev->dev_specs.int_ql_max);
		return -ERANGE;
	}

	return 0;
}
static int hns3_check_coalesce_para(struct net_device *netdev,
				    struct ethtool_coalesce *cmd)
{
	int ret;

	ret = hns3_check_gl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check gl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	ret = hns3_check_rl_coalesce_para(netdev, cmd);
	if (ret) {
		netdev_err(netdev,
			   "Check rl coalesce param fail. ret = %d\n", ret);
		return ret;
	}

	ret = hns3_check_ql_coalesce_param(netdev, cmd);
	if (ret)
		return ret;

	if (cmd->use_adaptive_tx_coalesce == 1 ||
	    cmd->use_adaptive_rx_coalesce == 1) {
		netdev_info(netdev,
			    "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
			    cmd->use_adaptive_tx_coalesce,
			    cmd->use_adaptive_rx_coalesce);
	}

	return 0;
}
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
					struct ethtool_coalesce *cmd,
					u32 queue)
{
	struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int queue_num = h->kinfo.num_tqps;

	tx_vector = priv->ring[queue].tqp_vector;
	rx_vector = priv->ring[queue_num + queue].tqp_vector;

	tx_vector->tx_group.coal.adapt_enable =
				cmd->use_adaptive_tx_coalesce;
	rx_vector->rx_group.coal.adapt_enable =
				cmd->use_adaptive_rx_coalesce;

	tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
	rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;

	tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames;
	rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames;

	hns3_set_vector_coalesce_tx_gl(tx_vector,
				       tx_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(rx_vector,
				       rx_vector->rx_group.coal.int_gl);

	hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
	hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);

	if (tx_vector->tx_group.coal.ql_enable)
		hns3_set_vector_coalesce_tx_ql(tx_vector,
					       tx_vector->tx_group.coal.int_ql);
	if (rx_vector->rx_group.coal.ql_enable)
		hns3_set_vector_coalesce_rx_ql(rx_vector,
					       rx_vector->rx_group.coal.int_ql);
}
static int hns3_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *cmd)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	u16 queue_num = h->kinfo.num_tqps;
	int ret;
	int i;

	if (hns3_nic_resetting(netdev))
		return -EBUSY;

	ret = hns3_check_coalesce_para(netdev, cmd);
	if (ret)
		return ret;

	h->kinfo.int_rl_setting =
		hns3_rl_round_down(cmd->rx_coalesce_usecs_high);

	for (i = 0; i < queue_num; i++)
		hns3_set_coalesce_per_queue(netdev, cmd, i);

	return 0;
}
static int hns3_get_regs_len(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_regs_len)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->get_regs_len(h);
}

static void hns3_get_regs(struct net_device *netdev,
			  struct ethtool_regs *cmd, void *data)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->get_regs)
		return;

	h->ae_algo->ops->get_regs(h, &cmd->version, data);
}
static int hns3_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (!h->ae_algo->ops->set_led_id)
		return -EOPNOTSUPP;

	return h->ae_algo->ops->set_led_id(h, state);
}
static u32 hns3_get_msglevel(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	return h->msg_enable;
}

static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	h->msg_enable = msg_level;
}
/* Translate local fec value into ethtool value. */
static unsigned int loc_to_eth_fec(u8 loc_fec)
{
	u32 eth_fec = 0;

	if (loc_fec & BIT(HNAE3_FEC_AUTO))
		eth_fec |= ETHTOOL_FEC_AUTO;
	if (loc_fec & BIT(HNAE3_FEC_RS))
		eth_fec |= ETHTOOL_FEC_RS;
	if (loc_fec & BIT(HNAE3_FEC_BASER))
		eth_fec |= ETHTOOL_FEC_BASER;

	/* if nothing is set, then FEC is off */
	if (!eth_fec)
		eth_fec = ETHTOOL_FEC_OFF;

	return eth_fec;
}
/* Translate ethtool fec value into local value. */
static unsigned int eth_to_loc_fec(unsigned int eth_fec)
{
	u32 loc_fec = 0;

	if (eth_fec & ETHTOOL_FEC_OFF)
		return loc_fec;

	if (eth_fec & ETHTOOL_FEC_AUTO)
		loc_fec |= BIT(HNAE3_FEC_AUTO);
	if (eth_fec & ETHTOOL_FEC_RS)
		loc_fec |= BIT(HNAE3_FEC_RS);
	if (eth_fec & ETHTOOL_FEC_BASER)
		loc_fec |= BIT(HNAE3_FEC_BASER);

	return loc_fec;
}
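
/*
 * Added note: the two helpers above translate between the driver's
 * HNAE3_FEC_* bit mask and the ETHTOOL_FEC_* bit mask in both directions;
 * ETHTOOL_FEC_OFF maps to an empty local mask, and an empty local mask maps
 * back to ETHTOOL_FEC_OFF, so "off" round-trips cleanly.
 */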
static int hns3_get_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fec)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	u8 fec_ability;
	u8 fec_mode;

	if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
		return -EOPNOTSUPP;

	if (!ops->get_fec)
		return -EOPNOTSUPP;

	ops->get_fec(handle, &fec_ability, &fec_mode);

	fec->fec = loc_to_eth_fec(fec_ability);
	fec->active_fec = loc_to_eth_fec(fec_mode);

	return 0;
}
static int hns3_set_fecparam(struct net_device *netdev,
			     struct ethtool_fecparam *fec)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	u32 fec_mode;

	if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
		return -EOPNOTSUPP;

	if (!ops->set_fec)
		return -EOPNOTSUPP;

	fec_mode = eth_to_loc_fec(fec->fec);

	netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);

	return ops->set_fec(handle, fec_mode);
}
static int hns3_get_module_info(struct net_device *netdev,
				struct ethtool_modinfo *modinfo)
{
#define HNS3_SFF_8636_V1_3 0x03

	struct hnae3_handle *handle = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	struct hns3_sfp_type sfp_type;
	int ret;

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
	    !ops->get_module_eeprom)
		return -EOPNOTSUPP;

	memset(&sfp_type, 0, sizeof(sfp_type));
	ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
				     (u8 *)&sfp_type);
	if (ret)
		return ret;

	switch (sfp_type.type) {
	case SFF8024_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	case SFF8024_ID_QSFP_8438:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case SFF8024_ID_QSFP_8436_8636:
		if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		}
		break;
	case SFF8024_ID_QSFP28_8636:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		netdev_err(netdev, "Optical module unknown: %#x\n",
			   sfp_type.type);
		return -EINVAL;
	}

	return 0;
}
static int hns3_get_module_eeprom(struct net_device *netdev,
				  struct ethtool_eeprom *ee, u8 *data)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
	    !ops->get_module_eeprom)
		return -EOPNOTSUPP;

	if (!ee->len)
		return -EINVAL;

	memset(data, 0, ee->len);

	return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
}
static u32 hns3_get_priv_flags(struct net_device *netdev)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);

	return handle->priv_flags;
}
static int hns3_check_priv_flags(struct hnae3_handle *h, u32 changed)
{
	u32 i;

	for (i = 0; i < HNAE3_PFLAG_MAX; i++)
		if ((changed & BIT(i)) && !test_bit(i, &h->supported_pflags)) {
			netdev_err(h->netdev, "%s is unsupported\n",
				   hns3_priv_flags[i].name);
			return -EOPNOTSUPP;
		}

	return 0;
}
static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags)
{
	struct hnae3_handle *handle = hns3_get_handle(netdev);
	u32 changed = pflags ^ handle->priv_flags;
	int ret;
	u32 i;

	ret = hns3_check_priv_flags(handle, changed);
	if (ret)
		return ret;

	for (i = 0; i < HNAE3_PFLAG_MAX; i++) {
		if (changed & BIT(i)) {
			bool enable = !(handle->priv_flags & BIT(i));

			if (enable)
				handle->priv_flags |= BIT(i);
			else
				handle->priv_flags &= ~BIT(i);
			hns3_priv_flags[i].handler(netdev, enable);
		}
	}

	return 0;
}
#define HNS3_ETHTOOL_COALESCE	(ETHTOOL_COALESCE_USECS |		\
				 ETHTOOL_COALESCE_USE_ADAPTIVE |	\
				 ETHTOOL_COALESCE_RX_USECS_HIGH |	\
				 ETHTOOL_COALESCE_TX_USECS_HIGH |	\
				 ETHTOOL_COALESCE_MAX_FRAMES)
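
/*
 * Added note: supported_coalesce_params tells the ethtool core which
 * "ethtool -C" fields this driver accepts; requests touching any field
 * outside HNS3_ETHTOOL_COALESCE are rejected by the core before
 * hns3_set_coalesce() is ever called.
 */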
static const struct ethtool_ops hns3vf_ethtool_ops = {
	.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
	.get_drvinfo = hns3_get_drvinfo,
	.get_ringparam = hns3_get_ringparam,
	.set_ringparam = hns3_set_ringparam,
	.get_strings = hns3_get_strings,
	.get_ethtool_stats = hns3_get_stats,
	.get_sset_count = hns3_get_sset_count,
	.get_rxnfc = hns3_get_rxnfc,
	.set_rxnfc = hns3_set_rxnfc,
	.get_rxfh_key_size = hns3_get_rss_key_size,
	.get_rxfh_indir_size = hns3_get_rss_indir_size,
	.get_rxfh = hns3_get_rss,
	.set_rxfh = hns3_set_rss,
	.get_link_ksettings = hns3_get_link_ksettings,
	.get_channels = hns3_get_channels,
	.set_channels = hns3_set_channels,
	.get_coalesce = hns3_get_coalesce,
	.set_coalesce = hns3_set_coalesce,
	.get_regs_len = hns3_get_regs_len,
	.get_regs = hns3_get_regs,
	.get_link = hns3_get_link,
	.get_msglevel = hns3_get_msglevel,
	.set_msglevel = hns3_set_msglevel,
	.get_priv_flags = hns3_get_priv_flags,
	.set_priv_flags = hns3_set_priv_flags,
};
static const struct ethtool_ops hns3_ethtool_ops = {
	.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
	.self_test = hns3_self_test,
	.get_drvinfo = hns3_get_drvinfo,
	.get_link = hns3_get_link,
	.get_ringparam = hns3_get_ringparam,
	.set_ringparam = hns3_set_ringparam,
	.get_pauseparam = hns3_get_pauseparam,
	.set_pauseparam = hns3_set_pauseparam,
	.get_strings = hns3_get_strings,
	.get_ethtool_stats = hns3_get_stats,
	.get_sset_count = hns3_get_sset_count,
	.get_rxnfc = hns3_get_rxnfc,
	.set_rxnfc = hns3_set_rxnfc,
	.get_rxfh_key_size = hns3_get_rss_key_size,
	.get_rxfh_indir_size = hns3_get_rss_indir_size,
	.get_rxfh = hns3_get_rss,
	.set_rxfh = hns3_set_rss,
	.get_link_ksettings = hns3_get_link_ksettings,
	.set_link_ksettings = hns3_set_link_ksettings,
	.nway_reset = hns3_nway_reset,
	.get_channels = hns3_get_channels,
	.set_channels = hns3_set_channels,
	.get_coalesce = hns3_get_coalesce,
	.set_coalesce = hns3_set_coalesce,
	.get_regs_len = hns3_get_regs_len,
	.get_regs = hns3_get_regs,
	.set_phys_id = hns3_set_phys_id,
	.get_msglevel = hns3_get_msglevel,
	.set_msglevel = hns3_set_msglevel,
	.get_fecparam = hns3_get_fecparam,
	.set_fecparam = hns3_set_fecparam,
	.get_module_info = hns3_get_module_info,
	.get_module_eeprom = hns3_get_module_eeprom,
	.get_priv_flags = hns3_get_priv_flags,
	.set_priv_flags = hns3_set_priv_flags,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->flags & HNAE3_SUPPORT_VF)
		netdev->ethtool_ops = &hns3vf_ethtool_ops;
	else
		netdev->ethtool_ops = &hns3_ethtool_ops;
}