// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include "qede.h"
#include "qede_ptp.h"

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
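
/* Illustrative usage only, assuming the b31/b30/b29-b0 layout documented
 * above qede_config_debug() later in this file: e.g. loading with
 * "modprobe qede debug=0x40000000" selects INFO-level prints, while a value
 * with low bits set enables VERBOSE prints for the corresponding modules.
 */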

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);

#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;

	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static int __maybe_unused qede_suspend(struct device *dev)
{
	dev_info(dev, "Device does not support suspend operation\n");

	return -EOPNOTSUPP;
}

static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
	.driver.pm = &qede_pm_ops,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

int __init qede_init(void)
{
	int ret;

	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	spin_lock(&edev->stats_lock);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}

	spin_unlock(&edev->stats_lock);
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	p_common = &edev->stats.common;

	spin_lock(&edev->stats_lock);

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;

	spin_unlock(&edev->stats_lock);
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
	char *p_sb = (char *)fp->sb_info->sb_virt;
	u32 sb_size, i;

	sb_size = sizeof(struct status_block);

	for (i = 0; i < sb_size; i += 8)
		DP_NOTICE(edev,
			  "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
}

static void
qede_txq_fp_log_metadata(struct qede_dev *edev,
			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_chain *p_chain = &txq->tx_pbl;

	/* Dump txq/fp/sb ids etc. other metadata */
	DP_NOTICE(edev,
		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);

	/* Dump all the relevant prod/cons indexes */
	DP_NOTICE(edev,
		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
}

static void
qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_sb_info_dbg sb_dbg;
	int rc;

	qede_fp_sb_dump(edev, fp);

	memset(&sb_dbg, 0, sizeof(sb_dbg));
	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
	if (!rc)
		DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
			  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);

	edev->ops->common->mfw_report(edev->cdev,
				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
				      qed_chain_get_cons_idx(&txq->tx_pbl),
				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
	if (!rc)
		edev->ops->common->mfw_report(edev->cdev,
					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
					      txq->index, fp->sb_info->igu_sb_id,
					      sb_dbg.igu_prod, sb_dbg.igu_cons,
					      sb_dbg.pi[TX_PI(txq->cos)]);
}

static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	for_each_queue(i) {
		struct qede_tx_queue *txq;
		struct qede_fastpath *fp;
		int cos;

		fp = &edev->fp_array[i];
		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for_each_cos_in_txq(edev, cos) {
			txq = &fp->txq[cos];

			/* Dump basic metadata for all queues */
			qede_txq_fp_log_metadata(edev, fp, txq);

			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
			    qed_chain_get_prod_idx(&txq->tx_pbl))
				qede_tx_log_print(edev, fp, txq);
		}
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_eth_ioctl		= qede_ioctl,
	.ndo_tx_timeout		= qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac		= qede_set_vf_mac,
	.ndo_set_vf_vlan	= qede_set_vf_vlan,
	.ndo_set_vf_trust	= qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state	= qede_set_vf_link_state,
	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
	.ndo_get_vf_config	= qede_get_vf_config,
	.ndo_set_vf_rate	= qede_set_vf_rate,
#endif
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit		= qede_xdp_transmit,
	.ndo_setup_tc		= qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
	.ndo_xdp_xmit		= qede_xdp_transmit,
};
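
/* The PF ops table above wires up the SR-IOV, Tx-timeout, ioctl, flow-steering
 * and tc callbacks; the VF table omits them, and the XDP-capable VF variant
 * additionally exposes the XDP hooks.
 */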

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if device supports it declare it's enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
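
/* Illustrative decoding, assuming the QED_LOG_*_MASK values follow the
 * b31/b30/b29-b0 layout described above: debug=0x80000000 keeps NOTICE-only
 * prints, debug=0x40000000 raises the level to INFO, and debug=0x00000005
 * selects VERBOSE prints for the modules mapped to bits 0 and 2.
 */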

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	if (!edev->coal_entry) {
		edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
					   sizeof(*edev->coal_entry),
					   GFP_KERNEL);
		if (!edev->coal_entry) {
			DP_ERR(edev, "coalesce entry allocation failed\n");
			goto err;
		}
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
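	/* Example layout (assumed counts): with 2 pure-Rx, 4 combined and 2
	 * pure-Tx fastpaths the array becomes [RX0 RX1 | COMB0..COMB3 | TX0
	 * TX1], keeping the Rx ids (0-5) and the Tx ids (2-7) contiguous.
	 */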
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}
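
/* Typical usage sketch: flows that only need the internal lock wrap their
 * critical section with __qede_lock()/__qede_unlock(), while flows that must
 * also hold the RTNL lock use qede_lock()/qede_unlock() below.
 */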

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}

static void qede_periodic_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     periodic_task.work);

	qede_fill_by_demand_stats(edev);
	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
}

static void qede_init_periodic_task(struct qede_dev *edev)
{
	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
	spin_lock_init(&edev->stats_lock);
	edev->stats_coal_usecs = USEC_PER_SEC;
	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* Disable execution of this deferred work once
	 * qede removal is in progress, this stop any future
	 * scheduling of sp_task.
	 */
	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
		return;

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
		cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}
1160 static void qede_update_pf_params(struct qed_dev
*cdev
)
1162 struct qed_pf_params pf_params
;
1165 /* 64 rx + 64 tx + 64 XDP */
1166 memset(&pf_params
, 0, sizeof(struct qed_pf_params
));
1168 /* 1 rx + 1 xdp + max tx cos */
1169 num_cons
= QED_MIN_L2_CONS
;
1171 pf_params
.eth_pf_params
.num_cons
= (MAX_SB_PER_PF_MIMD
- 1) * num_cons
;
1173 /* Same for VFs - make sure they'll have sufficient connections
1174 * to support XDP Tx queues.
1176 pf_params
.eth_pf_params
.num_vf_cons
= 48;
1178 pf_params
.eth_pf_params
.num_arfs_filters
= QEDE_RFS_MAX_FLTR
;
1179 qed_ops
->common
->update_pf_params(cdev
, &pf_params
);
1182 #define QEDE_FW_VER_STR_SIZE 80
1184 static void qede_log_probe(struct qede_dev
*edev
)
1186 struct qed_dev_info
*p_dev_info
= &edev
->dev_info
.common
;
1187 u8 buf
[QEDE_FW_VER_STR_SIZE
];
1190 snprintf(buf
, QEDE_FW_VER_STR_SIZE
,
1191 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
1192 p_dev_info
->fw_major
, p_dev_info
->fw_minor
, p_dev_info
->fw_rev
,
1194 (p_dev_info
->mfw_rev
& QED_MFW_VERSION_3_MASK
) >>
1195 QED_MFW_VERSION_3_OFFSET
,
1196 (p_dev_info
->mfw_rev
& QED_MFW_VERSION_2_MASK
) >>
1197 QED_MFW_VERSION_2_OFFSET
,
1198 (p_dev_info
->mfw_rev
& QED_MFW_VERSION_1_MASK
) >>
1199 QED_MFW_VERSION_1_OFFSET
,
1200 (p_dev_info
->mfw_rev
& QED_MFW_VERSION_0_MASK
) >>
1201 QED_MFW_VERSION_0_OFFSET
);
1203 left_size
= QEDE_FW_VER_STR_SIZE
- strlen(buf
);
1204 if (p_dev_info
->mbi_version
&& left_size
)
1205 snprintf(buf
+ strlen(buf
), left_size
,
1207 (p_dev_info
->mbi_version
& QED_MBI_VERSION_2_MASK
) >>
1208 QED_MBI_VERSION_2_OFFSET
,
1209 (p_dev_info
->mbi_version
& QED_MBI_VERSION_1_MASK
) >>
1210 QED_MBI_VERSION_1_OFFSET
,
1211 (p_dev_info
->mbi_version
& QED_MBI_VERSION_0_MASK
) >>
1212 QED_MBI_VERSION_0_OFFSET
);
1214 pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev
->pdev
->bus
->number
,
1215 PCI_SLOT(edev
->pdev
->devfn
), PCI_FUNC(edev
->pdev
->devfn
),
1216 buf
, edev
->ndev
->name
);
1219 enum qede_probe_mode
{
1221 QEDE_PROBE_RECOVERY
,
1224 static int __qede_probe(struct pci_dev
*pdev
, u32 dp_module
, u8 dp_level
,
1225 bool is_vf
, enum qede_probe_mode mode
)
1227 struct qed_probe_params probe_params
;
1228 struct qed_slowpath_params sp_params
;
1229 struct qed_dev_eth_info dev_info
;
1230 struct qede_dev
*edev
;
1231 struct qed_dev
*cdev
;
1234 if (unlikely(dp_level
& QED_LEVEL_INFO
))
1235 pr_notice("Starting qede probe\n");
1237 memset(&probe_params
, 0, sizeof(probe_params
));
1238 probe_params
.protocol
= QED_PROTOCOL_ETH
;
1239 probe_params
.dp_module
= dp_module
;
1240 probe_params
.dp_level
= dp_level
;
1241 probe_params
.is_vf
= is_vf
;
1242 probe_params
.recov_in_prog
= (mode
== QEDE_PROBE_RECOVERY
);
1243 cdev
= qed_ops
->common
->probe(pdev
, &probe_params
);
1249 qede_update_pf_params(cdev
);
1251 /* Start the Slowpath-process */
1252 memset(&sp_params
, 0, sizeof(sp_params
));
1253 sp_params
.int_mode
= QED_INT_MODE_MSIX
;
1254 strscpy(sp_params
.name
, "qede LAN", QED_DRV_VER_STR_SIZE
);
1255 rc
= qed_ops
->common
->slowpath_start(cdev
, &sp_params
);
1257 pr_notice("Cannot start slowpath\n");
1261 /* Learn information crucial for qede to progress */
1262 rc
= qed_ops
->fill_dev_info(cdev
, &dev_info
);
1266 if (mode
!= QEDE_PROBE_RECOVERY
) {
1267 edev
= qede_alloc_etherdev(cdev
, pdev
, &dev_info
, dp_module
,
1274 edev
->devlink
= qed_ops
->common
->devlink_register(cdev
);
1275 if (IS_ERR(edev
->devlink
)) {
1276 DP_NOTICE(edev
, "Cannot register devlink\n");
1277 rc
= PTR_ERR(edev
->devlink
);
1278 edev
->devlink
= NULL
;
1282 struct net_device
*ndev
= pci_get_drvdata(pdev
);
1283 struct qed_devlink
*qdl
;
1285 edev
= netdev_priv(ndev
);
1286 qdl
= devlink_priv(edev
->devlink
);
1289 memset(&edev
->stats
, 0, sizeof(edev
->stats
));
1290 memcpy(&edev
->dev_info
, &dev_info
, sizeof(dev_info
));
1294 set_bit(QEDE_FLAGS_IS_VF
, &edev
->flags
);
1296 qede_init_ndev(edev
);
1298 rc
= qede_rdma_dev_add(edev
, (mode
== QEDE_PROBE_RECOVERY
));
1302 if (mode
!= QEDE_PROBE_RECOVERY
) {
1303 /* Prepare the lock prior to the registration of the netdev,
1304 * as once it's registered we might reach flows requiring it
1305 * [it's even possible to reach a flow needing it directly
1306 * from there, although it's unlikely].
1308 INIT_DELAYED_WORK(&edev
->sp_task
, qede_sp_task
);
1309 mutex_init(&edev
->qede_lock
);
1310 qede_init_periodic_task(edev
);
1312 rc
= register_netdev(edev
->ndev
);
1314 DP_NOTICE(edev
, "Cannot register net-device\n");
1319 edev
->ops
->common
->set_name(cdev
, edev
->ndev
->name
);
1321 /* PTP not supported on VFs */
1323 qede_ptp_enable(edev
);
1325 edev
->ops
->register_ops(cdev
, &qede_ll_ops
, edev
);
1329 qede_set_dcbnl_ops(edev
->ndev
);
1332 edev
->rx_copybreak
= QEDE_RX_HDR_SIZE
;
1334 qede_log_probe(edev
);
1336 /* retain user config (for example - after recovery) */
1337 if (edev
->stats_coal_usecs
)
1338 schedule_delayed_work(&edev
->periodic_task
, 0);
1343 qede_rdma_dev_remove(edev
, (mode
== QEDE_PROBE_RECOVERY
));
1345 if (mode
!= QEDE_PROBE_RECOVERY
)
1346 free_netdev(edev
->ndev
);
1350 qed_ops
->common
->slowpath_stop(cdev
);
1352 qed_ops
->common
->remove(cdev
);
1357 static int qede_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
1363 switch ((enum qede_pci_private
)id
->driver_data
) {
1364 case QEDE_PRIVATE_VF
:
1365 if (debug
& QED_LOG_VERBOSE_MASK
)
1366 dev_err(&pdev
->dev
, "Probing a VF\n");
1370 if (debug
& QED_LOG_VERBOSE_MASK
)
1371 dev_err(&pdev
->dev
, "Probing a PF\n");
1374 qede_config_debug(debug
, &dp_module
, &dp_level
);
1376 return __qede_probe(pdev
, dp_module
, dp_level
, is_vf
,
1380 enum qede_remove_mode
{
1382 QEDE_REMOVE_RECOVERY
,
1385 static void __qede_remove(struct pci_dev
*pdev
, enum qede_remove_mode mode
)
1387 struct net_device
*ndev
= pci_get_drvdata(pdev
);
1388 struct qede_dev
*edev
;
1389 struct qed_dev
*cdev
;
1392 dev_info(&pdev
->dev
, "Device has already been removed\n");
1396 edev
= netdev_priv(ndev
);
1399 DP_INFO(edev
, "Starting qede_remove\n");
1401 qede_rdma_dev_remove(edev
, (mode
== QEDE_REMOVE_RECOVERY
));
1403 if (mode
!= QEDE_REMOVE_RECOVERY
) {
1404 set_bit(QEDE_SP_DISABLE
, &edev
->sp_flags
);
1405 unregister_netdev(ndev
);
1407 cancel_delayed_work_sync(&edev
->sp_task
);
1408 cancel_delayed_work_sync(&edev
->periodic_task
);
1410 edev
->ops
->common
->set_power_state(cdev
, PCI_D0
);
1412 pci_set_drvdata(pdev
, NULL
);
1415 qede_ptp_disable(edev
);
1417 /* Use global ops since we've freed edev */
1418 qed_ops
->common
->slowpath_stop(cdev
);
1419 if (system_state
== SYSTEM_POWER_OFF
)
1422 if (mode
!= QEDE_REMOVE_RECOVERY
&& edev
->devlink
) {
1423 qed_ops
->common
->devlink_unregister(edev
->devlink
);
1424 edev
->devlink
= NULL
;
1426 qed_ops
->common
->remove(cdev
);
1429 /* Since this can happen out-of-sync with other flows,
1430 * don't release the netdevice until after slowpath stop
1431 * has been called to guarantee various other contexts
1432 * [e.g., QED register callbacks] won't break anything when
1433 * accessing the netdevice.
1435 if (mode
!= QEDE_REMOVE_RECOVERY
) {
1436 kfree(edev
->coal_entry
);
1440 dev_info(&pdev
->dev
, "Ending qede_remove successfully\n");
1443 static void qede_remove(struct pci_dev
*pdev
)
1445 __qede_remove(pdev
, QEDE_REMOVE_NORMAL
);
1448 static void qede_shutdown(struct pci_dev
*pdev
)
1450 __qede_remove(pdev
, QEDE_REMOVE_NORMAL
);
1453 /* -------------------------------------------------------------------------
1454 * START OF LOAD / UNLOAD
1455 * -------------------------------------------------------------------------
1458 static int qede_set_num_queues(struct qede_dev
*edev
)
1463 /* Setup queues according to possible resources*/
1464 if (edev
->req_queues
)
1465 rss_num
= edev
->req_queues
;
1467 rss_num
= netif_get_num_default_rss_queues() *
1468 edev
->dev_info
.common
.num_hwfns
;
1470 rss_num
= min_t(u16
, QEDE_MAX_RSS_CNT(edev
), rss_num
);
1472 rc
= edev
->ops
->common
->set_fp_int(edev
->cdev
, rss_num
);
1474 /* Managed to request interrupts for our queues */
1475 edev
->num_queues
= rc
;
1476 DP_INFO(edev
, "Managed %d [of %d] RSS queues\n",
1477 QEDE_QUEUE_CNT(edev
), rss_num
);
1481 edev
->fp_num_tx
= edev
->req_num_tx
;
1482 edev
->fp_num_rx
= edev
->req_num_rx
;
1487 static void qede_free_mem_sb(struct qede_dev
*edev
, struct qed_sb_info
*sb_info
,
1490 if (sb_info
->sb_virt
) {
1491 edev
->ops
->common
->sb_release(edev
->cdev
, sb_info
, sb_id
,
1492 QED_SB_TYPE_L2_QUEUE
);
1493 dma_free_coherent(&edev
->pdev
->dev
, sizeof(*sb_info
->sb_virt
),
1494 (void *)sb_info
->sb_virt
, sb_info
->sb_phys
);
1495 memset(sb_info
, 0, sizeof(*sb_info
));
1499 /* This function allocates fast-path status block memory */
1500 static int qede_alloc_mem_sb(struct qede_dev
*edev
,
1501 struct qed_sb_info
*sb_info
, u16 sb_id
)
1503 struct status_block
*sb_virt
;
1507 sb_virt
= dma_alloc_coherent(&edev
->pdev
->dev
,
1508 sizeof(*sb_virt
), &sb_phys
, GFP_KERNEL
);
1510 DP_ERR(edev
, "Status block allocation failed\n");
1514 rc
= edev
->ops
->common
->sb_init(edev
->cdev
, sb_info
,
1515 sb_virt
, sb_phys
, sb_id
,
1516 QED_SB_TYPE_L2_QUEUE
);
1518 DP_ERR(edev
, "Status block initialization failed\n");
1519 dma_free_coherent(&edev
->pdev
->dev
, sizeof(*sb_virt
),
1527 static void qede_free_rx_buffers(struct qede_dev
*edev
,
1528 struct qede_rx_queue
*rxq
)
1532 for (i
= rxq
->sw_rx_cons
; i
!= rxq
->sw_rx_prod
; i
++) {
1533 struct sw_rx_data
*rx_buf
;
1536 rx_buf
= &rxq
->sw_rx_ring
[i
& NUM_RX_BDS_MAX
];
1537 data
= rx_buf
->data
;
1539 dma_unmap_page(&edev
->pdev
->dev
,
1540 rx_buf
->mapping
, PAGE_SIZE
, rxq
->data_direction
);
1542 rx_buf
->data
= NULL
;
1547 static void qede_free_mem_rxq(struct qede_dev
*edev
, struct qede_rx_queue
*rxq
)
1549 /* Free rx buffers */
1550 qede_free_rx_buffers(edev
, rxq
);
1552 /* Free the parallel SW ring */
1553 kfree(rxq
->sw_rx_ring
);
1555 /* Free the real RQ ring used by FW */
1556 edev
->ops
->common
->chain_free(edev
->cdev
, &rxq
->rx_bd_ring
);
1557 edev
->ops
->common
->chain_free(edev
->cdev
, &rxq
->rx_comp_ring
);
1560 static void qede_set_tpa_param(struct qede_rx_queue
*rxq
)
1564 for (i
= 0; i
< ETH_TPA_MAX_AGGS_NUM
; i
++) {
1565 struct qede_agg_info
*tpa_info
= &rxq
->tpa_info
[i
];
1567 tpa_info
->state
= QEDE_AGG_STATE_NONE
;
1571 /* This function allocates all memory needed per Rx queue */
1572 static int qede_alloc_mem_rxq(struct qede_dev
*edev
, struct qede_rx_queue
*rxq
)
1574 struct qed_chain_init_params params
= {
1575 .cnt_type
= QED_CHAIN_CNT_TYPE_U16
,
1576 .num_elems
= RX_RING_SIZE
,
1578 struct qed_dev
*cdev
= edev
->cdev
;
1581 rxq
->num_rx_buffers
= edev
->q_num_rx_buffers
;
1583 rxq
->rx_buf_size
= NET_IP_ALIGN
+ ETH_OVERHEAD
+ edev
->ndev
->mtu
;
1585 rxq
->rx_headroom
= edev
->xdp_prog
? XDP_PACKET_HEADROOM
: NET_SKB_PAD
;
1586 size
= rxq
->rx_headroom
+
1587 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
1589 /* Make sure that the headroom and payload fit in a single page */
1590 if (rxq
->rx_buf_size
+ size
> PAGE_SIZE
)
1591 rxq
->rx_buf_size
= PAGE_SIZE
- size
;
1593 /* Segment size to split a page in multiple equal parts,
1594 * unless XDP is used in which case we'd use the entire page.
1596 if (!edev
->xdp_prog
) {
1597 size
= size
+ rxq
->rx_buf_size
;
1598 rxq
->rx_buf_seg_size
= roundup_pow_of_two(size
);
1600 rxq
->rx_buf_seg_size
= PAGE_SIZE
;
1601 edev
->ndev
->features
&= ~NETIF_F_GRO_HW
;
1604 /* Allocate the parallel driver ring for Rx buffers */
1605 size
= sizeof(*rxq
->sw_rx_ring
) * RX_RING_SIZE
;
1606 rxq
->sw_rx_ring
= kzalloc(size
, GFP_KERNEL
);
1607 if (!rxq
->sw_rx_ring
) {
1608 DP_ERR(edev
, "Rx buffers ring allocation failed\n");
1613 /* Allocate FW Rx ring */
1614 params
.mode
= QED_CHAIN_MODE_NEXT_PTR
;
1615 params
.intended_use
= QED_CHAIN_USE_TO_CONSUME_PRODUCE
;
1616 params
.elem_size
= sizeof(struct eth_rx_bd
);
1618 rc
= edev
->ops
->common
->chain_alloc(cdev
, &rxq
->rx_bd_ring
, ¶ms
);
1622 /* Allocate FW completion ring */
1623 params
.mode
= QED_CHAIN_MODE_PBL
;
1624 params
.intended_use
= QED_CHAIN_USE_TO_CONSUME
;
1625 params
.elem_size
= sizeof(union eth_rx_cqe
);
1627 rc
= edev
->ops
->common
->chain_alloc(cdev
, &rxq
->rx_comp_ring
, ¶ms
);
1631 /* Allocate buffers for the Rx ring */
1632 rxq
->filled_buffers
= 0;
1633 for (i
= 0; i
< rxq
->num_rx_buffers
; i
++) {
1634 rc
= qede_alloc_rx_buffer(rxq
, false);
1637 "Rx buffers allocation failed at index %d\n", i
);
1642 edev
->gro_disable
= !(edev
->ndev
->features
& NETIF_F_GRO_HW
);
1643 if (!edev
->gro_disable
)
1644 qede_set_tpa_param(rxq
);
1649 static void qede_free_mem_txq(struct qede_dev
*edev
, struct qede_tx_queue
*txq
)
1651 /* Free the parallel SW ring */
1653 kfree(txq
->sw_tx_ring
.xdp
);
1655 kfree(txq
->sw_tx_ring
.skbs
);
1657 /* Free the real RQ ring used by FW */
1658 edev
->ops
->common
->chain_free(edev
->cdev
, &txq
->tx_pbl
);
1661 /* This function allocates all memory needed per Tx queue */
1662 static int qede_alloc_mem_txq(struct qede_dev
*edev
, struct qede_tx_queue
*txq
)
1664 struct qed_chain_init_params params
= {
1665 .mode
= QED_CHAIN_MODE_PBL
,
1666 .intended_use
= QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1667 .cnt_type
= QED_CHAIN_CNT_TYPE_U16
,
1668 .num_elems
= edev
->q_num_tx_buffers
,
1669 .elem_size
= sizeof(union eth_tx_bd_types
),
1673 txq
->num_tx_buffers
= edev
->q_num_tx_buffers
;
1675 /* Allocate the parallel driver ring for Tx buffers */
1677 size
= sizeof(*txq
->sw_tx_ring
.xdp
) * txq
->num_tx_buffers
;
1678 txq
->sw_tx_ring
.xdp
= kzalloc(size
, GFP_KERNEL
);
1679 if (!txq
->sw_tx_ring
.xdp
)
1682 size
= sizeof(*txq
->sw_tx_ring
.skbs
) * txq
->num_tx_buffers
;
1683 txq
->sw_tx_ring
.skbs
= kzalloc(size
, GFP_KERNEL
);
1684 if (!txq
->sw_tx_ring
.skbs
)
1688 rc
= edev
->ops
->common
->chain_alloc(edev
->cdev
, &txq
->tx_pbl
, ¶ms
);
1695 qede_free_mem_txq(edev
, txq
);
1699 /* This function frees all memory of a single fp */
1700 static void qede_free_mem_fp(struct qede_dev
*edev
, struct qede_fastpath
*fp
)
1702 qede_free_mem_sb(edev
, fp
->sb_info
, fp
->id
);
1704 if (fp
->type
& QEDE_FASTPATH_RX
)
1705 qede_free_mem_rxq(edev
, fp
->rxq
);
1707 if (fp
->type
& QEDE_FASTPATH_XDP
)
1708 qede_free_mem_txq(edev
, fp
->xdp_tx
);
1710 if (fp
->type
& QEDE_FASTPATH_TX
) {
1713 for_each_cos_in_txq(edev
, cos
)
1714 qede_free_mem_txq(edev
, &fp
->txq
[cos
]);
1718 /* This function allocates all memory needed for a single fp (i.e. an entity
1719 * which contains status block, one rx queue and/or multiple per-TC tx queues.
1721 static int qede_alloc_mem_fp(struct qede_dev
*edev
, struct qede_fastpath
*fp
)
1725 rc
= qede_alloc_mem_sb(edev
, fp
->sb_info
, fp
->id
);
1729 if (fp
->type
& QEDE_FASTPATH_RX
) {
1730 rc
= qede_alloc_mem_rxq(edev
, fp
->rxq
);
1735 if (fp
->type
& QEDE_FASTPATH_XDP
) {
1736 rc
= qede_alloc_mem_txq(edev
, fp
->xdp_tx
);
1741 if (fp
->type
& QEDE_FASTPATH_TX
) {
1744 for_each_cos_in_txq(edev
, cos
) {
1745 rc
= qede_alloc_mem_txq(edev
, &fp
->txq
[cos
]);
1755 static void qede_free_mem_load(struct qede_dev
*edev
)
1760 struct qede_fastpath
*fp
= &edev
->fp_array
[i
];
1762 qede_free_mem_fp(edev
, fp
);
1766 /* This function allocates all qede memory at NIC load. */
1767 static int qede_alloc_mem_load(struct qede_dev
*edev
)
1769 int rc
= 0, queue_id
;
1771 for (queue_id
= 0; queue_id
< QEDE_QUEUE_CNT(edev
); queue_id
++) {
1772 struct qede_fastpath
*fp
= &edev
->fp_array
[queue_id
];
1774 rc
= qede_alloc_mem_fp(edev
, fp
);
1777 "Failed to allocate memory for fastpath - rss id = %d\n",
1779 qede_free_mem_load(edev
);
1787 static void qede_empty_tx_queue(struct qede_dev
*edev
,
1788 struct qede_tx_queue
*txq
)
1790 unsigned int pkts_compl
= 0, bytes_compl
= 0;
1791 struct netdev_queue
*netdev_txq
;
1794 netdev_txq
= netdev_get_tx_queue(edev
->ndev
, txq
->ndev_txq_id
);
1796 while (qed_chain_get_cons_idx(&txq
->tx_pbl
) !=
1797 qed_chain_get_prod_idx(&txq
->tx_pbl
)) {
1798 DP_VERBOSE(edev
, NETIF_MSG_IFDOWN
,
1799 "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1800 txq
->index
, qed_chain_get_cons_idx(&txq
->tx_pbl
),
1801 qed_chain_get_prod_idx(&txq
->tx_pbl
));
1803 rc
= qede_free_tx_pkt(edev
, txq
, &len
);
1806 "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1808 qed_chain_get_cons_idx(&txq
->tx_pbl
),
1809 qed_chain_get_prod_idx(&txq
->tx_pbl
));
1818 netdev_tx_completed_queue(netdev_txq
, pkts_compl
, bytes_compl
);
1821 static void qede_empty_tx_queues(struct qede_dev
*edev
)
1826 if (edev
->fp_array
[i
].type
& QEDE_FASTPATH_TX
) {
1829 for_each_cos_in_txq(edev
, cos
) {
1830 struct qede_fastpath
*fp
;
1832 fp
= &edev
->fp_array
[i
];
1833 qede_empty_tx_queue(edev
,
1839 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
1840 static void qede_init_fp(struct qede_dev
*edev
)
1842 int queue_id
, rxq_index
= 0, txq_index
= 0;
1843 struct qede_fastpath
*fp
;
1844 bool init_xdp
= false;
1846 for_each_queue(queue_id
) {
1847 fp
= &edev
->fp_array
[queue_id
];
1852 if (fp
->type
& QEDE_FASTPATH_XDP
) {
1853 fp
->xdp_tx
->index
= QEDE_TXQ_IDX_TO_XDP(edev
,
1855 fp
->xdp_tx
->is_xdp
= 1;
1857 spin_lock_init(&fp
->xdp_tx
->xdp_tx_lock
);
1861 if (fp
->type
& QEDE_FASTPATH_RX
) {
1862 fp
->rxq
->rxq_id
= rxq_index
++;
1864 /* Determine how to map buffers for this queue */
1865 if (fp
->type
& QEDE_FASTPATH_XDP
)
1866 fp
->rxq
->data_direction
= DMA_BIDIRECTIONAL
;
1868 fp
->rxq
->data_direction
= DMA_FROM_DEVICE
;
1869 fp
->rxq
->dev
= &edev
->pdev
->dev
;
1871 /* Driver have no error path from here */
1872 WARN_ON(xdp_rxq_info_reg(&fp
->rxq
->xdp_rxq
, edev
->ndev
,
1873 fp
->rxq
->rxq_id
, 0) < 0);
1875 if (xdp_rxq_info_reg_mem_model(&fp
->rxq
->xdp_rxq
,
1876 MEM_TYPE_PAGE_ORDER0
,
1879 "Failed to register XDP memory model\n");
1883 if (fp
->type
& QEDE_FASTPATH_TX
) {
1886 for_each_cos_in_txq(edev
, cos
) {
1887 struct qede_tx_queue
*txq
= &fp
->txq
[cos
];
1891 txq
->index
= txq_index
;
1892 ndev_tx_id
= QEDE_TXQ_TO_NDEV_TXQ_ID(edev
, txq
);
1893 txq
->ndev_txq_id
= ndev_tx_id
;
1895 if (edev
->dev_info
.is_legacy
)
1896 txq
->is_legacy
= true;
1897 txq
->dev
= &edev
->pdev
->dev
;
1903 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
1904 edev
->ndev
->name
, queue_id
);
1908 edev
->total_xdp_queues
= QEDE_RSS_COUNT(edev
);
1909 DP_INFO(edev
, "Total XDP queues: %u\n", edev
->total_xdp_queues
);
1913 static int qede_set_real_num_queues(struct qede_dev
*edev
)
1917 rc
= netif_set_real_num_tx_queues(edev
->ndev
,
1918 QEDE_TSS_COUNT(edev
) *
1919 edev
->dev_info
.num_tc
);
1921 DP_NOTICE(edev
, "Failed to set real number of Tx queues\n");
1925 rc
= netif_set_real_num_rx_queues(edev
->ndev
, QEDE_RSS_COUNT(edev
));
1927 DP_NOTICE(edev
, "Failed to set real number of Rx queues\n");
1934 static void qede_napi_disable_remove(struct qede_dev
*edev
)
1939 napi_disable(&edev
->fp_array
[i
].napi
);
1941 netif_napi_del(&edev
->fp_array
[i
].napi
);
1945 static void qede_napi_add_enable(struct qede_dev
*edev
)
1949 /* Add NAPI objects */
1951 netif_napi_add(edev
->ndev
, &edev
->fp_array
[i
].napi
, qede_poll
);
1952 napi_enable(&edev
->fp_array
[i
].napi
);
1956 static void qede_sync_free_irqs(struct qede_dev
*edev
)
1960 for (i
= 0; i
< edev
->int_info
.used_cnt
; i
++) {
1961 if (edev
->int_info
.msix_cnt
) {
1962 free_irq(edev
->int_info
.msix
[i
].vector
,
1963 &edev
->fp_array
[i
]);
1965 edev
->ops
->common
->simd_handler_clean(edev
->cdev
, i
);
1969 edev
->int_info
.used_cnt
= 0;
1970 edev
->int_info
.msix_cnt
= 0;
1973 static int qede_req_msix_irqs(struct qede_dev
*edev
)
1977 /* Sanitize number of interrupts == number of prepared RSS queues */
1978 if (QEDE_QUEUE_CNT(edev
) > edev
->int_info
.msix_cnt
) {
1980 "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1981 QEDE_QUEUE_CNT(edev
), edev
->int_info
.msix_cnt
);
1985 for (i
= 0; i
< QEDE_QUEUE_CNT(edev
); i
++) {
1986 #ifdef CONFIG_RFS_ACCEL
1987 struct qede_fastpath
*fp
= &edev
->fp_array
[i
];
1989 if (edev
->ndev
->rx_cpu_rmap
&& (fp
->type
& QEDE_FASTPATH_RX
)) {
1990 rc
= irq_cpu_rmap_add(edev
->ndev
->rx_cpu_rmap
,
1991 edev
->int_info
.msix
[i
].vector
);
1993 DP_ERR(edev
, "Failed to add CPU rmap\n");
1994 qede_free_arfs(edev
);
1998 rc
= request_irq(edev
->int_info
.msix
[i
].vector
,
1999 qede_msix_fp_int
, 0, edev
->fp_array
[i
].name
,
2000 &edev
->fp_array
[i
]);
2002 DP_ERR(edev
, "Request fp %d irq failed\n", i
);
2003 #ifdef CONFIG_RFS_ACCEL
2004 if (edev
->ndev
->rx_cpu_rmap
)
2005 free_irq_cpu_rmap(edev
->ndev
->rx_cpu_rmap
);
2007 edev
->ndev
->rx_cpu_rmap
= NULL
;
2009 qede_sync_free_irqs(edev
);
2012 DP_VERBOSE(edev
, NETIF_MSG_INTR
,
2013 "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
2014 edev
->fp_array
[i
].name
, i
,
2015 &edev
->fp_array
[i
]);
2016 edev
->int_info
.used_cnt
++;
2022 static void qede_simd_fp_handler(void *cookie
)
2024 struct qede_fastpath
*fp
= (struct qede_fastpath
*)cookie
;
2026 napi_schedule_irqoff(&fp
->napi
);
2029 static int qede_setup_irqs(struct qede_dev
*edev
)
2033 /* Learn Interrupt configuration */
2034 rc
= edev
->ops
->common
->get_fp_int(edev
->cdev
, &edev
->int_info
);
2038 if (edev
->int_info
.msix_cnt
) {
2039 rc
= qede_req_msix_irqs(edev
);
2042 edev
->ndev
->irq
= edev
->int_info
.msix
[0].vector
;
2044 const struct qed_common_ops
*ops
;
2046 /* qed should learn receive the RSS ids and callbacks */
2047 ops
= edev
->ops
->common
;
2048 for (i
= 0; i
< QEDE_QUEUE_CNT(edev
); i
++)
2049 ops
->simd_handler_config(edev
->cdev
,
2050 &edev
->fp_array
[i
], i
,
2051 qede_simd_fp_handler
);
2052 edev
->int_info
.used_cnt
= QEDE_QUEUE_CNT(edev
);
2057 static int qede_drain_txq(struct qede_dev
*edev
,
2058 struct qede_tx_queue
*txq
, bool allow_drain
)
2062 while (txq
->sw_tx_cons
!= txq
->sw_tx_prod
) {
2066 "Tx queue[%d] is stuck, requesting MCP to drain\n",
2068 rc
= edev
->ops
->common
->drain(edev
->cdev
);
2071 return qede_drain_txq(edev
, txq
, false);
2074 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
2075 txq
->index
, txq
->sw_tx_prod
,
2080 usleep_range(1000, 2000);
2084 /* FW finished processing, wait for HW to transmit all tx packets */
2085 usleep_range(1000, 2000);
2090 static int qede_stop_txq(struct qede_dev
*edev
,
2091 struct qede_tx_queue
*txq
, int rss_id
)
2093 /* delete doorbell from doorbell recovery mechanism */
2094 edev
->ops
->common
->db_recovery_del(edev
->cdev
, txq
->doorbell_addr
,
2097 return edev
->ops
->q_tx_stop(edev
->cdev
, rss_id
, txq
->handle
);
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}
static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}
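
/* Bring up the datapath: start the vport, then the Rx, XDP and Tx queues of
 * every fastpath entry, and finally activate the vport with the RSS and
 * tx-switching configuration.
 */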
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.p_sb = fp->sb_info;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			bpf_prog_add(edev->xdp_prog, 1);
			fp->rxq->xdp_prog = edev->xdp_prog;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
						    TX_PI(cos));
				if (rc)
					goto out;
			}
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}
enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
	QEDE_UNLOAD_RECOVERY,
};
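
/* Common teardown path for qede_close() and the recovery flow. In
 * QEDE_UNLOAD_RECOVERY mode the link reset and queue-stop ramrods are
 * skipped (the device is assumed unusable) and pending Tx work is dropped
 * via qede_empty_tx_queues() instead.
 */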
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	if (mode != QEDE_UNLOAD_RECOVERY)
		edev->state = QEDE_STATE_CLOSED;

	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	if (mode != QEDE_UNLOAD_RECOVERY) {
		/* Reset the link */
		memset(&link_params, 0, sizeof(link_params));
		link_params.link_up = false;
		edev->ops->common->set_link(edev->cdev, &link_params);

		rc = qede_stop_queues(edev);
		if (rc) {
#ifdef CONFIG_RFS_ACCEL
			if (edev->dev_info.common.b_arfs_capable) {
				qede_poll_for_freeing_arfs_filters(edev);
				if (edev->ndev->rx_cpu_rmap)
					free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

				edev->ndev->rx_cpu_rmap = NULL;
			}
#endif
			qede_sync_free_irqs(edev);
			goto out;
		}

		DP_INFO(edev, "Stopped Queues\n");
	}

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (edev->dev_info.common.b_arfs_capable) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	if (mode == QEDE_UNLOAD_RECOVERY)
		qede_empty_tx_queues(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	if (mode != QEDE_UNLOAD_RECOVERY)
		DP_NOTICE(edev, "Link is down\n");

	edev->ptp_skip_txts = 0;

	DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
	QEDE_LOAD_RECOVERY,
};
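
/* Common bring-up path for qede_open(), reload and recovery: allocate the
 * fastpath array and queue memory, set up NAPI and IRQs, start the queues
 * and request link-up. Statistics are cleared unless this is a reload.
 */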
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	struct ethtool_coalesce coal = {};
	u8 num_tc;
	int rc, i;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	if (qede_alloc_arfs(edev)) {
		edev->ndev->features &= ~NETIF_F_NTUPLE;
		edev->dev_info.common.b_arfs_capable = false;
	}

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	num_tc = netdev_get_num_tc(edev->ndev);
	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
	qede_setup_tc(edev->ndev, num_tc);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;

	for_each_queue(i) {
		if (edev->coal_entry[i].isvalid) {
			coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
			coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
		}
		__qede_unlock(edev);
		qede_set_per_coalesce(edev->ndev, i, &coal);
		__qede_lock(edev);
	}

	DP_INFO(edev, "Ending successfully qede load\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}
/* 'func' should be able to run between unload and reload assuming interface
 * is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, internal state wouldn't change even
	 * if netdev state would start transitioning. Check whether current
	 * internal configuration indicates device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_nic_reset_ntf(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}
static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	if (edev->cdev)
		edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}
static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling a recovery handling since already in recovery state\n");
		return;
	}

	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a recovery handler\n");
}
static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");

	netif_device_detach(edev->ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}
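
/* Recover from a fatal HW error: tear the interface down in
 * QEDE_UNLOAD_RECOVERY mode, remove and re-probe the device, and if the
 * interface was open, load it again and restore its Rx mode and UDP tunnel
 * configuration. Any failure along the way ends in qede_recovery_failed().
 */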
static void qede_recovery_handler(struct qede_dev *edev)
{
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

	/* No need to acquire the qede_lock first, since it is done by
	 * qede_sp_task before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;

	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);

	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}

	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		qede_config_rx_mode(edev->ndev);
		udp_tunnel_nic_reset_ntf(edev->ndev);
	}

	edev->state = curr_state;

	DP_NOTICE(edev, "Recovery handling is done\n");

	return;

err:
	qede_recovery_failed(edev);
}
static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	if (edev->devlink) {
		DP_NOTICE(edev, "Reporting fatal error to devlink\n");
		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
	}

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);

	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}
static void qede_set_hw_err_flags(struct qede_dev *edev,
				  enum qed_hw_err_type err_type)
{
	unsigned long err_flags = 0;

	switch (err_type) {
	case QED_HW_ERR_DMAE_FAIL:
		set_bit(QEDE_ERR_WARN, &err_flags);
		fallthrough;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_RAMROD_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
		/* Mark this error as recoverable and start recovery */
		set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
		break;

	default:
		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
		break;
	}

	edev->err_flags |= err_flags;
}
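
/* Entry point for HW error notifications from qed. Runs the non-sleepable
 * part of the handling immediately and defers the sleepable part to the
 * sp_task worker. Only a fan failure may preempt an error that is already
 * being handled or an ongoing recovery.
 */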
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

	/* Fan failure cannot be masked by handling of another HW error or by a
	 * concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	     err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling an error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}

	edev->last_err_type = err_type;
	qede_set_hw_err_flags(edev, err_type);
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
}
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
	if (netif_xmit_stopped(netdev_txq))
		return true;

	return false;
}
static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;
	struct netdev_hw_addr *ha;
	int i;

	if (edev->ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (edev->ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
	eth_zero_addr(data->mac[1]);
	eth_zero_addr(data->mac[2]);
	/* Copy the first two UC macs */
	netif_addr_lock_bh(edev->ndev);
	i = 1;
	netdev_for_each_uc_addr(ha, edev->ndev) {
		ether_addr_copy(data->mac[i++], ha->addr);
		if (i == QED_TLV_MAC_COUNT)
			break;
	}

	netif_addr_unlock_bh(edev->ndev);
}
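
/* Fill the Ethernet TLVs reported to the management FW: LSO limits,
 * promiscuous state, Tx/Rx queue counts and an approximation of how full
 * the Rx/Tx queues currently are.
 */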
static void qede_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct qede_dev *edev = dev;
	struct qede_fastpath *fp;
	int i;

	etlv->lso_maxoff_size = 0xffff;
	etlv->lso_maxoff_size_set = true;
	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
	etlv->lso_minseg_size_set = true;
	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;
	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
	etlv->tx_descr_size_set = true;
	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
	etlv->rx_descr_size_set = true;
	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
	etlv->iov_offload_set = true;

	/* Fill information regarding queues; Should be done under the qede
	 * lock to guarantee those don't change beneath our feet.
	 */
	etlv->txqs_empty = true;
	etlv->rxqs_empty = true;
	etlv->num_txqs_full = 0;
	etlv->num_rxqs_full = 0;

	__qede_lock(edev);
	for_each_queue(i) {
		fp = &edev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);

			if (txq->sw_tx_cons != txq->sw_tx_prod)
				etlv->txqs_empty = false;
			if (qede_is_txq_full(edev, txq))
				etlv->num_txqs_full++;
		}
		if (fp->type & QEDE_FASTPATH_RX) {
			if (qede_has_rx_work(fp->rxq))
				etlv->rxqs_empty = false;

			/* This one is a bit tricky; Firmware might stop
			 * placing packets if ring is not yet full.
			 * Give an approximation.
			 */
			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
			    RX_RING_SIZE - 100)
				etlv->num_rxqs_full++;
		}
	}
	__qede_unlock(edev);

	etlv->txqs_empty_set = true;
	etlv->rxqs_empty_set = true;
	etlv->num_txqs_full_set = true;
	etlv->num_rxqs_full_set = true;
}
 * qede_io_error_detected(): Called when PCI error is detected
 *
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * Return: pci_ers_result_t.
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;

	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}

	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}