/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>

#include "qede.h"
#include "qede_ptp.h"
static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
#endif
enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};
static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
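/* The driver_data field of each qede_pci_tbl entry carries one of the
 * qede_pci_private values above, so qede_probe() can tell from the matched
 * table entry alone whether it is probing a PF or a VF device.
 */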
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}
static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}
static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif
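/* Note on qede_sriov_configure() above: rc holds the number of VFs that qed
 * actually enabled, so PF Tx switching is only toggled when the full request
 * was satisfied (rc == num_vfs_param).
 */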
static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
};
static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}
static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};
int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}
static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}
module_init(qede_init);
module_exit(qede_cleanup);
static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}
static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}
#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif
static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}
static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_do_ioctl = qede_ioctl,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
	.ndo_set_vf_trust = qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
};
static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
};
static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
};
/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues, info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if device supports it declare it's enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}
static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}
/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
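/* Worked example (assuming the QED_LOG_* masks match the bit layout above,
 * i.e. VERBOSE in the low 30 bits, INFO at b30, NOTICE at b31): debug=0x3
 * yields QED_LEVEL_VERBOSE with modules 0 and 1 selected, while
 * debug=0x40000000 yields QED_LEVEL_INFO with no per-module bits.
 */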
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}
static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}
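/* Illustrative layout: with six queues, fp_num_rx = 2 and fp_num_tx = 2,
 * the loop above produces [RX, RX, COMBINED, COMBINED, TX, TX], keeping the
 * ids of each queue type sequential as the comment in qede_alloc_fp_array()
 * requires.
 */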
static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	__qede_unlock(edev);
}
static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));
	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}
#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}
enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
				   dp_level);
	if (!edev) {
		rc = -ENOMEM;
		goto err2;
	}

	if (is_vf)
		edev->flags |= QEDE_FLAG_IS_VF;

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev);
	if (rc)
		goto err3;

	/* Prepare the lock prior to the registration of the netdev,
	 * as once it's registered we might reach flows requiring it
	 * [it's even possible to reach a flow needing it directly
	 * from there, although it's unlikely].
	 */
	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);
	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err4;
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev, true);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev);
err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}
enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	unregister_netdev(ndev);
	cancel_delayed_work_sync(&edev->sp_task);

	qede_ptp_disable(edev);

	qede_rdma_dev_remove(edev);

	edev->ops->common->set_power_state(cdev, PCI_D0);

	pci_set_drvdata(pdev, NULL);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;
	qed_ops->common->remove(cdev);

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	free_netdev(ndev);

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */
static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}
static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}
/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}
static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i;

	if (edev->gro_disable)
		return;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->buffer;

		if (replace_buf->data) {
			dma_unmap_page(&edev->pdev->dev,
				       replace_buf->mapping,
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(replace_buf->data);
		}
	}
}
static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	qede_free_sge_mem(edev, rxq);

	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	dma_addr_t mapping;
	int i;

	if (edev->gro_disable)
		return 0;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->buffer;

		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
		if (unlikely(!replace_buf->data)) {
			DP_NOTICE(edev,
				  "Failed to allocate TPA skb pool [replacement buffer]\n");
			goto err;
		}

		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
				       PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
			DP_NOTICE(edev,
				  "Failed to map TPA replacement buffer\n");
			goto err;
		}

		replace_buf->mapping = mapping;
		tpa_info->buffer.page_offset = 0;
		tpa_info->buffer_mapping = mapping;
		tpa_info->state = QEDE_AGG_STATE_NONE;
	}

	return 0;
err:
	qede_free_sge_mem(edev, rxq);
	edev->gro_disable = 1;
	edev->ndev->features &= ~NETIF_F_GRO_HW;
	return -ENOMEM;
}
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;

	/* Segment size to split a page in multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog)
		rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
	else
		rxq->rx_buf_seg_size = PAGE_SIZE;

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring, NULL);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring, NULL);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	rc = qede_alloc_sge_mem(edev, rxq);
err:
	return rc;
}
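/* Sizing example (illustrative): with a 1500-byte MTU and no XDP program,
 * rx_buf_size is NET_IP_ALIGN + ETH_OVERHEAD + 1500, which
 * roundup_pow_of_two() turns into a 2048-byte rx_buf_seg_size, so a 4K page
 * is split into two equal Rx segments; with XDP each buffer takes the whole
 * page.
 */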
static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	union eth_tx_bd_types *p_virt;
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    txq->num_tx_buffers,
					    sizeof(*p_virt),
					    &txq->tx_pbl, NULL);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX)
		qede_free_mem_txq(edev, fp->txq);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and/or multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		rc = qede_alloc_mem_txq(edev, fp->txq);
		if (rc)
			goto out;
	}

out:
	return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}
/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id) < 0);
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq->index = txq_index++;
			if (edev->dev_info.is_legacy)
				fp->txq->is_legacy = 1;
			fp->txq->dev = &edev->pdev->dev;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
}
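/* The DMA direction chosen in qede_init_fp() matters for XDP: an XDP_TX
 * action transmits straight out of the Rx page, so such queues must map
 * buffers DMA_BIDIRECTIONAL, while plain Rx queues can use the cheaper
 * DMA_FROM_DEVICE mapping.
 */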
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}
static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}
static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}
static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}
static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn to receive the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}
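/* The polling budget above amounts to roughly one to two seconds of waiting
 * (1000 iterations of usleep_range(1000, 2000)) before the queue is declared
 * stuck and, on the first attempt only, an MCP drain is requested.
 */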
static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_drain_txq(edev, fp->txq, true);
			if (rc)
				return rc;
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_stop_txq(edev, fp->txq, i);
			if (rc)
				return rc;
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}
static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	return rc;
}
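/* The doorbell data prepared above is what the Tx fast path later writes to
 * txq->doorbell_addr in order to pass new Tx BD producer values to the
 * hardware's XCM block.
 */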
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.p_sb = fp->sb_info;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
			if (IS_ERR(fp->rxq->xdp_prog)) {
				rc = PTR_ERR(fp->rxq->xdp_prog);
				fp->rxq->xdp_prog = NULL;
				goto out;
			}
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
			if (rc)
				goto out;
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}
enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	edev->state = QEDE_STATE_CLOSED;

	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);
	DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
};

static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		rc = qede_alloc_arfs(edev);
		if (rc)
			DP_NOTICE(edev, "aRFS memory allocation failed\n");
	}

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	DP_INFO(edev, "Ending successfully qede load\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}
/* 'func' should be able to run between unload and reload assuming interface
 * is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, internal state wouldn't change even
	 * if netdev state would start transitioning. Check whether current
	 * internal configuration indicates device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}
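/* Typical use (sketch): a configuration change such as an MTU update can be
 * wrapped in a qede_reload_args whose ->func applies the new value while the
 * interface is torn down, e.g. from the qede_change_mtu() path.
 */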
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}
static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}