/* QLogic qede NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/vxlan.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>

#include "qede.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#endif

static const struct pci_device_id qede_pci_tbl[] = {
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT		(5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
		.link_update = qede_link_update,
	},
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	/* Currently only support name change */
	if (event != NETDEV_CHANGENAME)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);

	if (strcmp(drvinfo.driver, "qede"))
		goto done;

	edev = netdev_priv(ndev);

	/* Notify qed of the name change */
	if (!edev->ops || !edev->ops->common)
		goto done;

	edev->ops->common->set_id(edev->cdev, edev->ndev->name,
				  "qede");

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

int __init qede_init(void)
{
	int ret;
	u32 qed_ver;

	pr_notice("qede_init: %s\n", version);

	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
	if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
		pr_notice("Version mismatch [%08x != %08x]\n",
			  qed_ver,
			  QEDE_ETH_INTERFACE_VERSION);
		return -EINVAL;
	}

	qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registeration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	pr_notice("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

/* -------------------------------------------------------------------------
 * START OF FAST-PATH
 * -------------------------------------------------------------------------
 */
/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
			    struct qede_tx_queue *txq,
			    int *len)
{
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
				    struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd,
				    bool data_split)
{
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(&edev->pdev->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;
}
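
/* qede_xmit_type() below returns a bitmask of XMIT_* flags (e.g. XMIT_L4_CSUM,
 * XMIT_LSO) describing the offloads requested for this skb; qede_start_xmit()
 * uses that mask to decide how many BDs to build and which parsing flags to
 * program into them.
 */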
static u32 qede_xmit_type(struct qede_dev *edev,
			  struct sk_buff *skb,
			  int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				     ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				    ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_dev *edev,
			  skb_frag_t *frag,
			  struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
				   skb_frag_size(frag),
				   DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
		return -ENOMEM;
	}

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
			     u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = skb_transport_header(skb) +
		       tcp_hdrlen(skb) - skb->data;

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif
/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
			    struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
	txq = QEDE_TX_QUEUE(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
		(MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(edev, skb, xmit_type)) {
		if (skb_linearize(skb)) {
			DP_NOTICE(edev,
				  "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;

		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		first_bd->data.bitfields |= cpu_to_le16(temp);

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}
	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
		hlen = skb_transport_header(skb) +
		       tcp_hdrlen(skb) - skb->data;

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	}

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */
	txq->sw_tx_prod++;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_txq);
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if (qed_chain_get_elem_left(&txq->tx_pbl)
		     >= (MAX_SKB_FRAGS + 1) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}
static int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static int qede_tx_int(struct qede_dev *edev,
		       struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

static bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static bool qede_has_tx_work(struct qede_fastpath *fp)
{
	u8 tc;

	for (tc = 0; tc < fp->edev->num_tc; tc++)
		if (qede_txq_has_work(&fp->txqs[tc]))
			return true;
	return false;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}
/* This function reuses the buffer(from an offset) from
 * consumer index to producer index in the bd ring
 */
static inline void qede_reuse_page(struct qede_dev *edev,
				   struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* In case of allocation failures reuse buffers
 * from consumer index to produce buffers for firmware
 */
static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
				    struct qede_dev *edev, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(edev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
					 struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	} else {
		/* Increment refcount of the page as we don't want
		 * network stack to take the ownership of the page
		 * which can be recycled multiple times by the driver.
		 */
		atomic_inc(&curr_cons->data->_count);
		qede_reuse_page(edev, rxq, curr_cons);
	}

	return 0;
}

static inline void qede_update_rx_prod(struct qede_dev *edev,
				       struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

static u32 qede_get_rxhash(struct qede_dev *edev,
			   u8 bitfields,
			   __le32 rss_hash,
			   enum pkt_hash_types *rxhash_type)
{
	enum rss_hash_type htype;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

	if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
		*rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
				(htype == RSS_HASH_TYPE_IPV6)) ?
				PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		return le32_to_cpu(rss_hash);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}
static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct sk_buff *skb,
				    u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	    PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
					cqe->header_len;
}

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index,
			      u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data, current_bd->page_offset,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		atomic_inc(&current_bd->data->_count);
		goto out;
	}

	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, edev, 1);
	return -ENOMEM;
}
static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
	dma_addr_t mapping = tpa_info->replace_buf_mapping;
	struct sw_rx_data *sw_rx_data_cons;
	struct sw_rx_data *sw_rx_data_prod;
	enum pkt_hash_types rxhash_type;
	u32 rxhash;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	/* Use pre-allocated replacement buffer - we can't release the agg.
	 * start until its over and we don't want to risk allocation failing
	 * here, so re-allocate when aggregation will be over.
	 */
	dma_unmap_addr_set(sw_rx_data_prod, mapping,
			   dma_unmap_addr(replace_buf, mapping));

	sw_rx_data_prod->data = replace_buf->data;
	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
	sw_rx_data_prod->page_offset = replace_buf->page_offset;

	rxq->sw_rx_prod++;

	/* move partial skb from cons to pool (don't unmap yet)
	 * save mapping, incase we drop the packet later on.
	 */
	tpa_info->start_buf = *sw_rx_data_cons;
	mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
			   le32_to_cpu(rx_bd_cons->addr.lo));

	tpa_info->start_buf_mapping = mapping;
	rxq->sw_rx_cons++;

	/* set tpa state to start only if we are able to allocate skb
	 * for this aggregation, otherwise mark as error and aggregation will
	 * be dropped
	 */
	tpa_info->skb = netdev_alloc_skb(edev->ndev,
					 le16_to_cpu(cqe->len_on_first_bd));
	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
	memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));

	/* Start filling in the aggregation info */
	tpa_info->frag_id = 0;
	tpa_info->agg_state = QEDE_AGG_STATE_START;

	rxhash = qede_get_rxhash(edev, cqe->bitfields,
				 cqe->rss_hash, &rxhash_type);
	skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->ext_bd_len_list[0]));

	if (unlikely(cqe->ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
	}
}

static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* FW can send a single MTU sized packet from gro flow
	 * due to aggregation timeout/last segment etc. which
	 * is not expected to be a gro packet. If a skb has zero
	 * frags then simply push it in the stack as non gso skb.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

	if (skb_shinfo(skb)->gso_size) {
		skb_set_network_header(skb, 0);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}

send_skb:
	skb_record_rx_queue(skb, fp->rss_id);
	qede_skb_receive(edev, fp, skb, vlan_tag);
}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

static void qede_tpa_end(struct qede_dev *edev,
			 struct qede_fastpath *fp,
			 struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
	struct qede_rx_queue *rxq = fp->rxq;
	struct qede_agg_info *tpa_info;
	struct sk_buff *skb;
	int i;

	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	skb = tpa_info->skb;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));
	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA emd with more than a single len_list entry\n");

	if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
		goto err;

	/* Sanity */
	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
		DP_ERR(edev,
		       "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
		       cqe->num_of_bds, tpa_info->frag_id);
	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
		DP_ERR(edev,
		       "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
		       le16_to_cpu(cqe->total_packet_len), skb->len);

	memcpy(skb->data,
	       page_address(tpa_info->start_buf.data) +
		tpa_info->start_cqe.placement_offset +
		tpa_info->start_buf.page_offset,
	       le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));

	/* Recycle [mapped] start buffer for the next replacement */
	tpa_info->replace_buf = tpa_info->start_buf;
	tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;

	/* Finalize the SKB */
	skb->protocol = eth_type_trans(skb, edev->ndev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);

	qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);

	tpa_info->agg_state = QEDE_AGG_STATE_NONE;

	return;
err:
	/* The BD starting the aggregation is still mapped; Re-use it for
	 * future aggregations [as replacement buffer]
	 */
	memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
	       sizeof(struct sw_rx_data));
	tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
	tpa_info->start_buf.data = NULL;
	tpa_info->agg_state = QEDE_AGG_STATE_NONE;
	dev_kfree_skb_any(tpa_info->skb);
	tpa_info->skb = NULL;
}
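
/* qede_check_csum() below derives a checksum status from the CQE parsing
 * flags: if the FW validated the L4 checksum the packet is reported as
 * QEDE_CSUM_UNNECESSARY, while L4 or IP-header errors make it return
 * QEDE_CSUM_ERROR so qede_rx_int() can drop the frame.
 */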
static u8 qede_check_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}
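
/* The RX completion handler below walks the completion ring and dispatches on
 * the CQE type: slow-path CQEs are handed back to qed, TPA start/cont/end
 * CQEs drive the aggregation state machine above, and regular CQEs are turned
 * into skbs (with an optional multi-BD jumbo tail) and passed up the stack.
 */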
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_dev *edev = fp->edev;
	struct qede_rx_queue *rxq = fp->rxq;

	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
	int rx_pkt = 0;
	u8 csum_flag;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of CQE
	 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while (sw_comp_cons != hw_comp_cons) {
		struct eth_fast_path_rx_reg_cqe *fp_cqe;
		enum pkt_hash_types rxhash_type;
		enum eth_rx_cqe_type cqe_type;
		struct sw_rx_data *sw_rx_data;
		union eth_rx_cqe *cqe;
		struct sk_buff *skb;
		struct page *data;
		__le16 flags;
		u16 len, pad;
		u32 rx_hash;

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)
			qed_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
			edev->ops->eth_cqe_completion(
					edev->cdev, fp->rss_id,
					(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
			switch (cqe_type) {
			case ETH_RX_CQE_TYPE_TPA_START:
				qede_tpa_start(edev, rxq,
					       &cqe->fast_path_tpa_start);
				goto next_cqe;
			case ETH_RX_CQE_TYPE_TPA_CONT:
				qede_tpa_cont(edev, rxq,
					      &cqe->fast_path_tpa_cont);
				goto next_cqe;
			case ETH_RX_CQE_TYPE_TPA_END:
				qede_tpa_end(edev, fp,
					     &cqe->fast_path_tpa_end);
				goto next_rx_only;
			default:
				break;
			}
		}

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
		data = sw_rx_data->data;

		fp_cqe = &cqe->fast_path_regular;
		len = le16_to_cpu(fp_cqe->len_on_first_bd);
		pad = fp_cqe->placement_offset;
		flags = cqe->fast_path_regular.pars_flags.flags;

		/* If this is an error packet then drop it */
		parse_flag = le16_to_cpu(flags);

		csum_flag = qede_check_csum(parse_flag);
		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
			DP_NOTICE(edev,
				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
				  sw_comp_cons, parse_flag);
			rxq->rx_hw_errors++;
			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
			goto next_cqe;
		}

		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			DP_NOTICE(edev,
				  "Build_skb failed, dropping incoming packet\n");
			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
			rxq->rx_alloc_errors++;
			goto next_cqe;
		}

		/* Copy data into SKB */
		if (len + pad <= QEDE_RX_HDR_SIZE) {
			memcpy(skb_put(skb, len),
			       page_address(data) + pad +
				sw_rx_data->page_offset, len);
			qede_reuse_page(edev, rxq, sw_rx_data);
		} else {
			struct skb_frag_struct *frag;
			unsigned int pull_len;
			unsigned char *va;

			frag = &skb_shinfo(skb)->frags[0];

			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
					pad + sw_rx_data->page_offset,
					len, rxq->rx_buf_seg_size);

			va = skb_frag_address(frag);
			pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);

			/* Align the pull_len to optimize memcpy */
			memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));

			skb_frag_size_sub(frag, pull_len);
			frag->page_offset += pull_len;
			skb->data_len -= pull_len;
			skb->tail += pull_len;

			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
							    sw_rx_data))) {
				DP_ERR(edev, "Failed to allocate rx buffer\n");
				/* Incr page ref count to reuse on allocation
				 * failure so that it doesn't get freed while
				 * freeing SKB.
				 */
				atomic_inc(&sw_rx_data->data->_count);
				rxq->rx_alloc_errors++;
				qede_recycle_rx_bd_ring(rxq, edev,
							fp_cqe->bd_num);
				dev_kfree_skb_any(skb);
				goto next_cqe;
			}
		}

		qede_rx_bd_ring_consume(rxq);
);
1354 if (fp_cqe
->bd_num
!= 1) {
1355 u16 pkt_len
= le16_to_cpu(fp_cqe
->pkt_len
);
1360 for (num_frags
= fp_cqe
->bd_num
- 1; num_frags
> 0;
1362 u16 cur_size
= pkt_len
> rxq
->rx_buf_size
?
1363 rxq
->rx_buf_size
: pkt_len
;
1364 if (unlikely(!cur_size
)) {
1366 "Still got %d BDs for mapping jumbo, but length became 0\n",
1368 qede_recycle_rx_bd_ring(rxq
, edev
,
1370 dev_kfree_skb_any(skb
);
1374 if (unlikely(qede_alloc_rx_buffer(edev
, rxq
))) {
1375 qede_recycle_rx_bd_ring(rxq
, edev
,
1377 dev_kfree_skb_any(skb
);
1381 sw_rx_index
= rxq
->sw_rx_cons
& NUM_RX_BDS_MAX
;
1382 sw_rx_data
= &rxq
->sw_rx_ring
[sw_rx_index
];
1383 qede_rx_bd_ring_consume(rxq
);
1385 dma_unmap_page(&edev
->pdev
->dev
,
1386 sw_rx_data
->mapping
,
1387 PAGE_SIZE
, DMA_FROM_DEVICE
);
1389 skb_fill_page_desc(skb
,
1390 skb_shinfo(skb
)->nr_frags
++,
1391 sw_rx_data
->data
, 0,
1394 skb
->truesize
+= PAGE_SIZE
;
1395 skb
->data_len
+= cur_size
;
1396 skb
->len
+= cur_size
;
1397 pkt_len
-= cur_size
;
1400 if (unlikely(pkt_len
))
1402 "Mapped all BDs of jumbo, but still have %d bytes\n",
1406 skb
->protocol
= eth_type_trans(skb
, edev
->ndev
);
1408 rx_hash
= qede_get_rxhash(edev
, fp_cqe
->bitfields
,
1412 skb_set_hash(skb
, rx_hash
, rxhash_type
);
1414 qede_set_skb_csum(skb
, csum_flag
);
1416 skb_record_rx_queue(skb
, fp
->rss_id
);
1418 qede_skb_receive(edev
, fp
, skb
, le16_to_cpu(fp_cqe
->vlan_tag
));
1422 next_cqe
: /* don't consume bd rx buffer */
1423 qed_chain_recycle_consumed(&rxq
->rx_comp_ring
);
1424 sw_comp_cons
= qed_chain_get_cons_idx(&rxq
->rx_comp_ring
);
1425 /* CR TPA - revisit how to handle budget in TPA perhaps
1428 if (rx_pkt
== budget
)
1430 } /* repeat while sw_comp_cons != hw_comp_cons... */
1432 /* Update producers */
1433 qede_update_rx_prod(edev
, rxq
);
static int qede_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						napi);
	struct qede_dev *edev = fp->edev;
	u8 tc;

	while (1) {
		for (tc = 0; tc < edev->num_tc; tc++)
			if (qede_txq_has_work(&fp->txqs[tc]))
				qede_tx_int(edev, &fp->txqs[tc]);

		if (qede_has_rx_work(fp->rxq)) {
			work_done += qede_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
			qed_sb_update_sb_idx(fp->sb_info);
			/* *_has_*_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (qed_sb_update_sb_idx)
			 * prior to this check (*_has_*_work) so that
			 * we won't write the "newer" value of the status block
			 * to HW (if there was a DMA right after
			 * qede_has_rx_work and if there is no rmb, the memory
			 * reading (qed_sb_update_sb_idx) may be postponed
			 * to right before *_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(qede_has_rx_work(fp->rxq) ||
			      qede_has_tx_work(fp))) {
				napi_complete(napi);
				/* Update and reenable interrupts */
				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
					   1 /*update*/);
				break;
			}
		}
	}

	return work_done;
}

static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}

/* -------------------------------------------------------------------------
 * END OF FAST-PATH
 * -------------------------------------------------------------------------
 */

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
static int qede_set_mac_addr(struct net_device *ndev, void *p);
static void qede_set_rx_mode(struct net_device *ndev);
static void qede_config_rx_mode(struct net_device *ndev);
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
				  enum qed_filter_xcast_params_type opcode,
				  u16 vid)
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
*edev
)
1545 struct qed_eth_stats stats
;
1547 edev
->ops
->get_vport_stats(edev
->cdev
, &stats
);
1548 edev
->stats
.no_buff_discards
= stats
.no_buff_discards
;
1549 edev
->stats
.rx_ucast_bytes
= stats
.rx_ucast_bytes
;
1550 edev
->stats
.rx_mcast_bytes
= stats
.rx_mcast_bytes
;
1551 edev
->stats
.rx_bcast_bytes
= stats
.rx_bcast_bytes
;
1552 edev
->stats
.rx_ucast_pkts
= stats
.rx_ucast_pkts
;
1553 edev
->stats
.rx_mcast_pkts
= stats
.rx_mcast_pkts
;
1554 edev
->stats
.rx_bcast_pkts
= stats
.rx_bcast_pkts
;
1555 edev
->stats
.mftag_filter_discards
= stats
.mftag_filter_discards
;
1556 edev
->stats
.mac_filter_discards
= stats
.mac_filter_discards
;
1558 edev
->stats
.tx_ucast_bytes
= stats
.tx_ucast_bytes
;
1559 edev
->stats
.tx_mcast_bytes
= stats
.tx_mcast_bytes
;
1560 edev
->stats
.tx_bcast_bytes
= stats
.tx_bcast_bytes
;
1561 edev
->stats
.tx_ucast_pkts
= stats
.tx_ucast_pkts
;
1562 edev
->stats
.tx_mcast_pkts
= stats
.tx_mcast_pkts
;
1563 edev
->stats
.tx_bcast_pkts
= stats
.tx_bcast_pkts
;
1564 edev
->stats
.tx_err_drop_pkts
= stats
.tx_err_drop_pkts
;
1565 edev
->stats
.coalesced_pkts
= stats
.tpa_coalesced_pkts
;
1566 edev
->stats
.coalesced_events
= stats
.tpa_coalesced_events
;
1567 edev
->stats
.coalesced_aborts_num
= stats
.tpa_aborts_num
;
1568 edev
->stats
.non_coalesced_pkts
= stats
.tpa_not_coalesced_pkts
;
1569 edev
->stats
.coalesced_bytes
= stats
.tpa_coalesced_bytes
;
1571 edev
->stats
.rx_64_byte_packets
= stats
.rx_64_byte_packets
;
1572 edev
->stats
.rx_127_byte_packets
= stats
.rx_127_byte_packets
;
1573 edev
->stats
.rx_255_byte_packets
= stats
.rx_255_byte_packets
;
1574 edev
->stats
.rx_511_byte_packets
= stats
.rx_511_byte_packets
;
1575 edev
->stats
.rx_1023_byte_packets
= stats
.rx_1023_byte_packets
;
1576 edev
->stats
.rx_1518_byte_packets
= stats
.rx_1518_byte_packets
;
1577 edev
->stats
.rx_1522_byte_packets
= stats
.rx_1522_byte_packets
;
1578 edev
->stats
.rx_2047_byte_packets
= stats
.rx_2047_byte_packets
;
1579 edev
->stats
.rx_4095_byte_packets
= stats
.rx_4095_byte_packets
;
1580 edev
->stats
.rx_9216_byte_packets
= stats
.rx_9216_byte_packets
;
1581 edev
->stats
.rx_16383_byte_packets
= stats
.rx_16383_byte_packets
;
1582 edev
->stats
.rx_crc_errors
= stats
.rx_crc_errors
;
1583 edev
->stats
.rx_mac_crtl_frames
= stats
.rx_mac_crtl_frames
;
1584 edev
->stats
.rx_pause_frames
= stats
.rx_pause_frames
;
1585 edev
->stats
.rx_pfc_frames
= stats
.rx_pfc_frames
;
1586 edev
->stats
.rx_align_errors
= stats
.rx_align_errors
;
1587 edev
->stats
.rx_carrier_errors
= stats
.rx_carrier_errors
;
1588 edev
->stats
.rx_oversize_packets
= stats
.rx_oversize_packets
;
1589 edev
->stats
.rx_jabbers
= stats
.rx_jabbers
;
1590 edev
->stats
.rx_undersize_packets
= stats
.rx_undersize_packets
;
1591 edev
->stats
.rx_fragments
= stats
.rx_fragments
;
1592 edev
->stats
.tx_64_byte_packets
= stats
.tx_64_byte_packets
;
1593 edev
->stats
.tx_65_to_127_byte_packets
= stats
.tx_65_to_127_byte_packets
;
1594 edev
->stats
.tx_128_to_255_byte_packets
=
1595 stats
.tx_128_to_255_byte_packets
;
1596 edev
->stats
.tx_256_to_511_byte_packets
=
1597 stats
.tx_256_to_511_byte_packets
;
1598 edev
->stats
.tx_512_to_1023_byte_packets
=
1599 stats
.tx_512_to_1023_byte_packets
;
1600 edev
->stats
.tx_1024_to_1518_byte_packets
=
1601 stats
.tx_1024_to_1518_byte_packets
;
1602 edev
->stats
.tx_1519_to_2047_byte_packets
=
1603 stats
.tx_1519_to_2047_byte_packets
;
1604 edev
->stats
.tx_2048_to_4095_byte_packets
=
1605 stats
.tx_2048_to_4095_byte_packets
;
1606 edev
->stats
.tx_4096_to_9216_byte_packets
=
1607 stats
.tx_4096_to_9216_byte_packets
;
1608 edev
->stats
.tx_9217_to_16383_byte_packets
=
1609 stats
.tx_9217_to_16383_byte_packets
;
1610 edev
->stats
.tx_pause_frames
= stats
.tx_pause_frames
;
1611 edev
->stats
.tx_pfc_frames
= stats
.tx_pfc_frames
;
1612 edev
->stats
.tx_lpi_entry_count
= stats
.tx_lpi_entry_count
;
1613 edev
->stats
.tx_total_collisions
= stats
.tx_total_collisions
;
1614 edev
->stats
.brb_truncates
= stats
.brb_truncates
;
1615 edev
->stats
.brb_discards
= stats
.brb_discards
;
1616 edev
->stats
.tx_mac_ctrl_frames
= stats
.tx_mac_ctrl_frames
;
static struct rtnl_link_stats64 *qede_get_stats64(
			    struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);

	qede_fill_by_demand_stats(edev);

	stats->rx_packets = edev->stats.rx_ucast_pkts +
			    edev->stats.rx_mcast_pkts +
			    edev->stats.rx_bcast_pkts;
	stats->tx_packets = edev->stats.tx_ucast_pkts +
			    edev->stats.tx_mcast_pkts +
			    edev->stats.tx_bcast_pkts;

	stats->rx_bytes = edev->stats.rx_ucast_bytes +
			  edev->stats.rx_mcast_bytes +
			  edev->stats.rx_bcast_bytes;

	stats->tx_bytes = edev->stats.tx_ucast_bytes +
			  edev->stats.tx_mcast_bytes +
			  edev->stats.tx_bcast_bytes;

	stats->tx_errors = edev->stats.tx_err_drop_pkts;
	stats->multicast = edev->stats.rx_mcast_pkts +
			   edev->stats.rx_bcast_pkts;

	stats->rx_fifo_errors = edev->stats.no_buff_discards;

	stats->collisions = edev->stats.tx_total_collisions;
	stats->rx_crc_errors = edev->stats.rx_crc_errors;
	stats->rx_frame_errors = edev->stats.rx_align_errors;

	return stats;
}
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
	struct qed_update_vport_params params;
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (edev->accept_any_vlan == action)
		return;

	memset(&params, 0, sizeof(params));

	params.vport_id = 0;
	params.accept_any_vlan = action;
	params.update_accept_any_vlan_flg = 1;

	rc = edev->ops->vport_update(edev->cdev, &params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}
}
static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan, *tmp;
	int rc;

	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&vlan->list);
	vlan->vid = vid;
	vlan->configured = false;

	/* Verify vlan isn't already configured */
	list_for_each_entry(tmp, &edev->vlan_list, list) {
		if (tmp->vid == vlan->vid) {
			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
				   "vlan already configured\n");
			kfree(vlan);
			return -EEXIST;
		}
	}

	/* If interface is down, cache this VLAN ID and return */
	if (edev->state != QEDE_STATE_OPEN) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, VLAN %d will be configured when interface is up\n",
			   vid);
		if (vid != 0)
			edev->non_configured_vlans++;
		list_add(&vlan->list, &edev->vlan_list);

		return 0;
	}

	/* Check for the filter limit.
	 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
					    QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %d\n",
			       vlan->vid);
			kfree(vlan);
			return -EINVAL;
		}
		vlan->configured = true;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0)
			edev->configured_vlans++;
	} else {
		/* Out of quota; Activate accept-any-VLAN mode */
		if (!edev->non_configured_vlans)
			qede_config_accept_any_vlan(edev, true);

		edev->non_configured_vlans++;
	}

	list_add(&vlan->list, &edev->vlan_list);

	return 0;
}
static void qede_del_vlan_from_list(struct qede_dev *edev,
				    struct qede_vlan *vlan)
{
	/* vlan0 filter isn't consuming out of our quota */
	if (vlan->vid != 0) {
		if (vlan->configured)
			edev->configured_vlans--;
		else
			edev->non_configured_vlans--;
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */

	if (accept_any_vlan)
		qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		qede_config_accept_any_vlan(edev, false);

	return real_rc;
}
static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan = NULL;
	int rc;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (!vlan || (vlan->vid != vid)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		return 0;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		return 0;
	}

	/* Remove vlan */
	rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
	if (rc) {
		DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
		return -EINVAL;
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

	return rc;
}

static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n",
			   vlan->vid);
	}

	edev->accept_any_vlan = false;
}
static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_get_stats64 = qede_get_stats64,
};
/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module,
					    u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	edev->num_tc = edev->dev_info.num_tc;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}
static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	u32 hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	ndev->netdev_ops = &qede_netdev_ops;

	qede_set_ethtool_ops(ndev);

	/* user-changeble features */
	hw_features = NETIF_F_GRO | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
}
/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
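
/* Illustrative decoding (hypothetical values, derived from the bit layout
 * documented above): debug=0x40000000 (b30) selects QED_LEVEL_INFO,
 * debug=0x80000000 (b31) selects QED_LEVEL_NOTICE, and any value with bits
 * b29-b0 set selects QED_LEVEL_VERBOSE with those low bits used as the
 * per-module verbosity mask.
 */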
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_rss(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			kfree(fp->rxq);
			kfree(fp->txqs);
		}
		kfree(edev->fp_array);
	}
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	for_each_rss(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
		if (!fp->rxq) {
			DP_NOTICE(edev, "RXQ struct allocation failed\n");
			goto err;
		}

		fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
		if (!fp->txqs) {
			DP_NOTICE(edev, "TXQ array allocation failed\n");
			goto err;
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}
static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	mutex_lock(&edev->qede_lock);

	if (edev->state == QEDE_STATE_OPEN) {
		if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
			qede_config_rx_mode(edev->ndev);
	}

	mutex_unlock(&edev->qede_lock);
}
static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct qed_pf_params));
	pf_params.eth_pf_params.num_cons = 32;
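	/* Assumption (not verified against the qed core): the 32 L2
	 * connections requested here are the pool from which the Rx/Tx
	 * queues are later carved, so this value effectively bounds how
	 * many queues this PF can open.
	 */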
	qed_ops->common->update_pf_params(cdev, &pf_params);
}
enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			enum qede_probe_mode mode)
{
	struct qed_slowpath_params params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
				      dp_module, dp_level);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = QED_INT_MODE_MSIX;
	params.drv_major = QEDE_MAJOR_VERSION;
	params.drv_minor = QEDE_MINOR_VERSION;
	params.drv_rev = QEDE_REVISION_VERSION;
	params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
				   dp_level);
	if (!edev) {
		rc = -ENOMEM;
		goto err2;
	}

	qede_init_ndev(edev);

	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err3;
	}

	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);

	DP_INFO(edev, "Ending successfully qede probe\n");

	return 0;

err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level,
			    QEDE_PROBE_NORMAL);
}
enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	cancel_delayed_work_sync(&edev->sp_task);
	unregister_netdev(ndev);

	edev->ops->common->set_power_state(cdev, PCI_D0);

	pci_set_drvdata(pdev, NULL);

	free_netdev(ndev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	qed_ops->common->remove(cdev);

	pr_notice("Ending successfully qede_remove\n");
}
static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */
static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_rss)
		rss_num = edev->req_rss;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_rss = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_RSS_CNT(edev), rss_num);
		rc = 0;
	}
	return rc;
}
static void qede_free_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
}
/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt),
				     &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		__free_page(data);
	}
}
static void qede_free_sge_mem(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	int i;

	if (edev->gro_disable)
		return;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;

		if (replace_buf->data) {
			dma_unmap_page(&edev->pdev->dev,
				       dma_unmap_addr(replace_buf, mapping),
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(replace_buf->data);
		}
	}
}
static void qede_free_mem_rxq(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	qede_free_sge_mem(edev, rxq);

	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;
	u16 rx_buf_size;

	rx_buf_size = rxq->rx_buf_size;

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data)) {
		DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
		return -ENOMEM;
	}

	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
	 */
	mapping = dma_map_page(&edev->pdev->dev, data, 0,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		__free_page(data);
		DP_NOTICE(edev, "Failed to map Rx buffer\n");
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));

	rxq->sw_rx_prod++;

	return 0;
}
static int qede_alloc_sge_mem(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	dma_addr_t mapping;
	int i;

	if (edev->gro_disable)
		return 0;

	if (edev->ndev->mtu > PAGE_SIZE) {
		edev->gro_disable = 1;
		return 0;
	}

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;

		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
		if (unlikely(!replace_buf->data)) {
			DP_NOTICE(edev,
				  "Failed to allocate TPA skb pool [replacement buffer]\n");
			goto err;
		}

		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
				       rxq->rx_buf_size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
			DP_NOTICE(edev,
				  "Failed to map TPA replacement buffer\n");
			goto err;
		}

		dma_unmap_addr_set(replace_buf, mapping, mapping);
		tpa_info->replace_buf.page_offset = 0;

		tpa_info->replace_buf_mapping = mapping;
		tpa_info->agg_state = QEDE_AGG_STATE_NONE;
	}

	return 0;

err:
	qede_free_sge_mem(edev, rxq);
	edev->gro_disable = 1;
	return -ENOMEM;
}
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD +
			   edev->ndev->mtu;
	if (rxq->rx_buf_size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE;

	/* Segment size to split a page in multiple equal parts */
	rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
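
	/* Illustrative example: with 4K pages and a standard 1500-byte MTU,
	 * rx_buf_size comes out a little above 1500 bytes and rounds up to a
	 * 2K segment, so each DMA-mapped page can back two Rx buffer
	 * segments.
	 */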
	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    RX_RING_SIZE,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    RX_RING_SIZE,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(edev, rxq);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	rc = qede_alloc_sge_mem(edev, rxq);

	return rc;

err:
	qede_free_mem_rxq(edev, rxq);
	return -ENOMEM;
}
static void qede_free_mem_txq(struct qede_dev *edev,
			      struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	kfree(txq->sw_tx_ring);

	/* Free the real Tx ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev,
			      struct qede_tx_queue *txq)
{
	union eth_tx_bd_types *p_virt;
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
	txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
	if (!txq->sw_tx_ring) {
		DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
		goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    NUM_TX_BDS_MAX,
					    sizeof(*p_virt),
					    &txq->tx_pbl);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev,
			     struct qede_fastpath *fp)
{
	int tc;

	qede_free_mem_sb(edev, fp->sb_info);

	qede_free_mem_rxq(edev, fp->rxq);

	for (tc = 0; tc < edev->num_tc; tc++)
		qede_free_mem_txq(edev, &fp->txqs[tc]);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev,
			     struct qede_fastpath *fp)
{
	int rc, tc;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id);
	if (rc)
		goto err;

	rc = qede_alloc_mem_rxq(edev, fp->rxq);
	if (rc)
		goto err;

	for (tc = 0; tc < edev->num_tc; tc++) {
		rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
		if (rc)
			goto err;
	}

	return 0;

err:
	return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}
/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, rss_id;

	for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) {
		struct qede_fastpath *fp = &edev->fp_array[rss_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       rss_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int rss_id, txq_index, tc;
	struct qede_fastpath *fp;

	for_each_rss(rss_id) {
		fp = &edev->fp_array[rss_id];

		fp->rss_id = rss_id;

		memset((void *)&fp->napi, 0, sizeof(fp->napi));

		memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));

		memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
		fp->rxq->rxq_id = rss_id;

		memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs)));
		for (tc = 0; tc < edev->num_tc; tc++) {
			txq_index = tc * QEDE_RSS_CNT(edev) + rss_id;
			fp->txqs[tc].index = txq_index;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, rss_id);
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
}
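
/* Illustration of the Tx queue indexing used in qede_init_fp() above:
 * queues are laid out in per-TC banks of QEDE_RSS_CNT(edev) entries, so with
 * 4 RSS queues and 2 TCs, fastpath 1 owns Tx queue 1 (tc 0) and Tx queue 5
 * (tc 1), i.e. txq_index = tc * QEDE_RSS_CNT(edev) + rss_id.
 */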
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc;

	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}
	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}
static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_rss(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}
static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_rss(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}
static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_RSS_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_RSS_CNT(edev); i++) {
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}
static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}
static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_RSS_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_RSS_CNT(edev);
	}
	return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq,
			  bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}
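
/* Summary of the drain strategy above (description of the code, not extra
 * behaviour): the queue is polled with ~1-2ms sleeps until the SW producer
 * and consumer meet; if the queue appears stuck, a single MCP drain request
 * is issued and the wait is retried once with draining disallowed before
 * giving up.
 */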
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];

			rc = qede_drain_txq(edev, txq, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qed_stop_txq_params tx_params;

			tx_params.rss_id = i;
			tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i;

			rc = edev->ops->q_tx_stop(cdev, &tx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop TXQ #%d\n",
				       tx_params.tx_queue_id);
				return rc;
			}
		}

		/* Stop the Rx Queue */
		memset(&rx_params, 0, sizeof(rx_params));
		rx_params.rss_id = i;
		rx_params.rx_queue_id = i;

		rc = edev->ops->q_rx_stop(cdev, &rx_params);
		if (rc) {
			DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
			return rc;
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}
static int qede_start_queues(struct qede_dev *edev)
{
	int rc, tc, i;
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
	struct qed_update_vport_params vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};

	if (!edev->num_rss) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;

	rc = edev->ops->vport_start(cdev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;

		memset(&q_params, 0, sizeof(q_params));
		q_params.rss_id = i;
		q_params.queue_id = i;
		q_params.vport_id = 0;
		q_params.sb = fp->sb_info->igu_sb_id;
		q_params.sb_idx = RX_PI;

		rc = edev->ops->q_rx_start(cdev, &q_params,
					   fp->rxq->rx_buf_size,
					   fp->rxq->rx_bd_ring.p_phys_addr,
					   phys_table,
					   fp->rxq->rx_comp_ring.page_cnt,
					   &fp->rxq->hw_rxq_prod_addr);
		if (rc) {
			DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
			return rc;
		}

		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];

		qede_update_rx_prod(edev, fp->rxq);

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];
			int txq_index = tc * QEDE_RSS_CNT(edev) + i;

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = txq_index;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = edev->ops->q_tx_start(cdev, &q_params,
						   txq->tx_pbl.pbl.p_phys_table,
						   txq->tx_pbl.page_cnt,
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start TXQ #%d failed %d\n",
				       txq_index, rc);
				return rc;
			}

			txq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = start.vport_id;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	/* Fill struct with RSS params */
	if (QEDE_RSS_CNT(edev) > 1) {
		vport_update_params.update_rss_flg = 1;
		for (i = 0; i < 128; i++)
			rss_params->rss_ind_table[i] =
				ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
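		/* The default indirection table spreads its 128 entries
		 * round-robin across the active Rx queues
		 * (ethtool_rxfh_indir_default() maps entry i to
		 * i % QEDE_RSS_CNT(edev)), so hashed flows are distributed
		 * evenly across all RSS queues.
		 */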
		netdev_rss_key_fill(rss_params->rss_key,
				    sizeof(rss_params->rss_key));
	} else {
		memset(rss_params, 0, sizeof(*rss_params));
	}
	memcpy(&vport_update_params.rss_params, rss_params,
	       sizeof(*rss_params));

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_CLOSED;

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	mutex_unlock(&edev->qede_lock);
	DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
	QEDE_LOAD_NORMAL,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
{
	struct qed_link_params link_params;
	struct qed_link_output link_output;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	rc = qede_set_num_queues(edev);
	if (rc)
		goto err0;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto err0;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_RSS_CNT(edev), edev->num_tc);

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_OPEN;
	mutex_unlock(&edev->qede_lock);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
	qede_link_update(edev, &link_output);

	DP_INFO(edev, "Ending successfully qede load\n");

	return 0;

err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
err0:
	return rc;
}
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *, union qede_reload_args *),
		 union qede_reload_args *args)
{
	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	/* Call function handler to update parameters
	 * needed for function load.
	 */
	if (func)
		func(edev, args);

	qede_load(edev, QEDE_LOAD_NORMAL);

	mutex_lock(&edev->qede_lock);
	qede_config_rx_mode(edev->ndev);
	mutex_unlock(&edev->qede_lock);
}
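
/* Typical (illustrative) use of qede_reload(): a configuration path such as
 * an MTU or ring-size change passes a small callback that applies the new
 * value to the qede_dev between the unload and load steps; passing a NULL
 * func simply restarts the datapath with the current parameters.
 */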
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	return qede_load(edev, QEDE_LOAD_NORMAL);
}
static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	return 0;
}
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
		}
	}
}
static int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}
static void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_INFO(edev, "qede_set_rx_mode called\n");

	if (edev->state != QEDE_STATE_OPEN) {
		DP_INFO(edev,
			"qede_set_rx_mode called while interface is down\n");
	} else {
		set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task, 0);
	}
}
/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count, i;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if ((ndev->flags & IFF_PROMISC) ||
	    (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		/* Add MAC filters according to the unicast secondary macs */
		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}

		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
		if (rc)
			goto out;
	}

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}