// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING               0
#define TX_IRQ_NO_COALESC               0
#define TX_IRQ_NO_LLI_TIMER             0
#define TX_IRQ_NO_CREDIT                0
#define TX_IRQ_NO_RESEND_TIMER          0

#define CI_UPDATE_NO_PENDING            0
#define CI_UPDATE_NO_COALESC            0

#define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN                     32

#define MAX_PAYLOAD_OFFSET              221
#define TRANSPORT_OFFSET(l4_hdr, skb)   ((u32)((l4_hdr) - (skb)->data))

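/* The hardware reports its Tx consumer index as a big-endian 16-bit value at
 * sq->hw_ci_addr; HW_CONS_IDX() converts it to host order so free_tx_poll()
 * can compare it with the software consumer index. MAX_PAYLOAD_OFFSET bounds
 * the payload offset field accepted by hinic_tx_offload(), and frames shorter
 * than MIN_SKB_LEN are padded in hinic_xmit_frame() before being posted.
 */
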
union hinic_l3 {
        struct iphdr *v4;
        struct ipv6hdr *v6;
        unsigned char *hdr;
};

union hinic_l4 {
        struct tcphdr *tcp;
        struct udphdr *udp;
        unsigned char *hdr;
};

enum hinic_offload_type {
        TX_OFFLOAD_TSO     = BIT(0),
        TX_OFFLOAD_CSUM    = BIT(1),
        TX_OFFLOAD_VLAN    = BIT(2),
        TX_OFFLOAD_INVALID = BIT(3),
};

/**
 * hinic_txq_clean_stats - Clean the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_update_begin(&txq_stats->syncp);
        txq_stats->pkts    = 0;
        txq_stats->bytes   = 0;
        txq_stats->tx_busy = 0;
        txq_stats->tx_wake = 0;
        txq_stats->tx_dropped = 0;
        txq_stats->big_frags_pkts = 0;
        u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;
        unsigned int start;

        u64_stats_update_begin(&stats->syncp);
        do {
                start = u64_stats_fetch_begin(&txq_stats->syncp);
                stats->pkts    = txq_stats->pkts;
                stats->bytes   = txq_stats->bytes;
                stats->tx_busy = txq_stats->tx_busy;
                stats->tx_wake = txq_stats->tx_wake;
                stats->tx_dropped = txq_stats->tx_dropped;
                stats->big_frags_pkts = txq_stats->big_frags_pkts;
        } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
        u64_stats_update_end(&stats->syncp);
}

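/* Writers update the counters under txq_stats->syncp, and the
 * fetch_begin/fetch_retry pair above lets readers (including 32-bit systems)
 * take a consistent snapshot of the 64-bit counters without blocking the
 * transmit path.
 */
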
/**
 * txq_stats_init - Initialize the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_init(&txq_stats->syncp);
        hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                      struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        skb_frag_t *frag;
        dma_addr_t dma_addr;
        int i, j;

        dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma_addr)) {
                dev_err(&pdev->dev, "Failed to map Tx skb data\n");
                return -EFAULT;
        }

        hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];

                dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, dma_addr)) {
                        dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
                        goto err_tx_map;
                }

                hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
        }

        return 0;

err_tx_map:
        for (j = 0; j < i; j++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
                               sges[j + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
        return -EFAULT;
}

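/* sges[0] always describes the linear part of the skb and sges[1..nr_frags]
 * map the page fragments. The unwind path above and tx_unmap_skb() below rely
 * on this layout: index 0 is released with dma_unmap_single() and the rest
 * with dma_unmap_page().
 */
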
/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                         struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
                               sges[i + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
}

static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
                                 union hinic_l4 *l4,
                                 enum hinic_offload_type offload_type,
                                 enum hinic_l3_offload_type *l3_type,
                                 u8 *l4_proto)
{
        u8 *exthdr;

        if (ip->v4->version == 4) {
                *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
                           IPV4_PKT_NO_CHKSUM_OFFLOAD :
                           IPV4_PKT_WITH_CHKSUM_OFFLOAD;
                *l4_proto = ip->v4->protocol;
        } else if (ip->v4->version == 6) {
                *l3_type = IPV6_PKT;
                exthdr = ip->hdr + sizeof(*ip->v6);
                *l4_proto = ip->v6->nexthdr;
                if (exthdr != l4->hdr) {
                        int start = exthdr - skb->data;
                        __be16 frag_off;

                        ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
                }
        } else {
                *l3_type = L3TYPE_UNKNOWN;
                *l4_proto = 0;
        }
}

static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
                              enum hinic_offload_type offload_type, u8 l4_proto,
                              enum hinic_l4_offload_type *l4_offload,
                              u32 *l4_len, u32 *offset)
{
        *l4_offload = OFFLOAD_DISABLE;
        *offset = 0;
        *l4_len = 0;

        switch (l4_proto) {
        case IPPROTO_TCP:
                *l4_offload = TCP_OFFLOAD_ENABLE;
                /* doff is in units of 4B */
                *l4_len = l4->tcp->doff * 4;
                *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
                break;

        case IPPROTO_UDP:
                *l4_offload = UDP_OFFLOAD_ENABLE;
                *l4_len = sizeof(struct udphdr);
                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
                break;

        case IPPROTO_SCTP:
                /* only csum offload supports sctp */
                if (offload_type != TX_OFFLOAD_CSUM)
                        break;

                *l4_offload = SCTP_OFFLOAD_ENABLE;
                *l4_len = sizeof(struct sctphdr);
                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
                break;

        default:
                break;
        }
}

static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
        return (ip->v4->version == 4) ?
                csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
                csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}

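/* csum_magic() yields the pseudo-header checksum for either IP version; the
 * TSO and tunnel paths store its complement in the L4 checksum field so the
 * hardware can finish the checksum over each segmented payload.
 */
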
static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
                       struct sk_buff *skb)
{
        u32 offset, l4_len, ip_identify, network_hdr_len;
        enum hinic_l3_offload_type l3_offload;
        enum hinic_l4_offload_type l4_offload;
        union hinic_l3 ip;
        union hinic_l4 l4;
        u8 l4_proto;

        if (!skb_is_gso(skb))
                return 0;

        if (skb_cow_head(skb, 0) < 0)
                return -EPROTONOSUPPORT;

        if (skb->encapsulation) {
                u32 gso_type = skb_shinfo(skb)->gso_type;
                u32 tunnel_type = 0;
                u32 l4_tunnel_len;

                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_inner_network_header_len(skb);

                if (ip.v4->version == 4) {
                        ip.v4->tot_len = 0;
                        l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
                } else if (ip.v4->version == 6) {
                        l3_offload = IPV6_PKT;
                } else {
                        l3_offload = 0;
                }

                hinic_task_set_outter_l3(task, l3_offload,
                                         skb_network_header_len(skb));

                if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
                        l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
                        tunnel_type = TUNNEL_UDP_CSUM;
                } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
                        tunnel_type = TUNNEL_UDP_NO_CSUM;
                }

                l4_tunnel_len = skb_inner_network_offset(skb) -
                                skb_transport_offset(skb);
                hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

                ip.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_inner_transport_header(skb);
        } else {
                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_network_header_len(skb);
        }

        /* initialize inner IP header fields */
        if (ip.v4->version == 4)
                ip.v4->tot_len = 0;
        else
                ip.v6->payload_len = 0;

        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
                             &l4_proto);

        hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

        ip_identify = 0;
        if (l4_proto == IPPROTO_TCP)
                l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

        get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
                          &l4_len, &offset);

        hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
                               ip_identify, skb_shinfo(skb)->gso_size);

        return 1;
}

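/* Return convention: offload_tso() returns 1 once the TSO fields have been
 * programmed, 0 when the skb is not GSO (so plain checksum offload may still
 * apply), and a negative errno when the skb cannot be offloaded at all.
 * hinic_tx_offload() relies on this to choose between the TSO and checksum
 * paths.
 */
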
static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
                        struct sk_buff *skb)
{
        enum hinic_l4_offload_type l4_offload;
        u32 offset, l4_len, network_hdr_len;
        enum hinic_l3_offload_type l3_type;
        u32 tunnel_type = NOT_TUNNEL;
        union hinic_l3 ip;
        union hinic_l4 l4;
        u8 l4_proto;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (skb->encapsulation) {
                u32 l4_tunnel_len;

                tunnel_type = TUNNEL_UDP_NO_CSUM;
                ip.hdr = skb_network_header(skb);

                if (ip.v4->version == 4) {
                        l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
                        l4_proto = ip.v4->protocol;
                } else if (ip.v4->version == 6) {
                        unsigned char *exthdr;
                        __be16 frag_off;

                        l3_type = IPV6_PKT;
                        tunnel_type = TUNNEL_UDP_CSUM;
                        exthdr = ip.hdr + sizeof(*ip.v6);
                        l4_proto = ip.v6->nexthdr;
                        l4.hdr = skb_transport_header(skb);
                        if (l4.hdr != exthdr)
                                ipv6_skip_exthdr(skb, exthdr - skb->data,
                                                 &l4_proto, &frag_off);
                } else {
                        l3_type = L3TYPE_UNKNOWN;
                        l4_proto = IPPROTO_RAW;
                }

                hinic_task_set_outter_l3(task, l3_type,
                                         skb_network_header_len(skb));

                switch (l4_proto) {
                case IPPROTO_UDP:
                        l4_tunnel_len = skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb);
                        ip.hdr = skb_inner_network_header(skb);
                        l4.hdr = skb_inner_transport_header(skb);
                        network_hdr_len = skb_inner_network_header_len(skb);
                        break;
                case IPPROTO_IPIP:
                case IPPROTO_IPV6:
                        tunnel_type = NOT_TUNNEL;
                        l4_tunnel_len = 0;

                        ip.hdr = skb_inner_network_header(skb);
                        l4.hdr = skb_transport_header(skb);
                        network_hdr_len = skb_network_header_len(skb);
                        break;
                default:
                        /* Unsupported tunnel packet, disable csum offload */
                        skb_checksum_help(skb);
                        return 0;
                }

                hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
        } else {
                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_network_header_len(skb);
        }

        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
                             &l4_proto);

        hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

        get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
                          &l4_len, &offset);

        hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

        return 1;
}

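/* Encapsulated packets whose outer L4 protocol is not UDP, IPIP or IPv6-in-IP
 * fall back to skb_checksum_help() above: the checksum is computed in
 * software and the frame is still transmitted, just without hardware assist.
 */
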
static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
                         u16 vlan_tag, u16 vlan_pri)
{
        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
                           HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

        *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
}

static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
                            u32 *queue_info)
{
        enum hinic_offload_type offload = 0;
        u16 vlan_tag;
        int enabled;

        enabled = offload_tso(task, queue_info, skb);
        if (enabled > 0) {
                offload |= TX_OFFLOAD_TSO;
        } else if (enabled == 0) {
                enabled = offload_csum(task, queue_info, skb);
                if (enabled)
                        offload |= TX_OFFLOAD_CSUM;
        } else {
                return -EPROTONOSUPPORT;
        }

        if (unlikely(skb_vlan_tag_present(skb))) {
                vlan_tag = skb_vlan_tag_get(skb);
                offload_vlan(task, queue_info, vlan_tag,
                             vlan_tag >> VLAN_PRIO_SHIFT);
                offload |= TX_OFFLOAD_VLAN;
        }

        if (offload)
                hinic_task_set_l2hdr(task, skb_network_offset(skb));

        /* the payload offset must not exceed MAX_PAYLOAD_OFFSET (221) */
        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
            MAX_PAYLOAD_OFFSET) {
                return -EPROTONOSUPPORT;
        }

        /* the mss must not be less than HINIC_MSS_MIN (80) */
        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
                *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
                *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
        }

        return 0;
}

netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        u16 prod_idx, q_id = skb->queue_mapping;
        struct netdev_queue *netdev_txq;
        int nr_sges, err = NETDEV_TX_OK;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct hinic_txq *txq;
        struct hinic_qp *qp;

        txq = &nic_dev->txqs[q_id];
        qp = container_of(txq->sq, struct hinic_qp, sq);
        nr_sges = skb_shinfo(skb)->nr_frags + 1;

        err = tx_map_skb(nic_dev, skb, txq->sges);
        if (err)
                goto skb_error;

        wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

        sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
        if (!sq_wqe) {
                netif_stop_subqueue(netdev, qp->q_id);

                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                        goto process_sq_wqe;
                }

                tx_unmap_skb(nic_dev, skb, txq->sges);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
                wqe_size = 0;
                goto flush_skbs;
        }

process_sq_wqe:
        hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
        hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
        netdev_txq = netdev_get_tx_queue(netdev, q_id);
        if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
                hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

        return err;

skb_error:
        dev_kfree_skb_any(skb);
        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.tx_dropped++;
        u64_stats_update_end(&txq->txq_stats.syncp);

        return NETDEV_TX_OK;
}

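/* The doorbell is only rung when the stack reports no further pending skbs
 * (netdev_xmit_more() is false) or the queue has been stopped, which batches
 * doorbell writes across a burst of packets posted to the same SQ.
 */
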
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        u16 prod_idx, q_id = skb->queue_mapping;
        struct netdev_queue *netdev_txq;
        int nr_sges, err = NETDEV_TX_OK;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct hinic_txq *txq;
        struct hinic_qp *qp;

        txq = &nic_dev->txqs[q_id];
        qp = container_of(txq->sq, struct hinic_qp, sq);

        if (skb->len < MIN_SKB_LEN) {
                if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
                        netdev_err(netdev, "Failed to pad skb\n");
                        goto update_error_stats;
                }

                skb->len = MIN_SKB_LEN;
        }

        nr_sges = skb_shinfo(skb)->nr_frags + 1;
        if (nr_sges > 17) {
                /* count heavily fragmented packets */
                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.big_frags_pkts++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        if (nr_sges > txq->max_sges) {
                netdev_err(netdev, "Too many Tx sges\n");
                goto skb_error;
        }

        err = tx_map_skb(nic_dev, skb, txq->sges);
        if (err)
                goto skb_error;

        wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

        sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
        if (!sq_wqe) {
                netif_stop_subqueue(netdev, qp->q_id);

                /* Check for the case that free_tx_poll is called in another
                 * cpu and we stopped the subqueue after the free_tx_poll check.
                 */
                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                        goto process_sq_wqe;
                }

                tx_unmap_skb(nic_dev, skb, txq->sges);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
                wqe_size = 0;
                goto flush_skbs;
        }

process_sq_wqe:
        hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

        err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
        if (err)
                goto offload_error;

        hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
        netdev_txq = netdev_get_tx_queue(netdev, q_id);
        if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
                hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

        return err;

offload_error:
        hinic_sq_return_wqe(txq->sq, wqe_size);
        tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
        dev_kfree_skb_any(skb);

update_error_stats:
        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.tx_dropped++;
        u64_stats_update_end(&txq->txq_stats.syncp);

        return NETDEV_TX_OK;
}

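/* hinic_lb_xmit_frame() above is used by the driver's loopback self-test and
 * posts the skb without the offload step, while hinic_xmit_frame() is the
 * regular transmit handler that also enforces MIN_SKB_LEN and programs the
 * TSO/checksum/VLAN fields.
 */
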
/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                        struct hinic_sge *sges)
{
        tx_unmap_skb(nic_dev, skb, sges);

        dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_sq *sq = txq->sq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct sk_buff *skb;
        int nr_sges;
        u16 ci;

        while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
                sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
                if (!sq_wqe)
                        break;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;

                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

                hinic_sq_put_wqe(sq, wqe_size);

                tx_free_skb(nic_dev, skb, txq->free_sges);
        }
}

/**
 * free_tx_poll - free finished tx skbs in the tx queue connected to the napi
 * @napi: napi
 * @budget: maximum number of tx skbs to free
 *
 * Return number of packets processed
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
        struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
        struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct netdev_queue *netdev_txq;
        struct hinic_sq *sq = txq->sq;
        struct hinic_wq *wq = sq->wq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        int nr_sges, pkts = 0;
        struct sk_buff *skb;
        u64 tx_bytes = 0;
        u16 hw_ci, sw_ci;

        do {
                hw_ci = HW_CONS_IDX(sq) & wq->mask;

                dma_rmb();

                /* Reading a WQEBB to get real WQE size and consumer index. */
                sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
                if (!sq_wqe ||
                    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
                        break;

                /* If this WQE has multiple WQEBBs, we will read again to get
                 * the full size WQE.
                 */
                if (wqe_size > wq->wqebb_size) {
                        sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
                        if (unlikely(!sq_wqe))
                                break;
                }

                tx_bytes += skb->len;
                pkts++;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;

                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

                hinic_sq_put_wqe(sq, wqe_size);

                tx_free_skb(nic_dev, skb, txq->free_sges);
        } while (pkts < budget);

        if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
            hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
                netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

                __netif_tx_lock(netdev_txq, smp_processor_id());
                if (!netif_testing(nic_dev->netdev))
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);

                __netif_tx_unlock(netdev_txq);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_wake++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.bytes += tx_bytes;
        txq->txq_stats.pkts += pkts;
        u64_stats_update_end(&txq->txq_stats.syncp);

        if (pkts < budget) {
                napi_complete(napi);
                if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                        hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                                   sq->msix_entry,
                                                   HINIC_MSIX_ENABLE);

                return pkts;
        }

        return budget;
}

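/* free_tx_poll() is the NAPI handler for the SQ: it reclaims completed WQEs
 * up to the budget, wakes the subqueue once enough WQEBBs are free again, and
 * re-enables the MSI-X vector (on the PF) only after napi_complete().
 */
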
static irqreturn_t tx_irq(int irq, void *data)
{
        struct hinic_txq *txq = data;
        struct hinic_dev *nic_dev;

        nic_dev = netdev_priv(txq->netdev);

        if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                /* Disable the interrupt until the napi poll is complete */
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           txq->sq->msix_entry,
                                           HINIC_MSIX_DISABLE);

        hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

        napi_schedule(&txq->napi);
        return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_msix_config interrupt_info = {0};
        struct hinic_intr_coal_info *intr_coal = NULL;
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_sq *sq = txq->sq;
        struct hinic_qp *qp;
        int err;

        qp = container_of(sq, struct hinic_qp, sq);

        netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);

        hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
                             TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
                             TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
                             TX_IRQ_NO_RESEND_TIMER);

        intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
        interrupt_info.msix_index = sq->msix_entry;
        interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
        interrupt_info.pending_cnt = intr_coal->pending_limt;
        interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

        err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
        if (err) {
                netif_err(nic_dev, drv, txq->netdev,
                          "Failed to set TX interrupt coalescing attribute\n");
                netif_napi_del(&txq->napi);
                return err;
        }

        err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
        if (err) {
                dev_err(&pdev->dev, "Failed to request Tx irq\n");
                netif_napi_del(&txq->napi);
                return err;
        }

        return 0;
}

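/* The coalescing parameters come from nic_dev->tx_intr_coalesce[], which the
 * driver typically fills from its ethtool coalesce settings, and are pushed
 * to the device with hinic_set_interrupt_cfg() before the IRQ is requested.
 */
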
static void tx_free_irq(struct hinic_txq *txq)
{
        struct hinic_sq *sq = txq->sq;

        free_irq(sq->irq, txq);
        netif_napi_del(&txq->napi);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
                   struct net_device *netdev)
{
        struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        int err, irqname_len;
        size_t sges_size;

        txq->netdev = netdev;
        txq->sq = sq;

        txq_stats_init(txq);

        txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

        sges_size = txq->max_sges * sizeof(*txq->sges);
        txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
        if (!txq->sges)
                return -ENOMEM;

        sges_size = txq->max_sges * sizeof(*txq->free_sges);
        txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
        if (!txq->free_sges) {
                err = -ENOMEM;
                goto err_alloc_free_sges;
        }

        irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
        txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
        if (!txq->irq_name) {
                err = -ENOMEM;
                goto err_alloc_irqname;
        }

        sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);

        err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
                                         CI_UPDATE_NO_COALESC);
        if (err)
                goto err_hw_ci;

        err = tx_request_irq(txq);
        if (err) {
                netdev_err(netdev, "Failed to request Tx irq\n");
                goto err_req_tx_irq;
        }

        return 0;

err_req_tx_irq:
err_hw_ci:
        devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
        devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
        devm_kfree(&netdev->dev, txq->sges);
        return err;
}

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
        struct net_device *netdev = txq->netdev;

        tx_free_irq(txq);

        free_all_tx_skbs(txq);

        devm_kfree(&netdev->dev, txq->irq_name);
        devm_kfree(&netdev->dev, txq->free_sges);
        devm_kfree(&netdev->dev, txq->sges);
}