// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"
#define TX_IRQ_NO_PENDING               0
#define TX_IRQ_NO_COALESC               0
#define TX_IRQ_NO_LLI_TIMER             0
#define TX_IRQ_NO_CREDIT                0
#define TX_IRQ_NO_RESEND_TIMER          0

#define CI_UPDATE_NO_PENDING            0
#define CI_UPDATE_NO_COALESC            0

#define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN                     17

#define MAX_PAYLOAD_OFFSET              221
#define TRANSPORT_OFFSET(l4_hdr, skb)   ((u32)((l4_hdr) - (skb)->data))
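
/* The hinic_l3/hinic_l4 unions below let the offload helpers view the same
 * header pointer either as an IPv4/IPv6 header, a TCP/UDP header or as raw
 * bytes, so one code path can handle both IP versions and all supported L4
 * protocols.
 */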
union hinic_l3 {
        struct iphdr *v4;
        struct ipv6hdr *v6;
        unsigned char *hdr;
};

union hinic_l4 {
        struct tcphdr *tcp;
        struct udphdr *udp;
        unsigned char *hdr;
};

enum hinic_offload_type {
        TX_OFFLOAD_TSO     = BIT(0),
        TX_OFFLOAD_CSUM    = BIT(1),
        TX_OFFLOAD_VLAN    = BIT(2),
        TX_OFFLOAD_INVALID = BIT(3),
};
/**
 * hinic_txq_clean_stats - Clean the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_update_begin(&txq_stats->syncp);
        txq_stats->pkts    = 0;
        txq_stats->bytes   = 0;
        txq_stats->tx_busy = 0;
        txq_stats->tx_wake = 0;
        txq_stats->tx_dropped = 0;
        txq_stats->big_frags_pkts = 0;
        u64_stats_update_end(&txq_stats->syncp);
}
/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;
        unsigned int start;

        u64_stats_update_begin(&stats->syncp);
        do {
                start = u64_stats_fetch_begin(&txq_stats->syncp);
                stats->pkts    = txq_stats->pkts;
                stats->bytes   = txq_stats->bytes;
                stats->tx_busy = txq_stats->tx_busy;
                stats->tx_wake = txq_stats->tx_wake;
                stats->tx_dropped = txq_stats->tx_dropped;
                stats->big_frags_pkts = txq_stats->big_frags_pkts;
        } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
        u64_stats_update_end(&stats->syncp);
}
/**
 * txq_stats_init - Initialize the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_init(&txq_stats->syncp);
        hinic_txq_clean_stats(txq);
}
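
/* DMA mapping layout used by the xmit path: sges[0] always describes the
 * linear part of the skb (mapped with dma_map_single()), and sges[1..n]
 * describe the page fragments (mapped with skb_frag_dma_map()).
 */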
/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                      struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        skb_frag_t *frag;
        dma_addr_t dma_addr;
        int i, j;

        dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma_addr)) {
                dev_err(&pdev->dev, "Failed to map Tx skb data\n");
                return -EFAULT;
        }

        hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];

                dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, dma_addr)) {
                        dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
                        goto err_tx_map;
                }

                hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
        }

        return 0;

err_tx_map:
        for (j = 0; j < i; j++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
                               sges[j + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
        return -EFAULT;
}
/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                         struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
                               sges[i + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
}
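
/* Resolve the inner L3 type and L4 protocol for the offload descriptors.
 * For IPv6, extension headers sitting between the basic header and the
 * transport header are skipped with ipv6_skip_exthdr() so that *l4_proto
 * ends up holding the real transport protocol.
 */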
static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
                                 union hinic_l4 *l4,
                                 enum hinic_offload_type offload_type,
                                 enum hinic_l3_offload_type *l3_type,
                                 u8 *l4_proto)
{
        unsigned char *exthdr;

        if (ip->v4->version == 4) {
                *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
                           IPV4_PKT_NO_CHKSUM_OFFLOAD :
                           IPV4_PKT_WITH_CHKSUM_OFFLOAD;
                *l4_proto = ip->v4->protocol;
        } else if (ip->v4->version == 6) {
                *l3_type = IPV6_PKT;
                exthdr = ip->hdr + sizeof(*ip->v6);
                *l4_proto = ip->v6->nexthdr;
                if (exthdr != l4->hdr) {
                        int start = exthdr - skb->data;
                        __be16 frag_off;

                        ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
                }
        } else {
                *l3_type = L3TYPE_UNKNOWN;
                *l4_proto = 0;
        }
}
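
/* Translate the L4 protocol into the hardware offload type, the L4 header
 * length and the payload offset (transport offset plus the L4 header for
 * TCP). The payload offset computed here is later checked against
 * MAX_PAYLOAD_OFFSET in hinic_tx_offload().
 */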
static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
                              enum hinic_offload_type offload_type, u8 l4_proto,
                              enum hinic_l4_offload_type *l4_offload,
                              u32 *l4_len, u32 *offset)
{
        *l4_offload = OFFLOAD_DISABLE;
        *offset = 0;
        *l4_len = 0;

        switch (l4_proto) {
        case IPPROTO_TCP:
                *l4_offload = TCP_OFFLOAD_ENABLE;
                /* doff is in units of 4B */
                *l4_len = l4->tcp->doff * 4;
                *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
                break;

        case IPPROTO_UDP:
                *l4_offload = UDP_OFFLOAD_ENABLE;
                *l4_len = sizeof(struct udphdr);
                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
                break;

        case IPPROTO_SCTP:
                /* only csum offload supports sctp */
                if (offload_type != TX_OFFLOAD_CSUM)
                        break;

                *l4_offload = SCTP_OFFLOAD_ENABLE;
                *l4_len = sizeof(struct sctphdr);
                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
                break;

        default:
                break;
        }
}
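
/* Pseudo-header checksum over the IP addresses and protocol, with length 0.
 * The callers store the complement of this value in the TCP/UDP checksum
 * field so that the hardware only has to add the payload checksum.
 */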
static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
        return (ip->v4->version == 4) ?
                csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
                csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
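
/* TSO offload: for encapsulated skbs the outer L3 type and tunnel length are
 * programmed first, then the inner headers are used for the segmentation
 * parameters. tot_len/payload_len are cleared and the TCP checksum is seeded
 * with the pseudo-header checksum, since the hardware fills both per segment.
 */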
static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
                       struct sk_buff *skb)
{
        u32 offset, l4_len, ip_identify, network_hdr_len;
        enum hinic_l3_offload_type l3_offload;
        enum hinic_l4_offload_type l4_offload;
        union hinic_l3 ip;
        union hinic_l4 l4;
        u8 l4_proto;

        if (!skb_is_gso(skb))
                return 0;

        if (skb_cow_head(skb, 0) < 0)
                return -EPROTONOSUPPORT;

        if (skb->encapsulation) {
                u32 gso_type = skb_shinfo(skb)->gso_type;
                u32 tunnel_type = 0;
                u32 l4_tunnel_len;

                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_inner_network_header_len(skb);

                if (ip.v4->version == 4) {
                        ip.v4->tot_len = 0;
                        l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
                } else if (ip.v4->version == 6) {
                        l3_offload = IPV6_PKT;
                } else {
                        l3_offload = 0;
                }

                hinic_task_set_outter_l3(task, l3_offload,
                                         skb_network_header_len(skb));

                if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
                        l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
                        tunnel_type = TUNNEL_UDP_CSUM;
                } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
                        tunnel_type = TUNNEL_UDP_NO_CSUM;
                }

                l4_tunnel_len = skb_inner_network_offset(skb) -
                                skb_transport_offset(skb);
                hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

                ip.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_inner_transport_header(skb);
        } else {
                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_network_header_len(skb);
        }

        /* initialize inner IP header fields */
        if (ip.v4->version == 4)
                ip.v4->tot_len = 0;
        else
                ip.v6->payload_len = 0;

        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
                             &l4_proto);

        hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

        ip_identify = 0;
        if (l4_proto == IPPROTO_TCP)
                l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

        get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
                          &l4_len, &offset);

        hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
                               ip_identify, skb_shinfo(skb)->gso_size);

        return 1;
}
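
/* Checksum offload for CHECKSUM_PARTIAL skbs: program the inner (or only)
 * L3/L4 types and the payload offset so the hardware can insert the L4
 * checksum. Returns 1 when the offload was enabled, 0 when not needed.
 */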
static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
                        struct sk_buff *skb)
{
        enum hinic_l4_offload_type l4_offload;
        u32 offset, l4_len, network_hdr_len;
        enum hinic_l3_offload_type l3_type;
        union hinic_l3 ip;
        union hinic_l4 l4;
        u8 l4_proto;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (skb->encapsulation) {
                u32 l4_tunnel_len;

                ip.hdr = skb_network_header(skb);

                if (ip.v4->version == 4)
                        l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
                else if (ip.v4->version == 6)
                        l3_type = IPV6_PKT;
                else
                        l3_type = L3TYPE_UNKNOWN;

                hinic_task_set_outter_l3(task, l3_type,
                                         skb_network_header_len(skb));

                l4_tunnel_len = skb_inner_network_offset(skb) -
                                skb_transport_offset(skb);

                hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
                                         l4_tunnel_len);

                ip.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_inner_transport_header(skb);
                network_hdr_len = skb_inner_network_header_len(skb);
        } else {
                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_network_header_len(skb);
        }

        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
                             &l4_proto);

        hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

        get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
                          &l4_len, &offset);

        hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

        return 1;
}
static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
                         u16 vlan_tag, u16 vlan_pri)
{
        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
                           HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

        *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
}
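
/* offload_tso()/offload_csum() return >0 when the offload was enabled,
 * 0 when it is not needed and <0 when the skb cannot be offloaded; the
 * latter makes hinic_xmit_frame() drop the packet.
 */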
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
                            u32 *queue_info)
{
        enum hinic_offload_type offload = 0;
        u16 vlan_tag;
        int enabled;

        enabled = offload_tso(task, queue_info, skb);
        if (enabled > 0) {
                offload |= TX_OFFLOAD_TSO;
        } else if (enabled == 0) {
                enabled = offload_csum(task, queue_info, skb);
                if (enabled)
                        offload |= TX_OFFLOAD_CSUM;
        } else {
                return -EPROTONOSUPPORT;
        }

        if (unlikely(skb_vlan_tag_present(skb))) {
                vlan_tag = skb_vlan_tag_get(skb);
                offload_vlan(task, queue_info, vlan_tag,
                             vlan_tag >> VLAN_PRIO_SHIFT);
                offload |= TX_OFFLOAD_VLAN;
        }

        if (offload)
                hinic_task_set_l2hdr(task, skb_network_offset(skb));

        /* payload offset must not exceed 221 */
        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
            MAX_PAYLOAD_OFFSET) {
                return -EPROTONOSUPPORT;
        }

        /* mss must not be less than 80 */
        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
                *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
                *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
        }

        return 0;
}
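
/* Transmit fast path: map the skb into sges, reserve a WQE, fill in the
 * offload task fields, write the WQE and finally ring the doorbell. The
 * doorbell write is skipped while the stack indicates more packets are
 * coming (netdev_xmit_more()) unless the queue is stopped, which batches
 * doorbell writes across a burst of packets.
 */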
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        u16 prod_idx, q_id = skb->queue_mapping;
        struct netdev_queue *netdev_txq;
        int nr_sges, err = NETDEV_TX_OK;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct hinic_txq *txq;
        struct hinic_qp *qp;

        txq = &nic_dev->txqs[q_id];
        qp = container_of(txq->sq, struct hinic_qp, sq);

        if (skb->len < MIN_SKB_LEN) {
                if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
                        netdev_err(netdev, "Failed to pad skb\n");
                        goto update_error_stats;
                }

                skb->len = MIN_SKB_LEN;
        }

        nr_sges = skb_shinfo(skb)->nr_frags + 1;
        if (nr_sges > 17) {
                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.big_frags_pkts++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        if (nr_sges > txq->max_sges) {
                netdev_err(netdev, "Too many Tx sges\n");
                goto skb_error;
        }

        err = tx_map_skb(nic_dev, skb, txq->sges);
        if (err)
                goto skb_error;

        wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

        sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
        if (!sq_wqe) {
                netif_stop_subqueue(netdev, qp->q_id);

                /* Check for the case where free_tx_poll runs on another cpu
                 * and we stopped the subqueue after its check.
                 */
                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                        goto process_sq_wqe;
                }

                tx_unmap_skb(nic_dev, skb, txq->sges);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
                wqe_size = 0;
                goto flush_skbs;
        }

process_sq_wqe:
        hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

        err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
        if (err)
                goto offload_error;

        hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
        netdev_txq = netdev_get_tx_queue(netdev, q_id);
        if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
                hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

        return err;

offload_error:
        hinic_sq_return_wqe(txq->sq, wqe_size);
        tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
        dev_kfree_skb_any(skb);

update_error_stats:
        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.tx_dropped++;
        u64_stats_update_end(&txq->txq_stats.syncp);

        return NETDEV_TX_OK;
}
/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                        struct hinic_sge *sges)
{
        tx_unmap_skb(nic_dev, skb, sges);

        dev_kfree_skb_any(skb);
}
/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_sq *sq = txq->sq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct sk_buff *skb;
        int nr_sges;
        u16 ci;

        while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
                sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
                if (!sq_wqe)
                        break;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;

                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

                hinic_sq_put_wqe(sq, wqe_size);

                tx_free_skb(nic_dev, skb, txq->free_sges);
        }
}
/**
 * free_tx_poll - free finished tx skbs in tx queue that is connected to napi
 * @napi: napi
 * @budget: number of tx packets to process
 *
 * Return number of freed packets, up to @budget
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
        struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
        struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct netdev_queue *netdev_txq;
        struct hinic_sq *sq = txq->sq;
        struct hinic_wq *wq = sq->wq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        int nr_sges, pkts = 0;
        struct sk_buff *skb;
        u64 tx_bytes = 0;
        u16 hw_ci, sw_ci;

        do {
                hw_ci = HW_CONS_IDX(sq) & wq->mask;

                /* Reading a WQEBB to get real WQE size and consumer index. */
                sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
                if (!sq_wqe ||
                    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
                        break;

                /* If this WQE has multiple WQEBBs, read again to get the
                 * full-size WQE.
                 */
                if (wqe_size > wq->wqebb_size) {
                        sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
                        if (unlikely(!sq_wqe))
                                break;
                }

                tx_bytes += skb->len;
                pkts++;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;

                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

                hinic_sq_put_wqe(sq, wqe_size);

                tx_free_skb(nic_dev, skb, txq->free_sges);
        } while (pkts < budget);

        if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
            hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
                netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

                __netif_tx_lock(netdev_txq, smp_processor_id());

                netif_wake_subqueue(nic_dev->netdev, qp->q_id);

                __netif_tx_unlock(netdev_txq);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_wake++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.bytes += tx_bytes;
        txq->txq_stats.pkts += pkts;
        u64_stats_update_end(&txq->txq_stats.syncp);

        if (pkts < budget) {
                napi_complete(napi);
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           sq->msix_entry,
                                           HINIC_MSIX_ENABLE);

                return pkts;
        }

        return budget;
}
static void tx_napi_add(struct hinic_txq *txq, int weight)
{
        netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
        napi_enable(&txq->napi);
}
static void tx_napi_del(struct hinic_txq *txq)
{
        napi_disable(&txq->napi);
        netif_napi_del(&txq->napi);
}
static irqreturn_t tx_irq(int irq, void *data)
{
        struct hinic_txq *txq = data;
        struct hinic_dev *nic_dev;

        nic_dev = netdev_priv(txq->netdev);

        /* Disable the interrupt until napi is completed */
        hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                   txq->sq->msix_entry,
                                   HINIC_MSIX_DISABLE);

        hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

        napi_schedule(&txq->napi);
        return IRQ_HANDLED;
}
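
/* NAPI is registered and the MSI-X entry is configured before the IRQ is
 * requested, so the interrupt handler can schedule NAPI as soon as it is
 * installed.
 */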
static int tx_request_irq(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_sq *sq = txq->sq;
        int err;

        tx_napi_add(txq, nic_dev->tx_weight);

        hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
                             TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
                             TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
                             TX_IRQ_NO_RESEND_TIMER);

        err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
        if (err) {
                dev_err(&pdev->dev, "Failed to request Tx irq\n");
                tx_napi_del(txq);
                return err;
        }

        return 0;
}
static void tx_free_irq(struct hinic_txq *txq)
{
        struct hinic_sq *sq = txq->sq;

        free_irq(sq->irq, txq);
        tx_napi_del(txq);
}
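
/* sges, free_sges and irq_name are allocated below with devm_kzalloc() and
 * explicitly released in reverse order on the error paths and in
 * hinic_clean_txq().
 */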
/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
                   struct net_device *netdev)
{
        struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        int err, irqname_len;
        size_t sges_size;

        txq->netdev = netdev;
        txq->sq = sq;

        txq_stats_init(txq);

        txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

        sges_size = txq->max_sges * sizeof(*txq->sges);
        txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
        if (!txq->sges)
                return -ENOMEM;

        sges_size = txq->max_sges * sizeof(*txq->free_sges);
        txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
        if (!txq->free_sges) {
                err = -ENOMEM;
                goto err_alloc_free_sges;
        }

        irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
        txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
        if (!txq->irq_name) {
                err = -ENOMEM;
                goto err_alloc_irqname;
        }

        sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);

        err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
                                         CI_UPDATE_NO_COALESC);
        if (err)
                goto err_hw_ci;

        err = tx_request_irq(txq);
        if (err) {
                netdev_err(netdev, "Failed to request Tx irq\n");
                goto err_req_tx_irq;
        }

        return 0;

err_req_tx_irq:
err_hw_ci:
        devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
        devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
        devm_kfree(&netdev->dev, txq->sges);
        return err;
}
/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
        struct net_device *netdev = txq->netdev;

        tx_free_irq(txq);

        free_all_tx_skbs(txq);

        devm_kfree(&netdev->dev, txq->irq_name);
        devm_kfree(&netdev->dev, txq->free_sges);
        devm_kfree(&netdev->dev, txq->sges);
}