// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
 * @rx_ring: Current rx ring
 * @pool_present: is pool for XSK present
 *
 * Try to allocate memory and return -ENOMEM if the allocation fails.
 * If the allocation succeeds, substitute the buffer with the allocated one.
 * Returns 0 on success, negative on failure
 **/
static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
{
	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
					  sizeof(*rx_ring->rx_bi);
	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);

	if (!sw_ring)
		return -ENOMEM;

	if (pool_present) {
		kfree(rx_ring->rx_bi);
		rx_ring->rx_bi = NULL;
		rx_ring->rx_bi_zc = sw_ring;
	} else {
		kfree(rx_ring->rx_bi_zc);
		rx_ring->rx_bi_zc = NULL;
		rx_ring->rx_bi = sw_ring;
	}
	return 0;
}

/**
 * i40e_realloc_rx_bi_zc - reallocate rx SW rings
 * @vsi: Current VSI
 * @zc: is zero copy set
 *
 * Reallocate buffer for rx_rings that might be used by XSK.
 * XDP requires more memory than rx_buf provides.
 * Returns 0 on success, negative on failure
 **/
int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
{
	struct i40e_ring *rx_ring;
	unsigned long q;

	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
		rx_ring = vsi->rx_rings[q];
		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
			return -ENOMEM;
	}
	return 0;
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate the buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate the buffer pool with
 *
 * This function enables or disables a buffer pool on a certain ring/qid.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR, EXIT}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
			   struct bpf_prog *xdp_prog)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (!err)
			return I40E_XDP_REDIR;
		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
			result = I40E_XDP_EXIT;
		else
			result = I40E_XDP_CONSUMED;
		goto out_failure;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		result = I40E_XDP_CONSUMED;
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
	}
	return result;
}

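/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Pulls up to @count xdp_buffs from the XSK buffer pool in one batch,
 * writes their DMA addresses into the Rx descriptors and bumps the
 * ring tail.
 *
 * Returns true if all @count buffers were allocated, false otherwise
 **/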
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **xdp;
	u32 nb_buffs, i;
	dma_addr_t dma;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	xdp = i40e_rx_bi(rx_ring, ntu);

	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
	if (!nb_buffs)
		return false;

	i = nb_buffs;
	while (i--) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		xdp++;
	}

	ntu += nb_buffs;
	if (ntu == rx_ring->count) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
		ntu = 0;
	}

	/* clear the status bits for the next_to_use descriptor */
	rx_desc->wb.qword1.status_error_len = 0;
	i40e_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int totalsize = xdp->data_end - xdp->data_meta;
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct skb_shared_info *sinfo = NULL;
	struct sk_buff *skb;
	u32 nr_frags = 0;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
	}
	net_prefetch(xdp->data_meta);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
	if (unlikely(!skb))
		goto out;

	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
	       ALIGN(totalsize, sizeof(long)));

	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	for (int i = 0; i < nr_frags; i++) {
		struct skb_shared_info *skinfo = skb_shinfo(skb);
		skb_frag_t *frag = &sinfo->frags[i];
		struct page *page;
		void *addr;

		page = dev_alloc_page();
		if (!page) {
			dev_kfree_skb(skb);
			skb = NULL;
			goto out;
		}
		addr = page_to_virt(page);

		/* copy the frag payload, not the struct page that backs it */
		memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));

		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
					   addr, 0, skb_frag_size(frag));
	}

out:
	xsk_buff_free(xdp);
	return skb;
}

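/* Per-packet handling of the XDP verdict: for REDIR/TX the buffer
 * ownership has already been handed off, EXIT marks a failure so the
 * Rx loop backs off, CONSUMED returns the buffer to the pool, and PASS
 * copies the frame into an skb and hands it to the stack.
 */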
static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int xdp_res,
				      bool *failure)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = xdp_get_buff_len(xdp_buff);

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_EXIT) {
		*failure = true;
		return;
	}

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}

	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (not set by default).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled already.
	 */
	WARN_ON_ONCE(1);
}

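/* Attach one more zero-copy buffer as a frag of the packet's first
 * xdp_buff; gives up and frees the chain once MAX_SKB_FRAGS is reached.
 */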
static int
i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
		  struct xdp_buff *xdp, const unsigned int size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);

	if (!xdp_buff_has_frags(first)) {
		sinfo->nr_frags = 0;
		sinfo->xdp_frags_size = 0;
		xdp_buff_set_frags_flag(first);
	}

	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
		xsk_buff_free(first);
		return -ENOMEM;
	}

	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
				   virt_to_page(xdp->data_hard_start),
				   XDP_PACKET_HEADROOM, size);
	sinfo->xdp_frags_size += size;
	xsk_buff_add_frag(xdp);

	return 0;
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 next_to_process = rx_ring->next_to_process;
	u16 next_to_clean = rx_ring->next_to_clean;
	unsigned int xdp_res, xdp_xmit = 0;
	struct xdp_buff *first = NULL;
	u32 count = rx_ring->count;
	struct bpf_prog *xdp_prog;
	u32 entries_to_alloc;
	bool failure = false;

	if (next_to_process != next_to_clean)
		first = *i40e_rx_bi(rx_ring, next_to_clean);

	/* NB! xdp_prog will always be !NULL, since this path is only
	 * enabled by installing an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_process);
			xsk_buff_free(bi);
			if (++next_to_process == count)
				next_to_process = 0;
			continue;
		}

		size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_process);
		xsk_buff_set_size(bi, size);
		xsk_buff_dma_sync_for_cpu(bi);

		if (!first)
			first = bi;
		else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
			break;

		if (++next_to_process == count)
			next_to_process = 0;

		if (i40e_is_non_eop(rx_ring, rx_desc))
			continue;

		xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
		i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
					  &rx_bytes, xdp_res, &failure);
		next_to_clean = next_to_process;
		if (failure)
			break;
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		first = NULL;
	}

	rx_ring->next_to_clean = next_to_clean;
	rx_ring->next_to_process = next_to_process;

	entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
	if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

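/* Fill a single Tx descriptor from one AF_XDP descriptor: resolve the
 * frame's DMA address within the pool, sync it for the device and write
 * the address/length/command words.
 */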
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);

	*total_bytes += desc->len;
}

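/* Same as i40e_xmit_pkt() but for a fixed run of PKTS_PER_BATCH
 * descriptors, using loop_unrolled_for() so the compiler can unroll
 * the hot loop.
 */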
static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0,
							  desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

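/* Split nb_pkts into full batches of PKTS_PER_BATCH (a power of two,
 * so the masks below work) plus a remainder sent one frame at a time.
 * E.g. if PKTS_PER_BATCH were 4 and nb_pkts 10, batched would be 8
 * (two unrolled batches) and leftover 2.
 */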
static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

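/* Set the RS (report status) bit only on the last descriptor written
 * for this burst; the Tx clean path then picks up completion for the
 * whole burst in one go via i40e_get_head().
 */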
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
	if (!nb_pkts)
		return true;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -EINVAL;

	if (queue_id >= vsi->num_queue_pairs)
		return -EINVAL;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -EINVAL;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

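/**
 * i40e_xsk_clean_rx_ring - Purge buffers in the Rx ring
 * @rx_ring: ring to be cleaned
 *
 * Frees all xdp_buffs still outstanding between next_to_clean and
 * next_to_use, returning them to the XSK buffer pool.
 **/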
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	while (ntc != ntu) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
		ntc++;
		if (ntc >= rx_ring->count)
			ntc = 0;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}