// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <net/xfrm.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../nfp_net_xsk.h"
#include "../crypto/crypto.h"
#include "../crypto/fw.h"
#include "nfd3.h"

/* Transmit processing
 *
 * One queue controller peripheral queue is used for transmit. The
 * driver en-queues packets for transmit by advancing the write
 * pointer. The device indicates that packets have transmitted by
 * advancing the read pointer. The driver maintains a local copy of
 * the read and write pointer in @struct nfp_net_tx_ring. The driver
 * keeps @wr_p in sync with the queue controller write pointer and can
 * determine how many packets have been transmitted by comparing its
 * copy of the read pointer @rd_p with the read pointer maintained by
 * the queue controller peripheral.
 */

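/* Illustrative sketch (not part of the driver): with free-running 32-bit
 * wr_p/rd_p the number of in-flight descriptors is a plain unsigned
 * subtraction, which stays correct across wrap-around as long as the ring
 * size is a power of two and occupancy never exceeds it:
 *
 *	static inline u32 ring_used(u32 wr_p, u32 rd_p)
 *	{
 *		return wr_p - rd_p;		// modulo 2^32 arithmetic
 *	}
 *
 *	static inline u32 ring_free(u32 cnt, u32 wr_p, u32 rd_p)
 *	{
 *		return cnt - ring_used(wr_p, rd_p);
 *	}
 *
 * ring_used()/ring_free() are hypothetical helpers; the driver expresses the
 * same check through nfp_net_tx_full() below.
 */
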
/* Wrappers for deciding when to stop and restart TX queues */
static int nfp_nfd3_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_nfd3_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}

/**
 * nfp_nfd3_tx_ring_stop() - stop tx ring
 * @nd_q:    netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring. Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */
static void
nfp_nfd3_tx_ring_stop(struct netdev_queue *nd_q,
		      struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_nfd3_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}

/**
 * nfp_nfd3_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to HW TX descriptor
 * @skb: Pointer to SKB
 * @md_bytes: Prepend length
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 * Return error on packet header greater than maximum supported LSO header size.
 */
static void
nfp_nfd3_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_nfd3_tx_buf *txbuf,
		struct nfp_nfd3_tx_desc *txd, struct sk_buff *skb, u32 md_bytes)
{
	u32 l3_offset, l4_offset, hdrlen, l4_hdrlen;
	u16 mss;

	if (!skb_is_gso(skb))
		return;

	if (!skb->encapsulation) {
		l3_offset = skb_network_offset(skb);
		l4_offset = skb_transport_offset(skb);
		l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
			    sizeof(struct udphdr) : tcp_hdrlen(skb);
	} else {
		l3_offset = skb_inner_network_offset(skb);
		l4_offset = skb_inner_transport_offset(skb);
		l4_hdrlen = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
			    sizeof(struct udphdr) : inner_tcp_hdrlen(skb);
	}

	hdrlen = l4_offset + l4_hdrlen;
	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

	mss = skb_shinfo(skb)->gso_size & NFD3_DESC_TX_MSS_MASK;
	txd->l3_offset = l3_offset - md_bytes;
	txd->l4_offset = l4_offset - md_bytes;
	txd->lso_hdrlen = hdrlen - md_bytes;
	txd->mss = cpu_to_le16(mss);
	txd->flags |= NFD3_DESC_TX_LSO;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);
}

/**
 * nfp_nfd3_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp: NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to TX descriptor
 * @skb: Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */
static void
nfp_nfd3_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_nfd3_tx_buf *txbuf, struct nfp_nfd3_tx_desc *txd,
		 struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;
	u8 l4_hdr;

	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	txd->flags |= NFD3_DESC_TX_CSUM;
	if (skb->encapsulation)
		txd->flags |= NFD3_DESC_TX_ENCAP;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		txd->flags |= NFD3_DESC_TX_IP4_CSUM;
		l4_hdr = iph->protocol;
	} else if (ipv6h->version == 6) {
		l4_hdr = ipv6h->nexthdr;
	} else {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
		return;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
		txd->flags |= NFD3_DESC_TX_TCP_CSUM;
		break;
	case IPPROTO_UDP:
		txd->flags |= NFD3_DESC_TX_UDP_CSUM;
		break;
	default:
		nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
		return;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (skb->encapsulation)
		r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
	else
		r_vec->hw_csum_tx += txbuf->pkt_cnt;
	u64_stats_update_end(&r_vec->tx_sync);
}

static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
				 u64 tls_handle, bool *ipsec)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct nfp_ipsec_offload offload_info;
	unsigned char *data;
	bool vlan_insert;
	u32 meta_id = 0;
	int md_bytes;

#ifdef CONFIG_NFP_NET_IPSEC
	if (xfrm_offload(skb))
		*ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
#endif

	if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
		md_dst = NULL;

	vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);

	if (!(md_dst || tls_handle || vlan_insert || *ipsec))
		return 0;

	md_bytes = sizeof(meta_id) +
		   (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
		   (!!tls_handle ? NFP_NET_META_CONN_HANDLE_SIZE : 0) +
		   (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
		   (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);

	if (unlikely(skb_cow_head(skb, md_bytes)))
		return -ENOMEM;

	data = skb_push(skb, md_bytes) + md_bytes;
	if (md_dst) {
		data -= NFP_NET_META_PORTID_SIZE;
		put_unaligned_be32(md_dst->u.port_info.port_id, data);
		meta_id = NFP_NET_META_PORTID;
	}
	if (tls_handle) {
		/* conn handle is opaque, we just use u64 to be able to quickly
		 * compare it to zero
		 */
		data -= NFP_NET_META_CONN_HANDLE_SIZE;
		memcpy(data, &tls_handle, sizeof(tls_handle));
		meta_id <<= NFP_NET_META_FIELD_SIZE;
		meta_id |= NFP_NET_META_CONN_HANDLE;
	}
	if (vlan_insert) {
		data -= NFP_NET_META_VLAN_SIZE;
		/* data type of skb->vlan_proto is __be16
		 * so it fills metadata without calling put_unaligned_be16
		 */
		memcpy(data, &skb->vlan_proto, sizeof(skb->vlan_proto));
		put_unaligned_be16(skb_vlan_tag_get(skb), data + sizeof(skb->vlan_proto));
		meta_id <<= NFP_NET_META_FIELD_SIZE;
		meta_id |= NFP_NET_META_VLAN;
	}
	if (*ipsec) {
		data -= NFP_NET_META_IPSEC_SIZE;
		put_unaligned_be32(offload_info.seq_hi, data);
		data -= NFP_NET_META_IPSEC_SIZE;
		put_unaligned_be32(offload_info.seq_low, data);
		data -= NFP_NET_META_IPSEC_SIZE;
		put_unaligned_be32(offload_info.handle - 1, data);
		meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
		meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
	}

	data -= sizeof(meta_id);
	put_unaligned_be32(meta_id, data);

	return md_bytes;
}

/**
 * nfp_nfd3_tx() - Main transmit entry point
 * @skb: SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int f, nr_frags, wr_idx, md_bytes;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	struct nfp_nfd3_tx_buf *txbuf;
	struct nfp_nfd3_tx_desc *txd;
	struct netdev_queue *nd_q;
	const skb_frag_t *frag;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	unsigned int fsize;
	u64 tls_handle = 0;
	bool ipsec = false;
	u16 qidx;

	dp = &nn->dp;
	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		nd_q = netdev_get_tx_queue(dp->netdev, qidx);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	skb = nfp_net_tls_tx(dp, r_vec, skb, &tls_handle, &nr_frags);
	if (unlikely(!skb)) {
		nfp_net_tx_xmit_more_flush(tx_ring);
		return NETDEV_TX_OK;
	}

	md_bytes = nfp_nfd3_prep_tx_meta(dp, skb, tls_handle, &ipsec);
	if (unlikely(md_bytes < 0))
		goto err_flush;

	/* Start with the head skbuf */
	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_dma_err;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = skb->len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = (nr_frags ? 0 : NFD3_DESC_TX_EOP) | md_bytes;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr_40b(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
	nfp_nfd3_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
	if (ipsec)
		nfp_nfd3_ipsec_tx(txd, skb);
	else
		nfp_nfd3_tx_csum(dp, r_vec, txbuf, txd, skb);
	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
		txd->flags |= NFD3_DESC_TX_VLAN;
		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}

	/* Gather DMA */
	if (nr_frags > 0) {
		__le64 second_half;

		/* all descs must match except for in addr, length and eop */
		second_half = txd->vals8[1];

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			fsize = skb_frag_size(frag);

			dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
						    fsize, DMA_TO_DEVICE);
			if (dma_mapping_error(dp->dev, dma_addr))
				goto err_unmap;

			wr_idx = D_IDX(tx_ring, wr_idx + 1);
			tx_ring->txbufs[wr_idx].skb = skb;
			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
			tx_ring->txbufs[wr_idx].fidx = f;

			txd = &tx_ring->txds[wr_idx];
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr_40b(txd, dma_addr);
			txd->offset_eop = md_bytes |
				((f == nr_frags - 1) ? NFD3_DESC_TX_EOP : 0);
			txd->vals8[1] = second_half;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	skb_tx_timestamp(skb);

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_nfd3_tx_ring_should_stop(tx_ring))
		nfp_nfd3_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);

	return NETDEV_TX_OK;

err_unmap:
	while (--f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_dma_err:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
err_flush:
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	nfp_net_tls_tx_undo(skb, tls_handle);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * nfp_nfd3_tx_complete() - Handle completed TX packets
 * @tx_ring: TX ring structure
 * @budget: NAPI budget (only used as bool to determine if in NAPI context)
 */
void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	struct netdev_queue *nd_q;
	u32 qcp_rd_p;
	int todo;

	if (tx_ring->wr_p == tx_ring->rd_p)
		return;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	while (todo--) {
		const skb_frag_t *frag;
		struct nfp_nfd3_tx_buf *tx_buf;
		struct sk_buff *skb;
		int fidx, nr_frags;
		int idx;

		idx = D_IDX(tx_ring, tx_ring->rd_p++);
		tx_buf = &tx_ring->txbufs[idx];

		skb = tx_buf->skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_buf->fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_buf->dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_buf->pkt_cnt;
			done_bytes += tx_buf->real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(dp->dev, tx_buf->dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			napi_consume_skb(skb, budget);

		tx_buf->dma_addr = 0;
		tx_buf->skb = NULL;
		tx_buf->fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	if (!dp->netdev)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_nfd3_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}

static bool nfp_nfd3_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	bool done_all;
	int idx, todo;
	u32 qcp_rd_p;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_net_read_tx_cmpl(tx_ring, dp);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return true;

	todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);

	done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
	todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);

	tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);

	done_pkts = todo;
	while (todo--) {
		idx = D_IDX(tx_ring, tx_ring->rd_p);
		tx_ring->rd_p++;

		done_bytes += tx_ring->txbufs[idx].real_len;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);

	return done_all;
}

/* Receive processing
 */

static void *
nfp_nfd3_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog) {
		frag = napi_alloc_frag(dp->fl_bufsz);
		if (unlikely(!frag))
			return NULL;
	} else {
		struct page *page;

		page = dev_alloc_page();
		if (unlikely(!page))
			return NULL;
		frag = page_address(page);
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}

/**
 * nfp_nfd3_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring structure
 * @frag:	page fragment buffer
 * @dma_addr:	DMA address of skb mapping
 */
static void
nfp_nfd3_rx_give_one(const struct nfp_net_dp *dp,
		     struct nfp_net_rx_ring *rx_ring,
		     void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	/* DMA address is expanded to 48-bit width in freelist for NFP3800,
	 * so the *_48b macro is used accordingly, it's also OK to fill
	 * a 40-bit address since the top 8 bits get set to 0.
	 */
	nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
				  dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
	}
}

/**
 * nfp_nfd3_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp:	     NFP Net data path struct
 * @rx_ring: RX ring to fill
 */
void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
				    struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_nfd3_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				     rx_ring->rxbufs[i].dma_addr);
}

/**
 * nfp_nfd3_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */
static int nfp_nfd3_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}

/**
 * nfp_nfd3_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp:  NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd: Pointer to RX descriptor
 * @meta: Parsed metadata prepend
 * @skb: Pointer to SKB
 */
void
nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 const struct nfp_net_rx_desc *rxd,
		 const struct nfp_meta_parsed *meta, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(dp->netdev->features & NETIF_F_RXCSUM))
		return;

	if (meta->csum_type) {
		skb->ip_summed = meta->csum_type;
		skb->csum = meta->csum;
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_complete++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	if (nfp_nfd3_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	/* Assume that the firmware will never report inner CSUM_OK unless outer
	 * L4 headers were successfully parsed. FW will always report zero UDP
	 * checksum as CSUM_OK.
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}

static void
nfp_nfd3_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		  unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		meta->hash_type = PKT_HASH_TYPE_L3;
		break;
	default:
		meta->hash_type = PKT_HASH_TYPE_L4;
		break;
	}

	meta->hash = get_unaligned_be32(hash);
}

static void
nfp_nfd3_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
		       void *data, struct nfp_net_rx_desc *rxd)
{
	struct nfp_net_rx_hash *rx_hash = data;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	nfp_nfd3_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
			  &rx_hash->hash);
}

static bool
nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		    void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
	u32 meta_info, vlan_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_nfd3_set_hash(netdev, meta,
					  meta_info & NFP_NET_META_FIELD_MASK,
					  (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_VLAN:
			vlan_info = get_unaligned_be32(data);
			if (FIELD_GET(NFP_NET_META_VLAN_STRIP, vlan_info)) {
				meta->vlan.stripped = true;
				meta->vlan.tpid = FIELD_GET(NFP_NET_META_VLAN_TPID_MASK,
							    vlan_info);
				meta->vlan.tci = FIELD_GET(NFP_NET_META_VLAN_TCI_MASK,
							   vlan_info);
			}
			data += 4;
			break;
		case NFP_NET_META_PORTID:
			meta->portid = get_unaligned_be32(data);
			data += 4;
			break;
		case NFP_NET_META_CSUM:
			meta->csum_type = CHECKSUM_COMPLETE;
			meta->csum =
				(__force __wsum)__get_unaligned_cpu32(data);
			data += 4;
			break;
		case NFP_NET_META_RESYNC_INFO:
			if (nfp_net_tls_rx_resync_req(netdev, data, pkt,
						      pkt_len))
				return true;
			data += sizeof(struct nfp_net_tls_resync_req);
			break;
#ifdef CONFIG_NFP_NET_IPSEC
		case NFP_NET_META_IPSEC:
			/* Note: IPsec packet will have zero saidx, so need add 1
			 * to indicate packet is IPsec packet within driver.
			 */
			meta->ipsec_saidx = get_unaligned_be32(data) + 1;
			data += 4;
			break;
#endif
		default:
			return true;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data != pkt;
}

static void
nfp_nfd3_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		 struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	/* If we have both skb and rxbuf the replacement buffer allocation
	 * must have failed, count this as an alloc failure.
	 */
	if (skb && rxbuf)
		r_vec->rx_replace_buf_alloc_fail++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* skb is build based on the frag, free_skb() would free the frag
	 * so to be able to reuse it we need an extra ref.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}

static bool
nfp_nfd3_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		    struct nfp_net_tx_ring *tx_ring,
		    struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		    unsigned int pkt_len, bool *completed)
{
	unsigned int dma_map_sz = dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA;
	struct nfp_nfd3_tx_buf *txbuf;
	struct nfp_nfd3_tx_desc *txd;
	int wr_idx;

	/* Reject if xdp_adjust_tail grow packet beyond DMA area */
	if (pkt_len + dma_off > dma_map_sz)
		return false;

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		if (!*completed) {
			nfp_nfd3_xdp_complete(tx_ring);
			*completed = true;
		}

		if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
			nfp_nfd3_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
					 NULL);
			return false;
		}
	}

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];

	nfp_nfd3_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);

	txbuf->frag = rxbuf->frag;
	txbuf->dma_addr = rxbuf->dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = pkt_len;

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = NFD3_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr_40b(txd, rxbuf->dma_addr + dma_off);
	txd->data_len = cpu_to_le16(pkt_len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	return true;
}

/**
 * nfp_nfd3_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */
static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	int idx, pkts_polled = 0;
	bool xdp_tx_cmpl = false;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
		      &rx_ring->xdp_rxq);
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		bool redir_egress = false;
		struct net_device *netdev;
		dma_addr_t new_dma_addr;
		u32 meta_len_xdp = 0;
		void *new_frag;

		idx = D_IDX(rx_ring, rx_ring->rd_p);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		rx_ring->rd_p++;
		pkts_polled++;

		rxbuf =	&rx_ring->rxbufs[idx];
		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
		else
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
				   meta_len);
			nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
					data_len);

		if (!dp->chained_metadata_format) {
			nfp_nfd3_set_hash_desc(dp->netdev, &meta,
					       rxbuf->frag + meta_off, rxd);
		} else if (meta_len) {
			if (unlikely(nfp_nfd3_parse_meta(dp->netdev, &meta,
							 rxbuf->frag + meta_off,
							 rxbuf->frag + pkt_off,
							 pkt_len, meta_len))) {
				nn_dp_warn(dp, "invalid RX packet metadata\n");
				nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
						 NULL);
				continue;
			}
		}

		if (xdp_prog && !meta.portid) {
			void *orig_data = rxbuf->frag + pkt_off;
			unsigned int dma_off;
			int act;

			xdp_prepare_buff(&xdp,
					 rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
					 pkt_off - NFP_NET_RX_BUF_HEADROOM,
					 pkt_len, true);

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			pkt_len = xdp.data_end - xdp.data;
			pkt_off += xdp.data - orig_data;

			switch (act) {
			case XDP_PASS:
				meta_len_xdp = xdp.data - xdp.data_meta;
				break;
			case XDP_TX:
				dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
				if (unlikely(!nfp_nfd3_tx_xdp_buf(dp, rx_ring,
								  tx_ring,
								  rxbuf,
								  dma_off,
								  pkt_len,
								  &xdp_tx_cmpl)))
					trace_xdp_exception(dp->netdev,
							    xdp_prog, act);
				continue;
			default:
				bpf_warn_invalid_xdp_action(dp->netdev, xdp_prog, act);
				fallthrough;
			case XDP_ABORTED:
				trace_xdp_exception(dp->netdev, xdp_prog, act);
				fallthrough;
			case XDP_DROP:
				nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
						     rxbuf->dma_addr);
				continue;
			}
		}

		if (likely(!meta.portid)) {
			netdev = dp->netdev;
		} else if (meta.portid == NFP_META_PORT_ID_CTRL) {
			struct nfp_net *nn = netdev_priv(dp->netdev);

			nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
					    pkt_len);
			nfp_nfd3_rx_give_one(dp, rx_ring, rxbuf->frag,
					     rxbuf->dma_addr);
			continue;
		} else {
			struct nfp_net *nn;

			nn = netdev_priv(dp->netdev);
			netdev = nfp_app_dev_get(nn->app, meta.portid,
						 &redir_egress);
			if (unlikely(!netdev)) {
				nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf,
						 NULL);
				continue;
			}

			if (nfp_netdev_is_nfp_repr(netdev))
				nfp_repr_inc_rx_stats(netdev, pkt_len);
		}

		skb = napi_build_skb(rxbuf->frag, true_bufsz);
		if (unlikely(!skb)) {
			nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

		nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, pkt_off);
		skb_put(skb, pkt_len);

		skb->mark = meta.mark;
		skb_set_hash(skb, meta.hash, meta.hash_type);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, netdev);

		nfp_nfd3_rx_csum(dp, r_vec, rxd, &meta, skb);

#ifdef CONFIG_TLS_DEVICE
		if (rxd->rxd.flags & PCIE_DESC_RX_DECRYPTED) {
			skb->decrypted = true;
			u64_stats_update_begin(&r_vec->rx_sync);
			r_vec->hw_tls_rx++;
			u64_stats_update_end(&r_vec->rx_sync);
		}
#endif

		if (unlikely(!nfp_net_vlan_strip(skb, rxd, &meta))) {
			nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
			continue;
		}

#ifdef CONFIG_NFP_NET_IPSEC
		if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
			nfp_nfd3_rx_drop(dp, r_vec, rx_ring, NULL, skb);
			continue;
		}
#endif

		if (meta_len_xdp)
			skb_metadata_set(skb, meta_len_xdp);

		if (likely(!redir_egress)) {
			napi_gro_receive(&rx_ring->r_vec->napi, skb);
		} else {
			skb->dev = netdev;
			skb_reset_network_header(skb);
			__skb_push(skb, ETH_HLEN);
			dev_queue_xmit(skb);
		}
	}

	if (xdp_prog) {
		if (tx_ring->wr_ptr_add)
			nfp_net_tx_xmit_more_flush(tx_ring);
		else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
			 !xdp_tx_cmpl)
			if (!nfp_nfd3_xdp_complete(tx_ring))
				pkts_polled = budget;
	}

	return pkts_polled;
}

/**
 * nfp_nfd3_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */
int nfp_nfd3_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled = 0;

	if (r_vec->tx_ring)
		nfp_nfd3_tx_complete(r_vec->tx_ring, budget);
	if (r_vec->rx_ring)
		pkts_polled = nfp_nfd3_rx(r_vec->rx_ring, budget);

	if (pkts_polled < budget)
		if (napi_complete_done(napi, pkts_polled))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

	if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
		struct dim_sample dim_sample = {};
		unsigned int start;
		u64 pkts, bytes;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			pkts = r_vec->rx_pkts;
			bytes = r_vec->rx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->rx_dim, &dim_sample);
	}

	if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
		struct dim_sample dim_sample = {};
		unsigned int start;
		u64 pkts, bytes;

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			pkts = r_vec->tx_pkts;
			bytes = r_vec->tx_bytes;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));

		dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample);
		net_dim(&r_vec->tx_dim, &dim_sample);
	}

	return pkts_polled;
}

/* Control device data path
 */

bool
nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		     struct sk_buff *skb, bool old)
{
	unsigned int real_len = skb->len, meta_len = 0;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_nfd3_tx_buf *txbuf;
	struct nfp_nfd3_tx_desc *txd;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	int wr_idx;

	dp = &r_vec->nfp_net->dp;
	tx_ring = r_vec->tx_ring;

	if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
		nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
		goto err_free;
	}

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		if (!old)
			__skb_queue_tail(&r_vec->queue, skb);
		else
			__skb_queue_head(&r_vec->queue, skb);
		return true;
	}

	if (nfp_app_ctrl_has_meta(nn->app)) {
		if (unlikely(skb_headroom(skb) < 8)) {
			nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
			goto err_free;
		}
		meta_len = 8;
		put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
		put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
	}

	/* Start with the head skbuf */
	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_dma_warn;

	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = real_len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = meta_len | NFD3_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr_40b(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->lso_hdrlen = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	nfp_net_tx_xmit_more_flush(tx_ring);

	return false;

err_dma_warn:
	nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
err_free:
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return false;
}

static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&r_vec->queue)))
		if (nfp_nfd3_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
			return;
}

static bool
nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
{
	u32 meta_type, meta_tag;

	if (!nfp_app_ctrl_has_meta(nn->app))
		return !meta_len;

	if (meta_len != 8)
		return false;

	meta_type = get_unaligned_be32(data);
	meta_tag = get_unaligned_be32(data + 4);

	return (meta_type == NFP_NET_META_PORTID &&
		meta_tag == NFP_META_PORT_ID_CTRL);
}

static bool
nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
		struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
{
	unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
	struct nfp_net_rx_buf *rxbuf;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	struct sk_buff *skb;
	void *new_frag;
	int idx;

	idx = D_IDX(rx_ring, rx_ring->rd_p);

	rxd = &rx_ring->rxds[idx];
	if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
		return false;

	/* Memory barrier to ensure that we won't do other reads
	 * before the DD bit.
	 */
	dma_rmb();

	rx_ring->rd_p++;

	rxbuf =	&rx_ring->rxbufs[idx];
	meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
	data_len = le16_to_cpu(rxd->rxd.data_len);
	pkt_len = data_len - meta_len;

	pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		pkt_off += meta_len;
	else
		pkt_off += dp->rx_offset;
	meta_off = pkt_off - meta_len;

	/* Stats update */
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_pkts++;
	r_vec->rx_bytes += pkt_len;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);

	if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
		nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
			   meta_len);
		nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}

	skb = build_skb(rxbuf->frag, dp->fl_bufsz);
	if (unlikely(!skb)) {
		nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
		return true;
	}
	new_frag = nfp_nfd3_napi_alloc_one(dp, &new_dma_addr);
	if (unlikely(!new_frag)) {
		nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
		return true;
	}

	nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

	nfp_nfd3_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

	skb_reserve(skb, pkt_off);
	skb_put(skb, pkt_len);

	nfp_app_ctrl_rx(nn->app, skb);

	return true;
}

static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
	struct nfp_net *nn = r_vec->nfp_net;
	struct nfp_net_dp *dp = &nn->dp;
	unsigned int budget = 512;

	while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
		continue;

	return budget;
}

void nfp_nfd3_ctrl_poll(struct tasklet_struct *t)
{
	struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);

	spin_lock(&r_vec->lock);
	nfp_nfd3_tx_complete(r_vec->tx_ring, 0);
	__nfp_ctrl_tx_queued(r_vec);
	spin_unlock(&r_vec->lock);

	if (nfp_ctrl_rx(r_vec)) {
		nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
	} else {
		tasklet_schedule(&r_vec->tasklet);
		nn_dp_warn(&r_vec->nfp_net->dp,
			   "control message budget exceeded!\n");
	}
}