// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */

#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp: NFP Net data path struct
 * @dma_addr: Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
        void *frag;

        if (!dp->xdp_prog) {
                frag = netdev_alloc_frag(dp->fl_bufsz);
        } else {
                struct page *page;

                page = alloc_page(GFP_KERNEL);
                frag = page ? page_address(page) : NULL;
        }
        if (!frag) {
                nn_dp_warn(dp, "Failed to alloc receive page frag\n");
                return NULL;
        }

        *dma_addr = nfp_net_dma_map_rx(dp, frag);
        if (dma_mapping_error(dp->dev, *dma_addr)) {
                nfp_net_free_frag(frag, dp->xdp_prog);
                nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
                return NULL;
        }

        return frag;
}

/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring: TX ring structure
 * @dp: NFP Net data path struct
 * @r_vec: IRQ vector servicing this ring
 * @idx: Ring index
 * @is_xdp: Is this an XDP TX ring?
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp,
                     struct nfp_net_r_vector *r_vec, unsigned int idx,
                     bool is_xdp)
{
        struct nfp_net *nn = r_vec->nfp_net;

        tx_ring->idx = idx;
        tx_ring->r_vec = r_vec;
        tx_ring->is_xdp = is_xdp;
        u64_stats_init(&tx_ring->r_vec->tx_sync);

        tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
        tx_ring->txrwb = dp->txrwb ? &dp->txrwb[idx] : NULL;
        tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring: RX ring structure
 * @r_vec: IRQ vector servicing this ring
 * @idx: Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
                     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
        struct nfp_net *nn = r_vec->nfp_net;

        rx_ring->idx = idx;
        rx_ring->r_vec = r_vec;
        u64_stats_init(&rx_ring->r_vec->rx_sync);

        rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
        rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}

/**
 * nfp_net_rx_ring_reset() - Reflect in SW the state of the freelist after disable
 * @rx_ring: RX ring structure
 *
 * Assumes that the device is stopped; must be idempotent.
 */
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
        unsigned int wr_idx, last_idx;

        /* wr_p == rd_p means ring was never fed FL bufs.  RX rings are always
         * kept at cnt - 1 FL bufs.
         */
        if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
                return;

        /* Move the empty entry to the end of the list */
        wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
        last_idx = rx_ring->cnt - 1;
        if (rx_ring->r_vec->xsk_pool) {
                rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
                memset(&rx_ring->xsk_rxbufs[last_idx], 0,
                       sizeof(*rx_ring->xsk_rxbufs));
        } else {
                rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
                memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
        }

        memset(rx_ring->rxds, 0, rx_ring->size);
        rx_ring->wr_p = 0;
        rx_ring->rd_p = 0;
}

/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries.  After the device is disabled nfp_net_rx_ring_reset() must be
 * called to restore the required ring geometry.
 */
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
                          struct nfp_net_rx_ring *rx_ring)
{
        unsigned int i;

        if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
                return;

        for (i = 0; i < rx_ring->cnt - 1; i++) {
                /* NULL skb can only happen when initial filling of the ring
                 * fails to allocate enough buffers and calls here to free
                 * already allocated ones.
                 */
                if (!rx_ring->rxbufs[i].frag)
                        continue;

                nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
                nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
                rx_ring->rxbufs[i].dma_addr = 0;
                rx_ring->rxbufs[i].frag = NULL;
        }
}

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to fill with buffers
 *
 * Return: 0 on success, -ENOMEM on failure.
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
                           struct nfp_net_rx_ring *rx_ring)
{
        struct nfp_net_rx_buf *rxbufs;
        unsigned int i;

        if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
                return 0;

        rxbufs = rx_ring->rxbufs;

        for (i = 0; i < rx_ring->cnt - 1; i++) {
                rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
                if (!rxbufs[i].frag) {
                        nfp_net_rx_ring_bufs_free(dp, rx_ring);
                        return -ENOMEM;
                }
        }

        return 0;
}

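/**
 * nfp_net_tx_rings_prepare() - Allocate and fill all TX rings of a data path
 * @nn: NFP Net device structure
 * @dp: NFP Net data path struct
 *
 * Allocates the TX ring array and, when the TXRWB capability bit is set in
 * dp->ctrl, a per-ring TX write-back area.  Rings past dp->num_stack_tx_rings
 * are XDP rings and attach to the r_vec of the corresponding stack ring.
 * Everything allocated so far is unwound on failure.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */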
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
        unsigned int r;

        dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
                               GFP_KERNEL);
        if (!dp->tx_rings)
                return -ENOMEM;

        if (dp->ctrl & NFP_NET_CFG_CTRL_TXRWB) {
                dp->txrwb = dma_alloc_coherent(dp->dev,
                                               dp->num_tx_rings * sizeof(u64),
                                               &dp->txrwb_dma, GFP_KERNEL);
                if (!dp->txrwb)
                        goto err_free_rings;
        }

        for (r = 0; r < dp->num_tx_rings; r++) {
                int bias = 0;

                if (r >= dp->num_stack_tx_rings)
                        bias = dp->num_stack_tx_rings;

                nfp_net_tx_ring_init(&dp->tx_rings[r], dp,
                                     &nn->r_vecs[r - bias], r, bias);

                if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
                        goto err_free_prev;

                if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
                        goto err_free_ring;
        }

        return 0;

err_free_prev:
        while (r--) {
                nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
err_free_ring:
                nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
        }
        if (dp->txrwb)
                dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
                                  dp->txrwb, dp->txrwb_dma);
err_free_rings:
        kfree(dp->tx_rings);
        return -ENOMEM;
}

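/**
 * nfp_net_tx_rings_free() - Free all TX rings of a data path
 * @dp: NFP Net data path struct
 *
 * Releases the buffers and descriptor memory of every TX ring, then the TX
 * write-back area (if allocated) and the ring array itself.
 */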
void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
        unsigned int r;

        for (r = 0; r < dp->num_tx_rings; r++) {
                nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
                nfp_net_tx_ring_free(dp, &dp->tx_rings[r]);
        }

        if (dp->txrwb)
                dma_free_coherent(dp->dev, dp->num_tx_rings * sizeof(u64),
                                  dp->txrwb, dp->txrwb_dma);
        kfree(dp->tx_rings);
}

/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring: RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
        struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
        struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

        if (dp->netdev)
                xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

        if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
                kvfree(rx_ring->xsk_rxbufs);
        else
                kvfree(rx_ring->rxbufs);

        if (rx_ring->rxds)
                dma_free_coherent(dp->dev, rx_ring->size,
                                  rx_ring->rxds, rx_ring->dma);

        rx_ring->cnt = 0;
        rx_ring->rxbufs = NULL;
        rx_ring->xsk_rxbufs = NULL;
        rx_ring->rxds = NULL;
        rx_ring->dma = 0;
        rx_ring->size = 0;
}

/**
 * nfp_net_rx_ring_alloc() - Allocate resources for a RX ring
 * @dp: NFP Net data path struct
 * @rx_ring: RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
        enum xdp_mem_type mem_type;
        size_t rxbuf_sw_desc_sz;
        int err;

        if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
                mem_type = MEM_TYPE_XSK_BUFF_POOL;
                rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
        } else {
                mem_type = MEM_TYPE_PAGE_ORDER0;
                rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
        }

        if (dp->netdev) {
                err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
                                       rx_ring->idx, rx_ring->r_vec->napi.napi_id);
                if (err < 0)
                        return err;

                err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
                if (err)
                        return err;
        }

        rx_ring->cnt = dp->rxd_cnt;
        rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
        rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
                                           &rx_ring->dma,
                                           GFP_KERNEL | __GFP_NOWARN);
        if (!rx_ring->rxds) {
                netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
                            rx_ring->cnt);
                goto err_alloc;
        }

        if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
                rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
                                               GFP_KERNEL);
                if (!rx_ring->xsk_rxbufs)
                        goto err_alloc;
        } else {
                rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
                                           GFP_KERNEL);
                if (!rx_ring->rxbufs)
                        goto err_alloc;
        }

        return 0;

err_alloc:
        nfp_net_rx_ring_free(rx_ring);
        return -ENOMEM;
}

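/**
 * nfp_net_rx_rings_prepare() - Allocate and fill all RX rings of a data path
 * @nn: NFP Net device structure
 * @dp: NFP Net data path struct
 *
 * Allocates the RX ring array, then descriptor memory and buffers for every
 * ring, unwinding previously allocated rings on failure.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */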
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
        unsigned int r;

        dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
                               GFP_KERNEL);
        if (!dp->rx_rings)
                return -ENOMEM;

        for (r = 0; r < dp->num_rx_rings; r++) {
                nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

                if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
                        goto err_free_prev;

                if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
                        goto err_free_ring;
        }

        return 0;

err_free_prev:
        while (r--) {
                nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
                nfp_net_rx_ring_free(&dp->rx_rings[r]);
        }
        kfree(dp->rx_rings);
        return -ENOMEM;
}

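/**
 * nfp_net_rx_rings_free() - Free all RX rings of a data path
 * @dp: NFP Net data path struct
 *
 * Releases the buffers and descriptor memory of every RX ring, then the ring
 * array itself.
 */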
void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
        unsigned int r;

        for (r = 0; r < dp->num_rx_rings; r++) {
                nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
                nfp_net_rx_ring_free(&dp->rx_rings[r]);
        }

        kfree(dp->rx_rings);
}

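/**
 * nfp_net_rx_ring_hw_cfg_write() - Write RX ring config to the device
 * @nn: NFP Net device structure
 * @rx_ring: RX ring to configure
 * @idx: Ring index
 */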
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
                             struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
        /* Write the DMA address, size and MSI-X info to the device */
        nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
        nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
        nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

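/**
 * nfp_net_tx_ring_hw_cfg_write() - Write TX ring config to the device
 * @nn: NFP Net device structure
 * @tx_ring: TX ring to configure
 * @idx: Ring index
 *
 * Programs the descriptor ring address and size, the per-ring write-back
 * address (when TXRWB is in use) and the MSI-X vector for the ring.
 */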
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
                             struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
        nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
        if (tx_ring->txrwb) {
                *tx_ring->txrwb = 0;
                nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx),
                          nn->dp.txrwb_dma + idx * sizeof(u64));
        }
        nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
        nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}

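/**
 * nfp_net_vec_clear_ring_data() - Clear ring config for a vector
 * @nn: NFP Net device structure
 * @idx: Ring index
 *
 * Zeroes the address, size, write-back and vector registers of both the RX
 * and TX ring at @idx.
 */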
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
        nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
        nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
        nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

        nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
        nn_writeq(nn, NFP_NET_CFG_TXR_WB_ADDR(idx), 0);
        nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
        nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

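/**
 * nfp_net_tx() - Transmit an skb
 * @skb: SKB to transmit
 * @netdev: netdev structure
 *
 * Dispatches to the xmit handler of the active data path implementation.
 *
 * Return: netdev_tx_t status reported by the data path xmit handler.
 */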
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
        struct nfp_net *nn = netdev_priv(netdev);

        return nn->dp.ops->xmit(skb, netdev);
}

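/* Queue an skb on the control vector (r_vecs[0]).  Unlike nfp_ctrl_tx()
 * below, this variant does not take r_vec->lock, leaving serialization to
 * the caller.
 */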
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
        struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];

        return nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
}

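/* Locked control-message transmit: takes r_vec->lock around the data path's
 * ctrl_tx_one() handler.
 */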
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
        struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
        bool ret;

        spin_lock_bh(&r_vec->lock);
        ret = nn->dp.ops->ctrl_tx_one(nn, r_vec, skb, false);
        spin_unlock_bh(&r_vec->lock);

        return ret;
}

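/**
 * nfp_net_vlan_strip() - Set the stripped VLAN tag on an skb
 * @skb: SKB to update
 * @rxd: RX descriptor for the packet
 * @meta: Parsed packet metadata
 *
 * The tag may come either from the RX descriptor or from the parsed
 * metadata, which also carries the TPID.
 *
 * Return: false if the metadata reports an unknown TPID, true otherwise.
 */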
bool nfp_net_vlan_strip(struct sk_buff *skb, const struct nfp_net_rx_desc *rxd,
                        const struct nfp_meta_parsed *meta)
{
        u16 tpid = 0, tci = 0;

        if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) {
                tpid = ETH_P_8021Q;
                tci = le16_to_cpu(rxd->rxd.vlan);
        } else if (meta->vlan.stripped) {
                if (meta->vlan.tpid == NFP_NET_VLAN_CTAG)
                        tpid = ETH_P_8021Q;
                else if (meta->vlan.tpid == NFP_NET_VLAN_STAG)
                        tpid = ETH_P_8021AD;
                else
                        return false;

                tci = meta->vlan.tci;
        }
        if (tpid)
                __vlan_hwaccel_put_tag(skb, htons(tpid), tci);

        return true;
}