drivers/net/ethernet/intel/libeth/rx.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation */

#include <net/libeth/rx.h>

/* Rx buffer management */
/**
 * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
 * @pp: &page_pool_params of the netdev to calculate the size for
 * @max_len: maximum buffer size for a single descriptor
 *
 * Return: HW-writeable length per one buffer to pass it to the HW, accounting
 * for: the MTU of the netdev behind @pp, HW required alignment, minimum and
 * maximum allowed values, and the system's page size.
 */
static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
{
	u32 len;

	len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
	len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
	len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
		   pp->max_len);

	return len;
}
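/*
 * A worked example of the calculation above (illustrative only; assumes
 * LIBETH_RX_LL_LEN covers the Ethernet header plus VLAN and FCS overhead,
 * a 128-byte LIBETH_RX_BUF_STRIDE, 4k pages, and no tighter @max_len cap):
 *
 *	MTU 1500 + link-layer overhead		-> ~1526 bytes
 *	ALIGN(len, 128)				-> 1536 bytes
 *	min3(1536, max_len, pp->max_len)	-> 1536, well under one page
 *
 * so a 1500-byte MTU would typically program the HW with 1536-byte buffers.
 */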
/**
 * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
 * @pp: &page_pool_params of the netdev to calculate the size for
 * @max_len: maximum buffer size for a single descriptor
 * @truesize: desired truesize for the buffers
 *
 * Return: HW-writeable length per one buffer to pass it to the HW, ignoring
 * the MTU and closest to the passed @truesize. Can be used for "short" buffer
 * queues to fragment pages more efficiently.
 */
static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
				     u32 max_len, u32 truesize)
{
	u32 min, len;

	min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE);
	truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min),
			 PAGE_SIZE << LIBETH_RX_PAGE_ORDER);

	len = SKB_WITH_OVERHEAD(truesize - pp->offset);
	len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
	len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
		   pp->max_len);

	return len;
}
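/*
 * Illustrative arithmetic for the above (hypothetical, assuming a typical
 * x86_64 build: 64 bytes of headroom in pp->offset, a ~320-byte aligned
 * &skb_shared_info behind SKB_WITH_OVERHEAD(), and a 128-byte stride):
 *
 *	truesize 2048 requested		-> stays 2048 (already a pow2)
 *	SKB_WITH_OVERHEAD(2048 - 64)	-> ~1664 bytes
 *	ALIGN_DOWN(1664, 128)		-> 1664 bytes
 *
 * i.e. a "short" queue asking for a 2k truesize gets ~1.6k HW-writeable
 * buffers, letting two such buffers share one 4k page.
 */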
/**
 * libeth_rx_page_pool_params - calculate params with the stack overhead
 * @fq: buffer queue to calculate the size for
 * @pp: &page_pool_params of the netdev
 *
 * Set the PP params to fit all needed stack overhead (headroom, tailroom) and
 * both the HW buffer length and the truesize for all types of buffers. For
 * "short" buffers, truesize never exceeds the "wanted" one; for the rest,
 * it can be up to the page size.
 *
 * Return: true on success, false on invalid input params.
 */
static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
				       struct page_pool_params *pp)
{
	pp->offset = LIBETH_SKB_HEADROOM;
	/* HW-writeable / syncable length per one page */
	pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);

	/* HW-writeable length per buffer */
	switch (fq->type) {
	case LIBETH_FQE_MTU:
		fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
		break;
	case LIBETH_FQE_SHORT:
		fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
							fq->truesize);
		break;
	case LIBETH_FQE_HDR:
		fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
		break;
	default:
		return false;
	}

	/* Buffer size to allocate */
	fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
							 fq->buf_len));

	return true;
}
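/*
 * For instance (hypothetical numbers, 4k pages and 64-byte headroom
 * assumed): an MTU queue with buf_len = 1536 gives
 *
 *	SKB_HEAD_ALIGN(64 + 1536)	-> ~1920 bytes incl. &skb_shared_info
 *	roundup_pow_of_two(1920)	-> truesize of 2048
 *
 * so each page-pool page can host two such buffers plus their overhead.
 */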
/**
 * libeth_rx_page_pool_params_zc - calculate params without the stack overhead
 * @fq: buffer queue to calculate the size for
 * @pp: &page_pool_params of the netdev
 *
 * Set the PP params to exclude the stack overhead and set both the buffer
 * length and the truesize, which are equal for the data buffers. Note that
 * this requires separate header buffers to always be active and to account
 * for the overhead.
 * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the
 * zerocopy mode.
 *
 * Return: true on success, false on invalid input params.
 */
static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
					  struct page_pool_params *pp)
{
	u32 mtu, max;

	pp->offset = 0;
	pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;

	switch (fq->type) {
	case LIBETH_FQE_MTU:
		mtu = READ_ONCE(pp->netdev->mtu);
		break;
	case LIBETH_FQE_SHORT:
		mtu = fq->truesize;
		break;
	default:
		return false;
	}

	mtu = roundup_pow_of_two(mtu);
	max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
		  pp->max_len);

	fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
	fq->truesize = fq->buf_len;

	return true;
}
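/*
 * E.g. with an MTU of 1500 and no explicit buf_len cap (hypothetical
 * values): roundup_pow_of_two(1500) = 2048, so both buf_len and truesize
 * become 2048 and the HW writes at offset 0 with no per-buffer stack
 * overhead, as headers land in the separate header buffers.
 */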
/**
 * libeth_rx_fq_create - create a PP with the default libeth settings
 * @fq: buffer queue struct to fill
 * @napi: &napi_struct covering this PP (no usage outside its poll loops)
 *
 * Return: %0 on success, -%errno on failure.
 */
int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= LIBETH_RX_PAGE_ORDER,
		.pool_size	= fq->count,
		.nid		= fq->nid,
		.dev		= napi->dev->dev.parent,
		.netdev		= napi->dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
	};
	struct libeth_fqe *fqes;
	struct page_pool *pool;
	bool ret;

	if (!fq->hsplit)
		ret = libeth_rx_page_pool_params(fq, &pp);
	else
		ret = libeth_rx_page_pool_params_zc(fq, &pp);
	if (!ret)
		return -EINVAL;

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
	if (!fqes)
		goto err_buf;

	fq->fqes = fqes;
	fq->pp = pool;

	return 0;

err_buf:
	page_pool_destroy(pool);

	return -ENOMEM;
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, "LIBETH");
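/*
 * A minimal usage sketch (illustrative only; the queue-vector structure and
 * the field values below are hypothetical, not part of libeth):
 *
 *	struct libeth_fq fq = {
 *		.count	= 512,
 *		.type	= LIBETH_FQE_MTU,
 *		.hsplit	= false,
 *		.nid	= NUMA_NO_NODE,
 *	};
 *	int err;
 *
 *	err = libeth_rx_fq_create(&fq, &q_vector->napi);
 *	if (err)
 *		return err;
 *
 * On success, fq.buf_len is what gets programmed into the HW Rx descriptors,
 * fq.pp and fq.fqes are used on the hotpath, and libeth_rx_fq_destroy(&fq)
 * undoes all of the above during teardown.
 */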
/**
 * libeth_rx_fq_destroy - destroy a &page_pool created by libeth
 * @fq: buffer queue to process
 */
void libeth_rx_fq_destroy(struct libeth_fq *fq)
{
	kvfree(fq->fqes);
	page_pool_destroy(fq->pp);
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, "LIBETH");
/**
 * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
 * @page: page to recycle
 *
 * To be used on exceptions or rare cases not requiring fast inline recycling.
 */
void libeth_rx_recycle_slow(struct page *page)
{
	page_pool_recycle_direct(page->pp, page);
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, "LIBETH");
/* Converting abstract packet type numbers into a software structure with
 * the packet parameters to do O(1) lookup on Rx.
 */
static const u16 libeth_rx_pt_xdp_oip[] = {
	[LIBETH_RX_PT_OUTER_L2]		= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_OUTER_IPV4]	= XDP_RSS_L3_IPV4,
	[LIBETH_RX_PT_OUTER_IPV6]	= XDP_RSS_L3_IPV6,
};
static const u16 libeth_rx_pt_xdp_iprot[] = {
	[LIBETH_RX_PT_INNER_NONE]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_INNER_UDP]	= XDP_RSS_L4_UDP,
	[LIBETH_RX_PT_INNER_TCP]	= XDP_RSS_L4_TCP,
	[LIBETH_RX_PT_INNER_SCTP]	= XDP_RSS_L4_SCTP,
	[LIBETH_RX_PT_INNER_ICMP]	= XDP_RSS_L4_ICMP,
	[LIBETH_RX_PT_INNER_TIMESYNC]	= XDP_RSS_TYPE_NONE,
};
static const u16 libeth_rx_pt_xdp_pl[] = {
	[LIBETH_RX_PT_PAYLOAD_NONE]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L2]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L3]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L4]	= XDP_RSS_L4,
};
/**
 * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
 * @pt: PT structure to evaluate
 *
 * Generates the ``hash_type`` field with XDP RSS type values from the parsed
 * packet parameters if they're obtained dynamically at runtime.
 */
void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
{
	pt->hash_type = 0;
	pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
	pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
	pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, "LIBETH");
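/*
 * Illustrative example (driver-side values hypothetical): a driver fills one
 * &libeth_rx_pt per HW packet type at init and generates the hash type once:
 *
 *	struct libeth_rx_pt pt = {
 *		.outer_ip	= LIBETH_RX_PT_OUTER_IPV4,
 *		.inner_prot	= LIBETH_RX_PT_INNER_TCP,
 *		.payload_layer	= LIBETH_RX_PT_PAYLOAD_L4,
 *	};
 *
 *	libeth_rx_pt_gen_hash_type(&pt);
 *
 * Per the tables above, pt.hash_type then carries
 * XDP_RSS_L3_IPV4 | XDP_RSS_L4_TCP | XDP_RSS_L4, ready for O(1) lookup on
 * the Rx hotpath.
 */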
/* Module */

MODULE_DESCRIPTION("Common Ethernet library");
MODULE_LICENSE("GPL");