/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u64 chunk_mask;
	u64 size;
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
};
/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it, so no barrier
 * is needed.
 *
 * (D) protects the load of the data to be observed to happen after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
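
/* As an illustrative sketch only (not taken from this header), the
 * pairing above corresponds roughly to the following single-producer/
 * single-consumer code; r, data, prod, cons, val, mask and nentries are
 * placeholder names, with mask == nentries - 1:
 *
 *	// producer side
 *	if (prod - READ_ONCE(r->consumer) < nentries) {	// (A)
 *		data[prod & mask] = val;
 *		smp_wmb();				// (B)
 *		WRITE_ONCE(r->producer, ++prod);
 *	}
 *
 *	// consumer side
 *	if (cons != READ_ONCE(r->producer)) {
 *		smp_rmb();				// (C)
 *		val = data[cons & mask];
 *		smp_mb();				// (D)
 *		WRITE_ONCE(r->consumer, ++cons);
 *	}
 */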
/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
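
/* A minimal usage sketch, not actual kernel code; rx_ring, tx_ring,
 * addr, len, desc and umem are placeholder names, and error handling
 * and locking are omitted:
 *
 *	// producer: RESERVE + WRITE + SUBMIT (e.g. a kernel Rx ring)
 *	if (xskq_prod_reserve_desc(rx_ring, addr, len) == 0)
 *		xskq_prod_submit(rx_ring);
 *
 *	// consumer: PEEK + READ + RELEASE (e.g. a kernel Tx ring)
 *	while (xskq_cons_peek_desc(tx_ring, &desc, umem)) {
 *		// ... read desc.addr and desc.len ...
 *		xskq_cons_release(tx_ring);
 *	}
 */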
/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_crosses_non_contig_pg(struct xdp_umem *umem,
						   u64 addr,
						   u64 length)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + length > PAGE_SIZE;
	bool next_pg_contig =
		(unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr &
			XSK_NEXT_PG_CONTIG_MASK;

	return cross_pg && !next_pg_contig;
}

static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
						u64 addr,
						u64 length,
						struct xdp_umem *umem)
{
	u64 base_addr = xsk_umem_extract_addr(addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (base_addr >= q->size || addr >= q->size ||
	    xskq_cons_crosses_non_contig_pg(umem, addr, length)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}
static inline bool xskq_cons_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}
static inline bool xskq_cons_read_addr(struct xsk_queue *q, u64 *addr,
				       struct xdp_umem *umem)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	while (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx] & q->chunk_mask;

		if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
			if (xskq_cons_is_valid_unaligned(q, *addr,
							 umem->chunk_size_nohr,
							 umem))
				return true;
			goto out;
		}

		if (xskq_cons_is_valid_addr(q, *addr))
			return true;

out:
		q->cached_cons++;
	}

	return false;
}
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xdp_umem *umem)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
		if (!xskq_cons_is_valid_unaligned(q, d->addr, d->len, umem))
			return false;

		if (d->len > umem->chunk_size_nohr || d->options) {
			q->invalid_descs++;
			return false;
		}

		return true;
	}

	if (!xskq_cons_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
	    d->options) {
		q->invalid_descs++;
		return false;
	}

	return true;
}
static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xdp_umem *umem)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, umem))
			return true;

		q->cached_cons++;
	}

	return false;
}
/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= cnt)
		return true;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= cnt;
}
static inline bool xskq_cons_peek_addr(struct xsk_queue *q, u64 *addr,
				       struct xdp_umem *umem)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr(q, addr, umem);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xdp_umem *umem)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, umem);
}
static inline void xskq_cons_release(struct xsk_queue *q)
{
	/* To improve performance, only update local state here.
	 * Reflect this to global state when we get new entries
	 * from the ring in xskq_cons_get_entries() and whenever
	 * Rx or Tx processing is completed in the NAPI loop.
	 */
	q->cached_cons++;
}
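
/* An illustrative sketch (not actual kernel code) of the batching this
 * enables; q, budget, desc and umem are placeholder names:
 *
 *	while (budget-- && xskq_cons_peek_desc(q, &desc, umem)) {
 *		// ... process desc ...
 *		xskq_cons_release(q);	// local state only
 *	}
 *	__xskq_cons_release(q);	// one smp_mb() + store for the whole batch
 */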
static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}
/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries)
		return false;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return !free_entries;
}
static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}
static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}
static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}
static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}
static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

/* Executed by the core when the entire UMEM gets freed */
void xsk_reuseq_destroy(struct xdp_umem *umem);

#endif /* _LINUX_XSK_QUEUE_H */