// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>

#include "xsk_queue.h"
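/* Record the umem geometry (total size and chunk mask) on a queue.
 * A NULL queue is tolerated, presumably because not every ring is
 * configured by the time this runs.
 */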
void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
{
	if (!q)
		return;

	q->size = size;
	q->chunk_mask = chunk_mask;
}
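/* Both ring layouts are a fixed header followed by the entry array:
 * umem rings (fill/completion) carry u64 addresses, rx/tx rings carry
 * struct xdp_desc. These helpers size the shared region accordingly.
 */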
static u32 xskq_umem_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);
}
static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q)
{
	return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);
}
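/* Allocate a queue and the pages backing its ring. nentries is expected
 * to be a power of two, which is what makes the ring_mask computation
 * below valid; that validation is assumed to happen in the caller.
 */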
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	struct xsk_queue *q;
	gfp_t gfp_flags;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
		    __GFP_COMP | __GFP_NORETRY;
	size = umem_queue ? xskq_umem_get_ring_size(q) :
	       xskq_rxtx_get_ring_size(q);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}
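/* Release a queue: free the ring pages, then the queue itself. A NULL
 * queue is tolerated, presumably so callers can tear down rings
 * unconditionally.
 */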
void xskq_destroy(struct xsk_queue *q)
{
	if (!q)
		return;

	page_frag_free(q->ring);
	kfree(q);
}
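/* The fill-queue reuse array is intended to let a zero-copy driver park
 * umem addresses (u64 handles) it could not post to hardware, so they
 * can be handed out again later instead of being lost across a ring
 * reconfiguration.
 */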
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	struct xdp_umem_fq_reuse *newq;

	/* Check for overflow */
	if (nentries > (u32)roundup_pow_of_two(nentries))
		return NULL;
	nentries = roundup_pow_of_two(nentries);

	newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
	if (!newq)
		return NULL;
	memset(newq, 0, offsetof(typeof(*newq), handles));

	newq->nentries = nentries;
	return newq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
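/* Install newq as the umem's reuse queue and return whichever queue the
 * caller should free: the old queue on success, newq itself if it is too
 * small to take over the old entries, or NULL if there was nothing to
 * replace.
 */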
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq)
{
	struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;

	if (!oldq) {
		umem->fq_reuse = newq;
		return NULL;
	}

	if (newq->nentries < oldq->length)
		return newq;

	memcpy(newq->handles, oldq->handles,
	       array_size(oldq->length, sizeof(u64)));
	newq->length = oldq->length;

	umem->fq_reuse = newq;
	return oldq;
}
EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
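/* kvfree() pairs with the kvmalloc() in xsk_reuseq_prepare() and accepts
 * NULL, so the return value of xsk_reuseq_swap() can be freed directly.
 */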
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);
void xsk_reuseq_destroy(struct xdp_umem *umem)
{
	xsk_reuseq_free(umem->fq_reuse);
	umem->fq_reuse = NULL;
}
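/* Illustrative sketch only (not part of this file): a zero-copy driver
 * would typically size a reuse queue for its fill ring and install it
 * roughly as below, freeing whichever queue xsk_reuseq_swap() hands
 * back. ring_size and umem stand in for driver-local state.
 *
 *	struct xdp_umem_fq_reuse *reuseq;
 *
 *	reuseq = xsk_reuseq_prepare(ring_size);
 *	if (!reuseq)
 *		return -ENOMEM;
 *	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
 */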