/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

struct xdp_mem_allocator {
	struct xdp_mem_info mem;
	union {
		void *allocator;
		struct page_pool *page_pool;
		struct zero_copy_allocator *zc_alloc;
	};
	struct rhash_head node;
	struct rcu_head rcu;
};

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	/* Notice, driver is expected to free the *allocator,
	 * e.g. page_pool, and MUST also use RCU free.
	 */

	/* Poison memory */
	xa->mem.id = 0xFFFF;
	xa->mem.type = 0xF0F0;
	xa->allocator = (void *)0xDEAD9001;

	kfree(xa);
}

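/* Usage sketch (illustrative, not part of this file): a driver honoring
 * the RCU-free contract above could tear down its RX ring roughly like
 * this. "mydrv_rxq" and its fields are hypothetical names; the sketch
 * assumes the page_pool API of this kernel generation.
 *
 *	static void mydrv_destroy_rxq(struct mydrv_rxq *rq)
 *	{
 *		xdp_rxq_info_unreg(&rq->xdp_rxq); // schedules RCU free of the ID
 *		synchronize_rcu();                // wait out in-flight readers
 *		page_pool_destroy(rq->page_pool); // now safe to free the allocator
 *	}
 */
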
static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (id == 0)
		return;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	__xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

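/* Usage sketch (hypothetical driver code, not part of this file): a
 * driver registers the rxq_info when setting up an RX ring, before any
 * XDP program can run on it; "rq", "netdev" and "rxq_index" are
 * assumptions for illustration.
 *
 *	err = xdp_rxq_info_reg(&rq->xdp_rxq, netdev, rxq_index);
 *	if (err)
 *		return err;
 *	// ... on ring teardown, pair with xdp_rxq_info_unreg(&rq->xdp_rxq)
 */
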
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

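/* Worked example of the cyclic behavior above: IDs are handed out
 * increasing from MEM_ID_MIN (1). Once the range above mem_id_next is
 * exhausted, ida_simple_get() returns -ENOSPC, the single retry resets
 * mem_id_next to MEM_ID_MIN, and the scan restarts from the bottom;
 * IDs still held by live allocators remain reserved in the IDA and are
 * skipped rather than reused.
 */
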
static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem  = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		errno = PTR_ERR(ptr);
		goto err;
	}

	mutex_unlock(&mem_id_lock);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

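/* Usage sketch (hypothetical, not part of this file): pairing an RX
 * ring with a page_pool backed memory model. The page_pool_params
 * values are illustrative only.
 *
 *	struct page_pool_params pp_params = {
 *		.order = 0,
 *		.pool_size = 256,
 *		.nid = NUMA_NO_NODE,
 *		.dev = rq->dev,
 *	};
 *	struct page_pool *pp = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 *	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
 *	if (err)
 *		page_pool_destroy(pp);
 */
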
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites. Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (xa) {
			napi_direct &= !xdp_return_frame_no_direct();
			page_pool_put_page(xa->page_pool, page, napi_direct);
		} else {
			put_page(page);
		}
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order-0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

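/* Usage note (illustrative, not from this file): the _rx_napi variant
 * maps to napi_direct == true and is only safe from the RX NAPI context
 * that owns the page_pool, e.g. when a driver drops a frame inside its
 * own poll routine; "mydrv_*" names are hypothetical.
 *
 *	// inside mydrv_napi_poll(), when the XDP TX queue is full:
 *	xdp_return_frame_rx_napi(xdpf);	// direct, fast recycle
 *
 *	// from any other context (e.g. a remote CPU completing a
 *	// redirect), use the slower but safe variant:
 *	xdp_return_frame(xdpf);
 */
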
void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);

bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

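/* Usage sketch (hypothetical driver ndo_bpf, not part of this file):
 * the three attachment helpers above are designed to be used together,
 * roughly as below; the "mydrv_*" names are assumptions.
 *
 *	static int mydrv_xdp(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
 *				return -EBUSY;
 *			mydrv_swap_prog(priv, bpf->prog);
 *			xdp_attachment_setup(&priv->xdp, bpf);
 *			return 0;
 *		case XDP_QUERY_PROG:
 *			return xdp_attachment_query(&priv->xdp, bpf);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */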