/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 * Released under terms in GPL version 2. See COPYING.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3
static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;
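
/* Each registered memory model that needs an allocator (page_pool or
 * zero-copy) gets a small tracking record below.  The record is keyed by
 * the cyclic mem.id and looked up through an rhashtable on the frame
 * return path, so frames only need to carry the u32 id, not an allocator
 * pointer.
 */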
struct xdp_mem_allocator {
	struct xdp_mem_info mem;
	union {
		void *allocator;
		struct page_pool *page_pool;
		struct zero_copy_allocator *zc_alloc;
	};
	struct rhash_head node;
	struct rcu_head rcu;
};
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key, see rht_bucket_index */
	return key << RHT_HASH_RESERVED_SPACE;
}
static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}
static const struct rhashtable_params mem_id_rht_params = {
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};
static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	/* Notice, driver is expected to free the *allocator,
	 * e.g. page_pool, and MUST also use RCU free.
	 */

	/* Poison memory to catch use-after-free of this record */
	xa->mem.id = 0xFFFF;
	xa->mem.type = 0xF0F0;
	xa->allocator = (void *)0xDEAD9001;

	kfree(xa);
}
static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;
	int err;

	/* Nothing to unregister if no allocator-backed model was set up */
	if (id == 0)
		return;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
	if (!xa) {
		mutex_unlock(&mem_id_lock);
		return;
	}

	err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params);
	WARN_ON(err);

	call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	__xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}
/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	/* Allocate empty rhashtable */
	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}
/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		errno = PTR_ERR(ptr);
		goto err;
	}

	mutex_unlock(&mem_id_lock);

	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
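
/* Illustrative driver usage (a sketch only; the rxq fields and error
 * labels below are hypothetical, the xdp_rxq_info_* calls are the ones
 * defined in this file):
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, queue_index);
 *	if (err)
 *		goto err_rxq;
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, rxq->page_pool);
 *	if (err)
 *		goto err_unreg;
 *	...
 *	// on ring teardown
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);
 */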
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (xa)
			page_pool_put_page(xa->page_pool, page, napi_direct);
		else
			put_page(page);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}
void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);
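
/* The three helpers above differ only in what they pass to __xdp_return():
 * xdp_return_frame() uses napi_direct=false and is the safe default,
 * xdp_return_frame_rx_napi() sets napi_direct=true for callers running
 * under NAPI protection (faster page_pool recycling), and xdp_return_buff()
 * additionally passes the zero-copy handle carried by the xdp_buff.
 */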