// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3
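/* Registration state machine for xdp_rxq_info: a queue starts out as
 * REG_STATE_NEW, moves to REG_STATE_REGISTERED via xdp_rxq_info_reg(),
 * and back to REG_STATE_UNREGISTERED via xdp_rxq_info_unreg(). A driver
 * can mark a queue that will never run XDP as REG_STATE_UNUSED via
 * xdp_rxq_info_unused() to simplify its cleanup paths.
 */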
static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}
static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}
static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};
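/* The table above maps the small cyclic mem.id to its xdp_mem_allocator.
 * A minimal reader sketch, mirroring the lookups done in __xdp_return()
 * further below (readers hold rcu_read_lock(), not mem_id_lock):
 *
 *	struct xdp_mem_allocator *xa;
 *
 *	rcu_read_lock();
 *	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
 *	if (xa)
 *		...use xa->allocator or xa->page_pool...
 *	rcu_read_unlock();
 */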
static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}
static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}
static void mem_id_disconnect(int id)
{
	struct xdp_mem_allocator *xa;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (!xa) {
		mutex_unlock(&mem_id_lock);
		WARN(1, "Request remove non-existing id(%d), driver bug?", id);
		return;
	}

	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
		return mem_id_disconnect(id);

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}
/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
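/* Hypothetical driver usage sketch (not part of this file): register each
 * RX queue's xdp_rxq_info during ring setup and unregister it on teardown.
 * The "mydrv"/"ring" names are illustrative only:
 *
 *	static int mydrv_setup_rxq(struct mydrv_ring *ring)
 *	{
 *		int err;
 *
 *		err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *				       ring->queue_index);
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	static void mydrv_teardown_rxq(struct mydrv_ring *ring)
 *	{
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *	}
 */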
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}
/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
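/* Worked example of the cyclic behaviour: once mem_id_next reaches
 * MEM_ID_MAX and no higher ID is free, ida_simple_get() returns -ENOSPC;
 * the single retry resets mem_id_next to MEM_ID_MIN (1) and allocation
 * resumes from the lowest free ID. IDs thus increase monotonically until
 * wrap-around, keeping recently freed IDs out of circulation for as long
 * as possible.
 */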
static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem  = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
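/* Hypothetical driver usage sketch (not part of this file): create a
 * page_pool per RX ring and attach it as the queue's memory model. The
 * "ring" names are illustrative only:
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.pool_size	= ring->size,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= ring->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *	};
 *	struct page_pool *pool;
 *	int err;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pool);
 *	if (err) {
 *		page_pool_destroy(pool);
 *		return err;
 *	}
 */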
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites. Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		napi_direct &= !xdp_return_frame_no_direct();
		page_pool_put_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}
void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);
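/* Hypothetical usage sketch (not part of this file): which return helper
 * a caller picks depends on context. Inside the driver's own NAPI poll
 * loop the direct-recycle variant is safe; outside NAPI protection (e.g.
 * a teardown path or another CPU) the plain variant must be used:
 *
 *	// In softirq/NAPI context, e.g. XDP_TX completion handling:
 *	xdp_return_frame_rx_napi(xdpf);
 *
 *	// Outside NAPI protection, e.g. flushing queued frames on teardown:
 *	xdp_return_frame(xdpf);
 */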
/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);
int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);
bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
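/* Hypothetical driver usage sketch (not part of this file): the
 * attachment helpers above are meant to be called from a driver's
 * ndo_bpf hook. The "mydrv" names are illustrative only:
 *
 *	static int mydrv_xdp(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		struct mydrv_priv *priv = netdev_priv(dev);
 *
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			if (!xdp_attachment_flags_ok(&priv->xdp_info, bpf))
 *				return -EBUSY;
 *			... swap the program into the datapath ...
 *			xdp_attachment_setup(&priv->xdp_info, bpf);
 *			return 0;
 *		case XDP_QUERY_PROG:
 *			return xdp_attachment_query(&priv->xdp_info, bpf);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */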
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xdp_return_buff(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
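/* Layout sketch of the clone produced above: the freshly allocated
 * order-0 page holds the struct xdp_frame header first, then metadata,
 * then packet data, so the whole frame is later freed with a single
 * put_page() via the MEM_TYPE_PAGE_ORDER0 path in __xdp_return():
 *
 *	page_to_virt(page)
 *	|
 *	v
 *	+------------------+-----------------+------------------+
 *	| struct xdp_frame | meta (metasize) | data (len bytes) |
 *	+------------------+-----------------+------------------+
 *	                   ^                 ^
 *	                   addr              xdpf->data
 *
 * The original zero-copy buffer is handed back to its allocator via
 * xdp_return_buff() before the clone is returned.
 */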