// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

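/* Each umem keeps a list of the XDP sockets (struct xdp_sock) that share it.
 * The two helpers below add and remove sockets from that list under
 * xsk_list_lock; readers walk the list under RCU.
 */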
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

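/* Ask the driver whether a umem is already set up on @queue_id. Returns a
 * negative errno if the query itself fails, 0 if no umem is attached, and a
 * positive value if one already is (see the ?: expression below and how
 * xdp_umem_assign_dev() maps this to -EOPNOTSUPP / -EBUSY).
 */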
int xdp_umem_query(struct net_device *dev, u16 queue_id)
{
	struct netdev_bpf bpf;

	memset(&bpf, 0, sizeof(bpf));
	bpf.command = XDP_QUERY_XSK_UMEM;
	bpf.xsk.queue_id = queue_id;

	if (!dev->netdev_ops->ndo_bpf)
		return 0;

	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
}

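/* Bind a umem to a specific queue of a netdev. XDP_ZEROCOPY and XDP_COPY
 * are mutually exclusive: XDP_ZEROCOPY fails with an error if the driver
 * cannot do zero-copy, XDP_COPY forces copy mode, and with neither flag we
 * opportunistically try zero-copy and silently fall back to copy mode.
 */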
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u32 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err;

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (force_copy)
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
		return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

	bpf.command = XDP_QUERY_XSK_UMEM;

	rtnl_lock();
	err = xdp_umem_query(dev, queue_id);
	if (err) {
		err = err < 0 ? -EOPNOTSUPP : -EBUSY;
		goto err_rtnl_unlock;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_rtnl_unlock;
	rtnl_unlock();

	dev_hold(dev);
	umem->dev = dev;
	umem->queue_id = queue_id;
	umem->zc = true;
	return 0;

err_rtnl_unlock:
	rtnl_unlock();
	return force_zc ? err : 0; /* fail or fallback */
}

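/* Undo xdp_umem_assign_dev(): tell the driver to drop the umem for this
 * queue and release the device reference. Safe to call when no device was
 * ever assigned.
 */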
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	if (umem->dev) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		rtnl_lock();
		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		rtnl_unlock();

		if (err)
			WARN(1, "failed to disable umem!\n");

		dev_put(umem->dev);
		umem->dev = NULL;
	}
}

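/* Release the user pages pinned by xdp_umem_pin_pages(). Pages are marked
 * dirty first, since the device and user space may have written to them.
 */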
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

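/* Return the pinned pages to the user's RLIMIT_MEMLOCK accounting and drop
 * the uid reference taken in xdp_umem_account_pages().
 */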
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

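/* Final teardown, run when the last reference is gone: detach from the
 * device, destroy the fill and completion rings, unpin and unaccount the
 * user memory, and free the umem itself.
 */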
static void xdp_umem_release(struct xdp_umem *umem)
{
	xdp_umem_clear_dev(umem);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xdp_umem_unpin_pages(umem);

	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

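/* Drop a reference. The actual release is deferred to a workqueue so it can
 * run in process context (it may sleep, e.g. when taking the RTNL lock).
 */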
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

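/* Pin the user memory area so it cannot be swapped out while the umem is in
 * use. get_user_pages() is called with FOLL_WRITE under mmap_sem, and a
 * partial pin is treated as failure.
 */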
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

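/* Charge the pinned pages against the user's RLIMIT_MEMLOCK. Users with
 * CAP_IPC_LOCK are exempt. The cmpxchg loop updates locked_vm atomically
 * without taking a lock.
 */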
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

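/* Validate the region described by the XDP_UMEM_REG request and set up the
 * umem: account and pin the pages and record the chunk layout.
 *
 * Illustrative example (not from the uapi docs): a 4 MiB area with
 * chunk_size 2048 and headroom 0 on a 4K-page system gives
 * npgs = 4 MiB / PAGE_SIZE = 1024 pages, chunks = 2048 and
 * chunks_per_page = 2, which satisfies all checks below.
 */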
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_per_page;
	int err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64(size, PAGE_SIZE);
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->address = (unsigned long)addr;
	umem->props.chunk_mask = ~((u64)chunk_size - 1);
	umem->props.size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

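/* Allocate and register a new umem from an XDP_UMEM_REG request. On the
 * user-space side this is typically reached through something roughly like
 * the (illustrative, error handling omitted) snippet below:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(unsigned long)buffer,	// page-aligned
 *		.len = 4 * 1024 * 1024,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *
 * which ends up calling xdp_umem_create() with the same xdp_umem_reg.
 */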
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

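/* Both the fill ring (fq) and the completion ring (cq) must have been
 * created before a umem can be bound to a device queue.
 */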
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}