// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}

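/* Map the pinned pages into one contiguous kernel virtual range so every
 * chunk in the umem can be reached with plain pointer arithmetic from
 * umem->addrs.
 */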
static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}

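/* Final teardown: release the id, the kernel mapping, the page pins and
 * the memlock accounting in reverse order of setup, then free the umem.
 */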
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_simple_remove(&umem_ida, umem->id);

	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

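/* Drop a reference. The last put releases the umem, either directly or,
 * when the caller requests it via defer_cleanup, from a workqueue.
 */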
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}

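/* Pin the user pages backing the umem with FOLL_LONGTERM so they stay in
 * place for the lifetime of the umem. A partial pin is treated as failure
 * and unwound.
 */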
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			/* Pinning succeeded partially. */
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

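/* Charge the pinned pages against the owner's RLIMIT_MEMLOCK. The cmpxchg
 * loop updates user->locked_vm without taking a lock; CAP_IPC_LOCK
 * bypasses the limit entirely.
 */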
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

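/* Validate a registration request (chunk size, alignment, headroom), size
 * the umem in pages and chunks, then account, pin and map the user memory.
 * Errors unwind in reverse order.
 */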
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_rem;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

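/* Allocate a umem, assign it an id from umem_ida and register the user
 * memory described by @mr. On failure the id and the allocation are
 * released again and an ERR_PTR is returned.
 */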
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}