net/xdp/xdp_umem.c
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"
#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}
static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
	vunmap(umem->addrs);
	umem->addrs = NULL;
}
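
/* Map the pinned pages into one contiguous kernel virtual address range so
 * that UMEM chunks can be addressed linearly, regardless of how the backing
 * physical pages are laid out.
 */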
static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
			     u32 nr_pages)
{
	umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!umem->addrs)
		return -ENOMEM;
	return 0;
}
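
/* Final teardown: drop the id, then undo the setup done by xdp_umem_reg()
 * in reverse order (unmap the kernel mapping, unpin the user pages, release
 * the memlock accounting) before freeing the umem itself.
 */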
static void xdp_umem_release(struct xdp_umem *umem)
{
	umem->zc = false;
	ida_simple_remove(&umem_ida, umem->id);

	xdp_umem_addr_unmap(umem);
	xdp_umem_unpin_pages(umem);

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}
void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}
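
/* Drop a reference. When the last reference is gone the umem is released,
 * either directly or, when defer_cleanup is set, from a workqueue so that
 * the release can happen outside the caller's context.
 */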
void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		if (defer_cleanup) {
			INIT_WORK(&umem->work, xdp_umem_release_deferred);
			schedule_work(&umem->work);
		} else {
			xdp_umem_release(umem);
		}
	}
}
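
/* Pin the user memory behind the UMEM with FOLL_LONGTERM so the pages stay
 * resident for the lifetime of the umem. A short pin (fewer pages returned
 * than requested) is treated as an error and everything is unpinned again.
 */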
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	npgs = pin_user_pages(address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	mmap_read_unlock(current->mm);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}
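
/* Charge the pinned pages against the owner's RLIMIT_MEMLOCK, using a
 * cmpxchg loop on user->locked_vm so that concurrent registrations cannot
 * overshoot the limit. Tasks with CAP_IPC_LOCK are not accounted.
 */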
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}
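
/* Validate a registration request from user space and set up the umem:
 * sanity check chunk size, alignment, headroom and overall length, then
 * account, pin and vmap the backing pages.
 */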
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u64 npgs, addr = mr->addr, size = mr->len;
	unsigned int chunks, chunks_rem;
	int err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
	if (npgs_rem)
		npgs++;
	if (npgs > U32_MAX)
		return -EINVAL;

	chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks && chunks_rem)
		return -EINVAL;

	if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
		return -EINVAL;

	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size = chunk_size;
	umem->chunks = chunks;
	umem->npgs = (u32)npgs;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;

	INIT_LIST_HEAD(&umem->xsk_dma_list);
	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem, (unsigned long)addr);
	if (err)
		goto out_account;

	err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
	if (err)
		goto out_unpin;

	return 0;

out_unpin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}
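
/* Allocate a umem, give it an id from umem_ida and register the user memory
 * described by @mr. On failure the id and the umem itself are freed again.
 */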
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}
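
/* Illustrative sketch only, not part of this file: the xdp_umem_reg
 * structure consumed by xdp_umem_create() is normally filled in by user
 * space and handed to the kernel via the XDP_UMEM_REG setsockopt on an
 * AF_XDP socket, roughly as below (error handling omitted).
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)buffer,	// page-aligned area
 *		.len = buffer_size,			// multiple of chunk_size
 *		.chunk_size = 2048,			// >= XDP_UMEM_MIN_CHUNK_SIZE
 *		.headroom = 0,
 *		.flags = 0,
 *	};
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 */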