net/xdp/xdp_umem.c
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

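/* Add an XDP socket to the umem's list of bound sockets. The list uses RCU
 * primitives, so readers can traverse it locklessly; insertion is serialized
 * by xsk_list_lock with interrupts disabled.
 */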
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

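/* Remove an XDP socket from the umem's socket list. For zero-copy umems,
 * wait for in-flight readers of the RCU-protected list to finish
 * (synchronize_net()) before the caller tears the socket down.
 */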
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        if (xs->dev) {
                spin_lock_irqsave(&umem->xsk_list_lock, flags);
                list_del_rcu(&xs->list);
                spin_unlock_irqrestore(&umem->xsk_list_lock, flags);

                if (umem->zc)
                        synchronize_net();
        }
}

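/* Ask the driver whether a umem is already set up on @queue_id. Returns a
 * negative errno if the query fails, 1 if a umem is present and 0 otherwise.
 * Must be called with the rtnl lock held.
 */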
int xdp_umem_query(struct net_device *dev, u16 queue_id)
{
        struct netdev_bpf bpf;

        ASSERT_RTNL();

        memset(&bpf, 0, sizeof(bpf));
        bpf.command = XDP_QUERY_XSK_UMEM;
        bpf.xsk.queue_id = queue_id;

        if (!dev->netdev_ops->ndo_bpf)
                return 0;
        return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
}

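/* Try to bind the umem to a device and queue for zero-copy operation.
 * XDP_ZEROCOPY and XDP_COPY are mutually exclusive: with XDP_COPY we stay
 * in copy mode and return early, and without driver support (ndo_bpf and
 * ndo_xsk_async_xmit) we either fail (XDP_ZEROCOPY) or fall back to copy
 * mode. -EBUSY is returned when the queue already has a umem attached.
 */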
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                        u32 queue_id, u16 flags)
{
        bool force_zc, force_copy;
        struct netdev_bpf bpf;
        int err;

        force_zc = flags & XDP_ZEROCOPY;
        force_copy = flags & XDP_COPY;

        if (force_zc && force_copy)
                return -EINVAL;

        if (force_copy)
                return 0;

        if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
                return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

        bpf.command = XDP_QUERY_XSK_UMEM;

        rtnl_lock();
        err = xdp_umem_query(dev, queue_id);
        if (err) {
                err = err < 0 ? -EOPNOTSUPP : -EBUSY;
                goto err_rtnl_unlock;
        }

        bpf.command = XDP_SETUP_XSK_UMEM;
        bpf.xsk.umem = umem;
        bpf.xsk.queue_id = queue_id;

        err = dev->netdev_ops->ndo_bpf(dev, &bpf);
        if (err)
                goto err_rtnl_unlock;
        rtnl_unlock();

        dev_hold(dev);
        umem->dev = dev;
        umem->queue_id = queue_id;
        umem->zc = true;
        return 0;

err_rtnl_unlock:
        rtnl_unlock();
        return force_zc ? err : 0; /* fail or fallback */
}

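/* Detach the umem from its device: tell the driver to tear down the
 * zero-copy umem on the bound queue, then drop the device reference taken
 * in xdp_umem_assign_dev().
 */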
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
        struct netdev_bpf bpf;
        int err;

        if (umem->dev) {
                bpf.command = XDP_SETUP_XSK_UMEM;
                bpf.xsk.umem = NULL;
                bpf.xsk.queue_id = umem->queue_id;

                rtnl_lock();
                err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
                rtnl_unlock();

                if (err)
                        WARN(1, "failed to disable umem!\n");

                dev_put(umem->dev);
                umem->dev = NULL;
        }
}

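/* Release the pages pinned by xdp_umem_pin_pages(): mark each page dirty
 * (they were pinned writable, so packet data may have been written into
 * them), drop the page references and free the page array.
 */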
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                struct page *page = umem->pgs[i];

                set_page_dirty_lock(page);
                put_page(page);
        }

        kfree(umem->pgs);
        umem->pgs = NULL;
}

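/* Undo the locked-memory accounting done in xdp_umem_account_pages() and
 * drop the user reference. A NULL umem->user means the pages were never
 * charged (e.g. the caller had CAP_IPC_LOCK).
 */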
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
        if (umem->user) {
                atomic_long_sub(umem->npgs, &umem->user->locked_vm);
                free_uid(umem->user);
        }
}

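/* Tear down a umem once its last reference is gone: detach it from the
 * device, destroy the fill and completion queues, unpin the pages and,
 * if the owning task's mm can still be reached, free the page address
 * array and uncharge the locked memory.
 */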
static void xdp_umem_release(struct xdp_umem *umem)
{
        struct task_struct *task;
        struct mm_struct *mm;

        xdp_umem_clear_dev(umem);

        if (umem->fq) {
                xskq_destroy(umem->fq);
                umem->fq = NULL;
        }

        if (umem->cq) {
                xskq_destroy(umem->cq);
                umem->cq = NULL;
        }

        xdp_umem_unpin_pages(umem);

        task = get_pid_task(umem->pid, PIDTYPE_PID);
        put_pid(umem->pid);
        if (!task)
                goto out;
        mm = get_task_mm(task);
        put_task_struct(task);
        if (!mm)
                goto out;

        mmput(mm);
        kfree(umem->pages);
        umem->pages = NULL;

        xdp_umem_unaccount_pages(umem);
out:
        kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
        struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

        xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
        refcount_inc(&umem->users);
}

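/* Drop a reference to the umem. The final put schedules the actual release
 * on a workqueue: xdp_umem_release() takes the rtnl lock and may sleep,
 * which is not allowed in every context a put can come from.
 */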
void xdp_put_umem(struct xdp_umem *umem)
{
        if (!umem)
                return;

        if (refcount_dec_and_test(&umem->users)) {
                INIT_WORK(&umem->work, xdp_umem_release_deferred);
                schedule_work(&umem->work);
        }
}

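/* Pin the umem's user pages in memory with get_user_pages(FOLL_WRITE) so
 * that the kernel and devices can access them. On a partial pin, the pages
 * that were pinned are released again and -ENOMEM is returned.
 */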
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
        unsigned int gup_flags = FOLL_WRITE;
        long npgs;
        int err;

        umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
                            GFP_KERNEL | __GFP_NOWARN);
        if (!umem->pgs)
                return -ENOMEM;

        down_write(&current->mm->mmap_sem);
        npgs = get_user_pages(umem->address, umem->npgs,
                              gup_flags, &umem->pgs[0], NULL);
        up_write(&current->mm->mmap_sem);

        if (npgs != umem->npgs) {
                if (npgs >= 0) {
                        umem->npgs = npgs;
                        err = -ENOMEM;
                        goto out_pin;
                }
                err = npgs;
                goto out_pgs;
        }
        return 0;

out_pin:
        xdp_umem_unpin_pages(umem);
out_pgs:
        kfree(umem->pgs);
        umem->pgs = NULL;
        return err;
}

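/* Charge the umem's pages against the user's RLIMIT_MEMLOCK. Tasks with
 * CAP_IPC_LOCK are exempt. The cmpxchg loop updates locked_vm atomically
 * so that concurrent registrations cannot overshoot the limit.
 */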
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
        unsigned long lock_limit, new_npgs, old_npgs;

        if (capable(CAP_IPC_LOCK))
                return 0;

        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        umem->user = get_uid(current_user());

        do {
                old_npgs = atomic_long_read(&umem->user->locked_vm);
                new_npgs = old_npgs + umem->npgs;
                if (new_npgs > lock_limit) {
                        free_uid(umem->user);
                        umem->user = NULL;
                        return -ENOBUFS;
                }
        } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
                                     new_npgs) != old_npgs);
        return 0;
}

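/* Validate a registration request and set up the umem: the chunk size must
 * be a power of two between XDP_UMEM_MIN_CHUNK_SIZE and PAGE_SIZE, the area
 * must be page aligned, divide evenly into chunks that do not cross page
 * boundaries, and each chunk must still fit a frame after the (64-byte
 * aligned) user headroom and XDP_PACKET_HEADROOM are reserved.
 *
 * Example with PAGE_SIZE 4096: an 8 MiB area with chunk_size 2048 yields
 * chunks = 4096, chunks_per_page = 2 and npgs = 2048, which passes; a
 * chunk_size of 3000 is rejected because it is not a power of two.
 */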
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
        u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
        unsigned int chunks, chunks_per_page;
        u64 addr = mr->addr, size = mr->len;
        int size_chk, err, i;

        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
                /* Strictly speaking we could support this, if:
                 * - huge pages, or
                 * - using an IOMMU, or
                 * - making sure the memory area is consecutive
                 * but for now, we simply say "computer says no".
                 */
                return -EINVAL;
        }

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        if (!PAGE_ALIGNED(addr)) {
                /* Memory area has to be page size aligned. For
                 * simplicity, this might change in the future.
                 */
                return -EINVAL;
        }

        if ((addr + size) < addr)
                return -EINVAL;

        chunks = (unsigned int)div_u64(size, chunk_size);
        if (chunks == 0)
                return -EINVAL;

        chunks_per_page = PAGE_SIZE / chunk_size;
        if (chunks < chunks_per_page || chunks % chunks_per_page)
                return -EINVAL;

        headroom = ALIGN(headroom, 64);

        size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
        if (size_chk < 0)
                return -EINVAL;

        umem->pid = get_task_pid(current, PIDTYPE_PID);
        umem->address = (unsigned long)addr;
        umem->props.chunk_mask = ~((u64)chunk_size - 1);
        umem->props.size = size;
        umem->headroom = headroom;
        umem->chunk_size_nohr = chunk_size - headroom;
        umem->npgs = size / PAGE_SIZE;
        umem->pgs = NULL;
        umem->user = NULL;
        INIT_LIST_HEAD(&umem->xsk_list);
        spin_lock_init(&umem->xsk_list_lock);

        refcount_set(&umem->users, 1);

        err = xdp_umem_account_pages(umem);
        if (err)
                goto out;

        err = xdp_umem_pin_pages(umem);
        if (err)
                goto out_account;

        umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages) {
                err = -ENOMEM;
                goto out_account;
        }

        for (i = 0; i < umem->npgs; i++)
                umem->pages[i].addr = page_address(umem->pgs[i]);

        return 0;

out_account:
        xdp_umem_unaccount_pages(umem);
out:
        put_pid(umem->pid);
        return err;
}

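/* Allocate a umem and register the user memory described by @mr with it.
 * Returns the new umem on success and an ERR_PTR() on failure.
 */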
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
        struct xdp_umem *umem;
        int err;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        err = xdp_umem_reg(umem, mr);
        if (err) {
                kfree(umem);
                return ERR_PTR(err);
        }

        return umem;
}

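/* A umem is only usable once both a fill queue and a completion queue have
 * been set up for it.
 */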
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
        return umem->fq && umem->cq;
}