xsk: Add overflow check for u64 division, stored into u32
[linux/fpc-iii.git] / net/xdp/xdp_umem.c
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048
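
/* Add/remove an XDP socket on the umem's xsk_list. Only TX sockets are
 * tracked; RCU list primitives are used since readers elsewhere in the
 * xsk code are expected to walk the list under RCU, with updates
 * serialized by xsk_list_lock.
 */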
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_del_rcu(&xs->list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}
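
/* Ask the driver whether a umem is already set up on @queue_id. Returns
 * the driver's negative errno if the query itself fails, 0 if no umem is
 * attached (or the driver has no ndo_bpf), and 1 if one is.
 */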
int xdp_umem_query(struct net_device *dev, u16 queue_id)
{
        struct netdev_bpf bpf;

        ASSERT_RTNL();

        memset(&bpf, 0, sizeof(bpf));
        bpf.command = XDP_QUERY_XSK_UMEM;
        bpf.xsk.queue_id = queue_id;

        if (!dev->netdev_ops->ndo_bpf)
                return 0;
        return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
}
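
/* Try to bind the umem to @dev/@queue_id in zero-copy mode. XDP_COPY and
 * missing driver support fall back to copy mode (return 0 without setting
 * umem->zc) unless XDP_ZEROCOPY was explicitly requested, in which case
 * the error is propagated instead.
 */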
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                        u32 queue_id, u16 flags)
{
        bool force_zc, force_copy;
        struct netdev_bpf bpf;
        int err;

        force_zc = flags & XDP_ZEROCOPY;
        force_copy = flags & XDP_COPY;

        if (force_zc && force_copy)
                return -EINVAL;

        if (force_copy)
                return 0;

        if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
                return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

        bpf.command = XDP_QUERY_XSK_UMEM;

        rtnl_lock();
        err = xdp_umem_query(dev, queue_id);
        if (err) {
                err = err < 0 ? -EOPNOTSUPP : -EBUSY;
                goto err_rtnl_unlock;
        }

        bpf.command = XDP_SETUP_XSK_UMEM;
        bpf.xsk.umem = umem;
        bpf.xsk.queue_id = queue_id;

        err = dev->netdev_ops->ndo_bpf(dev, &bpf);
        if (err)
                goto err_rtnl_unlock;
        rtnl_unlock();

        dev_hold(dev);
        umem->dev = dev;
        umem->queue_id = queue_id;
        umem->zc = true;
        return 0;

err_rtnl_unlock:
        rtnl_unlock();
        return force_zc ? err : 0; /* fail or fallback */
}
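
/* Undo xdp_umem_assign_dev(): tell the driver to drop its umem for the
 * bound queue and release the device reference taken on assignment.
 */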
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
        struct netdev_bpf bpf;
        int err;

        if (umem->dev) {
                bpf.command = XDP_SETUP_XSK_UMEM;
                bpf.xsk.umem = NULL;
                bpf.xsk.queue_id = umem->queue_id;

                rtnl_lock();
                err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
                rtnl_unlock();

                if (err)
                        WARN(1, "failed to disable umem!\n");

                dev_put(umem->dev);
                umem->dev = NULL;
        }
}
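
/* Teardown helpers: unpin the user pages (marking them dirty, since the
 * kernel may have written into them) and drop the pinned-page count from
 * the owning user's locked_vm accounting.
 */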
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                struct page *page = umem->pgs[i];

                set_page_dirty_lock(page);
                put_page(page);
        }

        kfree(umem->pgs);
        umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
        if (umem->user) {
                atomic_long_sub(umem->npgs, &umem->user->locked_vm);
                free_uid(umem->user);
        }
}

static void xdp_umem_release(struct xdp_umem *umem)
{
        xdp_umem_clear_dev(umem);

        if (umem->fq) {
                xskq_destroy(umem->fq);
                umem->fq = NULL;
        }

        if (umem->cq) {
                xskq_destroy(umem->cq);
                umem->cq = NULL;
        }

        xdp_umem_unpin_pages(umem);

        kfree(umem->pages);
        umem->pages = NULL;

        xdp_umem_unaccount_pages(umem);
        kfree(umem);
}
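
/* Refcounting: the last xdp_put_umem() schedules xdp_umem_release() on a
 * workqueue rather than calling it directly, presumably because the
 * teardown takes rtnl_lock and may sleep, which the caller's context may
 * not allow.
 */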
static void xdp_umem_release_deferred(struct work_struct *work)
{
        struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

        xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
        refcount_inc(&umem->users);
}

void xdp_put_umem(struct xdp_umem *umem)
{
        if (!umem)
                return;

        if (refcount_dec_and_test(&umem->users)) {
                INIT_WORK(&umem->work, xdp_umem_release_deferred);
                schedule_work(&umem->work);
        }
}
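
/* Pin the umem's user pages with get_user_pages() so they can back packet
 * buffers. On a partial pin, npgs is trimmed to what was actually pinned
 * before unwinding, so every pinned page gets released again.
 */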
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
        unsigned int gup_flags = FOLL_WRITE;
        long npgs;
        int err;

        umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
                            GFP_KERNEL | __GFP_NOWARN);
        if (!umem->pgs)
                return -ENOMEM;

        down_write(&current->mm->mmap_sem);
        npgs = get_user_pages(umem->address, umem->npgs,
                              gup_flags, &umem->pgs[0], NULL);
        up_write(&current->mm->mmap_sem);

        if (npgs != umem->npgs) {
                if (npgs >= 0) {
                        umem->npgs = npgs;
                        err = -ENOMEM;
                        goto out_pin;
                }
                err = npgs;
                goto out_pgs;
        }
        return 0;

out_pin:
        xdp_umem_unpin_pages(umem);
out_pgs:
        kfree(umem->pgs);
        umem->pgs = NULL;
        return err;
}
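
/* Charge the pinned pages against the user's RLIMIT_MEMLOCK via a
 * cmpxchg loop on user->locked_vm; processes with CAP_IPC_LOCK are
 * exempt from the limit.
 */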
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
        unsigned long lock_limit, new_npgs, old_npgs;

        if (capable(CAP_IPC_LOCK))
                return 0;

        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        umem->user = get_uid(current_user());

        do {
                old_npgs = atomic_long_read(&umem->user->locked_vm);
                new_npgs = old_npgs + umem->npgs;
                if (new_npgs > lock_limit) {
                        free_uid(umem->user);
                        umem->user = NULL;
                        return -ENOBUFS;
                }
        } while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
                                     new_npgs) != old_npgs);
        return 0;
}
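
/* Validate an XDP_UMEM_REG request and initialize the umem: sanity-check
 * chunk size, alignment and headroom, charge and pin the user pages, and
 * record each page's kernel virtual address.
 */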
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
        u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
        u64 npgs, addr = mr->addr, size = mr->len;
        unsigned int chunks, chunks_per_page;
        int err, i;

        if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
                /* Strictly speaking we could support this, if:
                 * - huge pages, or
                 * - using an IOMMU, or
                 * - making sure the memory area is consecutive
                 * but for now, we simply say "computer says no".
                 */
                return -EINVAL;
        }

        if (!is_power_of_2(chunk_size))
                return -EINVAL;

        if (!PAGE_ALIGNED(addr)) {
                /* Memory area has to be page size aligned. For
                 * simplicity, this might change.
                 */
                return -EINVAL;
        }

        if ((addr + size) < addr)
                return -EINVAL;
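
        /* npgs is computed as a u64 but stored into the u32 umem->npgs
         * below, hence the U32_MAX check on the division result (the
         * overflow check named in the commit subject).
         */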
        npgs = div_u64(size, PAGE_SIZE);
        if (npgs > U32_MAX)
                return -EINVAL;

        chunks = (unsigned int)div_u64(size, chunk_size);
        if (chunks == 0)
                return -EINVAL;

        chunks_per_page = PAGE_SIZE / chunk_size;
        if (chunks < chunks_per_page || chunks % chunks_per_page)
                return -EINVAL;

        headroom = ALIGN(headroom, 64);

        if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
                return -EINVAL;

        umem->address = (unsigned long)addr;
        umem->props.chunk_mask = ~((u64)chunk_size - 1);
        umem->props.size = size;
        umem->headroom = headroom;
        umem->chunk_size_nohr = chunk_size - headroom;
        umem->npgs = (u32)npgs;
        umem->pgs = NULL;
        umem->user = NULL;
        INIT_LIST_HEAD(&umem->xsk_list);
        spin_lock_init(&umem->xsk_list_lock);

        refcount_set(&umem->users, 1);

        err = xdp_umem_account_pages(umem);
        if (err)
                return err;

        err = xdp_umem_pin_pages(umem);
        if (err)
                goto out_account;

        umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
        if (!umem->pages) {
                err = -ENOMEM;
                goto out_pin;
        }

        for (i = 0; i < umem->npgs; i++)
                umem->pages[i].addr = page_address(umem->pgs[i]);

        return 0;

out_pin:
        xdp_umem_unpin_pages(umem);
out_account:
        xdp_umem_unaccount_pages(umem);
        return err;
}
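
/* Allocate a umem and register it from the xdp_umem_reg request; the
 * caller gets either a valid umem with one reference held or an ERR_PTR.
 */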
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
        struct xdp_umem *umem;
        int err;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        err = xdp_umem_reg(umem, mr);
        if (err) {
                kfree(umem);
                return ERR_PTR(err);
        }

        return umem;
}

bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
        return umem->fq && umem->cq;
}