net/bpf/test_run.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
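
/* Shared test-run engine for the skb and XDP entry points below: it
 * allocates the program's cgroup storage, runs the program @repeat
 * times under rcu_read_lock() with preemption disabled, and reports
 * the mean per-run duration in nanoseconds via @time. The clock is
 * paused around cond_resched() so voluntary reschedules do not skew
 * the measurement.
 */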
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
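
/* Copy test results back to userspace: the output buffer (clamped to
 * the caller's data_size_out if one was given), the real output size,
 * the program's return value and the measured duration. -ENOSPC here
 * means the data was truncated, not that the run failed.
 */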
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}
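
/* Allocate the test packet buffer and fill it from user memory,
 * leaving room for the requested headroom and tailroom. The
 * bpf_fentry_test*() calls double as attach points: running any test
 * triggers them so fentry/fexit programs attached there can be
 * exercised, and an unexpected sum fails the run with -EFAULT.
 */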
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	if (bpf_fentry_test1(1) != 2 ||
	    bpf_fentry_test2(2, 3) != 5 ||
	    bpf_fentry_test3(4, 5, 6) != 15 ||
	    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
	    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
	    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}
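
/* Allocate and optionally populate the user-supplied context object
 * (struct __sk_buff or bpf_flow_keys). bpf_check_uarg_tail_zero()
 * rejects inputs whose bytes beyond the kernel's known struct size
 * are non-zero, keeping newer userspace from passing fields the
 * kernel does not understand.
 */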
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
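
/* Counterpart of bpf_ctx_init(): copy the possibly-modified context
 * back out, clamped to the caller's ctx_size_out.
 */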
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
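
/* Seed a real sk_buff from the user-visible struct __sk_buff. Only
 * whitelisted fields (mark, priority, tstamp, cb, wire_len, gso_segs)
 * may be set on input; the range_is_zero() checks walk the gaps
 * between them and reject anything else being non-zero.
 */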
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;

	return 0;
}
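
/* Reverse direction: after the run, reflect fields the program may
 * have changed on the sk_buff back into the user-visible context.
 */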
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}
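
/* BPF_PROG_TEST_RUN entry point for skb-based program types. As a
 * rough sketch of the userspace side (UAPI field names from union
 * bpf_attr; error handling omitted), a caller might do:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd      = prog_fd;
 *	attr.test.data_in      = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in = pkt_len;
 *	attr.test.repeat       = 1000;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *
 * On success, attr.test.retval and attr.test.duration carry the
 * program's verdict and its mean runtime in nanoseconds.
 */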
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}
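
/* BPF_PROG_TEST_RUN entry point for XDP: builds an xdp_buff over the
 * user data with XDP_PACKET_HEADROOM and borrows queue 0 of the
 * loopback device so the program sees a valid rxq. If the program
 * moved the data or data_end pointers, the reported size follows.
 */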
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}
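
/* Same whitelist pattern as convert___skb_to_skb(): flags is the only
 * input field userspace may set in bpf_flow_keys; the rest must be
 * zero.
 */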
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}
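
/* BPF_PROG_TEST_RUN entry point for flow-dissector programs. The
 * timing loop mirrors bpf_test_run() but invokes bpf_flow_dissect()
 * directly, with an on-stack bpf_flow_keys as the dissection result
 * that is then returned to userspace as the output data.
 */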
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));
out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}