net/bpf/test_run.c
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>
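/* Run the program once: make the pre-allocated cgroup storage visible to
 * the program, then invoke it under rcu_read_lock() with preemption disabled.
 */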
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}
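/*
 * Run the program "repeat" times and report the average runtime per
 * invocation, in nanoseconds, through *time.
 */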
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}
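	/* A repeat count of zero from user space means "run once". */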
	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx, storage);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
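	/* Average over the requested repeat count and clamp the result so it
	 * fits the u32 duration reported to user space.
	 */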
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
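/*
 * Copy the resulting packet data, its size, the program's return value and
 * the measured duration back to user space through the bpf_attr test fields.
 */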
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}
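/*
 * Allocate a buffer with the requested headroom and tailroom and copy the
 * test packet in from user space.
 */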
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}
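/*
 * BPF_PROG_TEST_RUN handler for skb-based program types: wrap the
 * user-supplied data in an skb, run the program on it and hand the
 * (possibly modified) packet back to user space.
 */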
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}
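	/* Build the test skb around the copied data and back it with a
	 * minimally initialised socket in the current net namespace.
	 */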
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
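	/* Non-L2 programs never pushed a MAC header; make sure there is room
	 * and prepend a zeroed header before the data is copied back out.
	 */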
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}
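/*
 * BPF_PROG_TEST_RUN handler for XDP programs: run the program on an
 * xdp_buff built around the user-supplied data.
 */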
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
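	/* The program may have moved xdp.data or xdp.data_end, e.g. via
	 * bpf_xdp_adjust_head() or bpf_xdp_adjust_tail(); report the adjusted
	 * length in that case.
	 */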
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}