// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
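/* Run @prog against @ctx @repeat times (at least once). The program's last
 * return value is reported through @retval, and the average per-iteration
 * runtime in nanoseconds (clamped to U32_MAX) through @time. Per-run cgroup
 * storage is allocated up front and freed again before returning.
 */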
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
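/* Allocate a zeroed buffer with the requested headroom and tailroom around
 * the test packet and copy the user-supplied data_in bytes into it. The
 * packet must hold at least an Ethernet header and fit into a page together
 * with the head and tail room.
 */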
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
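/* Allocate a zeroed context object of @max_size. If the user supplied
 * ctx_in, verify via bpf_check_uarg_tail_zero() that any bytes beyond
 * @max_size are zero before copying the user data in. Returns NULL when no
 * context was requested in either direction.
 */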
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
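/* Write the (possibly modified) context back to the user's ctx_out buffer,
 * clamping to the user-supplied ctx_size_out and reporting the real size;
 * returns -ENOSPC when the context did not fit.
 */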
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
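/* Apply the user-supplied __sk_buff context to the freshly built skb. Only
 * the priority and cb fields may be set; any other non-zero field in the
 * context is rejected with -EINVAL.
 */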
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
			   FIELD_SIZEOF(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
			   FIELD_SIZEOF(struct __sk_buff, cb),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->priority = __skb->priority;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	return 0;
}
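/* Inverse of convert___skb_to_skb(): copy the fields the program may have
 * changed (priority and cb) back into the user-visible context.
 */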
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->priority = skb->priority;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
}
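/* BPF_PROG_TEST_RUN handler for skb-based program types (tc classifiers and
 * actions, lwt hooks, socket filters): build a throw-away sk_buff around the
 * user-supplied packet, run the program over it, and copy the resulting
 * packet bytes and __sk_buff context back to user space.
 *
 * Roughly, user space drives this path with something like the sketch below
 * (illustrative only; prog_fd, pkt, pkt_len and out_buf are placeholders and
 * error handling is omitted):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd       = prog_fd;
 *	attr.test.data_in       = (__u64)(unsigned long)pkt;
 *	attr.test.data_size_in  = pkt_len;
 *	attr.test.data_out      = (__u64)(unsigned long)out_buf;
 *	attr.test.data_size_out = sizeof(out_buf);
 *	attr.test.repeat        = 1000;
 *	syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *
 * On return, attr.test.retval holds the program's return code,
 * attr.test.duration the average runtime in nanoseconds, and
 * attr.test.data_size_out the size of the packet written to out_buf.
 */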
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}
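/* BPF_PROG_TEST_RUN handler for XDP programs: the user data is laid out in a
 * fake xdp_buff with XDP_PACKET_HEADROOM, bound to the loopback device's RX
 * queue 0, and whatever the program leaves between data and data_end is
 * copied back to user space.
 */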
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}
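/* BPF_PROG_TEST_RUN handler for flow-dissector programs: the raw packet
 * bytes are dissected @repeat times and the resulting bpf_flow_keys
 * structure is returned as the output data, alongside the usual
 * retval/duration statistics.
 */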
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size);

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);