/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u64),
	.max_entries	= 2,	/* SUCCESS and ERROR */
	/* TODO: have entries for all possible errno's */
};

#define XDP_UNKNOWN	XDP_REDIRECT + 1
struct bpf_map_def SEC("maps") exception_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u64),
	.max_entries	= XDP_UNKNOWN + 1,
};
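/* Keys into exception_cnt are the XDP action codes from uapi/linux/bpf.h
 * (XDP_ABORTED=0 through XDP_REDIRECT=4); the extra XDP_UNKNOWN slot
 * catches any out-of-range action, hence max_entries = XDP_UNKNOWN + 1.
 */
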
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in:           kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int prog_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12  size:4; signed:0;
	int ifindex;		//	offset:16  size:4; signed:1;
	int err;		//	offset:20  size:4; signed:1;
	int to_ifindex;		//	offset:24  size:4; signed:1;
	u32 map_id;		//	offset:28  size:4; signed:0;
	int map_index;		//	offset:32  size:4; signed:1;
};

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well
	 * in practice, as stopping perf-record also unloads this
	 * bpf_prog. Plus, there is additional overhead of doing so.
	 */
}
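/* A perf-record consumer of the kind mentioned in the comment above could
 * be started with e.g. "perf record -e xdp:xdp_redirect_err -a".
 */
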
SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in:           kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
	u64 *cnt;
	u32 key;

	key = ctx->act;
	if (key > XDP_REDIRECT)
		key = XDP_UNKNOWN;

	cnt = bpf_map_lookup_elem(&exception_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0;
}

/* Common stats data record shared with _user.c */
struct datarec {
	u64 processed;
	u64 dropped;
	u64 info;
	u64 err;
};
#define MAX_CPUS 64
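/* A minimal userspace-side sketch of reading one slot of the struct datarec
 * PERCPU_ARRAY maps defined below: the lookup returns one struct datarec per
 * possible CPU, which must be summed by hand. This assumes a map fd obtained
 * via libbpf and bpf_num_possible_cpus() from the samples' bpf_util.h; it is
 * an illustration, not the actual _user.c code.
 *
 *	static struct datarec sum_percpu_slot(int fd, __u32 key)
 *	{
 *		unsigned int nr_cpus = bpf_num_possible_cpus();
 *		struct datarec values[nr_cpus];
 *		struct datarec sum = { 0 };
 *		unsigned int i;
 *
 *		if (bpf_map_lookup_elem(fd, &key, values) == 0) {
 *			for (i = 0; i < nr_cpus; i++) {
 *				sum.processed += values[i].processed;
 *				sum.dropped   += values[i].dropped;
 *				sum.info      += values[i].info;
 *				sum.err       += values[i].err;
 *			}
 *		}
 *		return sum;
 *	}
 */
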
struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= MAX_CPUS,
};

struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= 1,
};

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in:     kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int to_cpu;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
	u32 to_cpu = ctx->to_cpu;
	struct datarec *rec;

	if (to_cpu >= MAX_CPUS)
		return 1;

	rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	if (ctx->processed > 0)
		rec->info += 1;

	return 0;
}
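/* Userspace-side sketch of the average-bulk calculation this enables:
 * rec->info counts enqueue events and rec->processed counts frames, so
 * e.g. processed=1024 over info=64 events is an average bulk of 16
 * frames per enqueue. (An illustration, not the exact _user.c code.)
 *
 *	double avg_bulk = 0;
 *
 *	if (rec.info > 0)
 *		avg_bulk = (double)rec.processed / (double)rec.info;
 */
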
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in:     kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int sched;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Count times kthread yielded CPU via schedule call */
	if (ctx->sched)
		rec->info++;

	return 0;
}

struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= 1,
};

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
 * Code in:     kernel/include/trace/events/xdp.h
 */
struct devmap_xmit_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int from_ifindex;	//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int to_ifindex;		//	offset:16; size:4; signed:1;
	int drops;		//	offset:20; size:4; signed:1;
	int sent;		//	offset:24; size:4; signed:1;
	int err;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_devmap_xmit")
int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->sent;
	rec->dropped   += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	rec->info += 1;

	/* Record error cases, where no frames were sent */
	if (ctx->err)
		rec->err++;

	/* Catch the API error case where the driver's ndo_xdp_xmit sent
	 * more frames than requested, seen here as a negative drops count
	 */
	if (ctx->drops < 0)
		rec->err++;

	return 1;
}
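
/* A minimal userspace loading sketch with modern libbpf (an illustration,
 * not necessarily how xdp_monitor_user.c does it): every SEC("tracepoint/...")
 * program in this file can be attached in one loop, because libbpf derives
 * the tracepoint to attach to from the section name.
 *
 *	struct bpf_object *obj = bpf_object__open_file("xdp_monitor_kern.o", NULL);
 *	struct bpf_program *prog;
 *
 *	if (libbpf_get_error(obj) || bpf_object__load(obj))
 *		exit(EXIT_FAILURE);
 *	bpf_object__for_each_program(prog, obj) {
 *		if (libbpf_get_error(bpf_program__attach(prog)))
 *			exit(EXIT_FAILURE);
 *	}
 */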