/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, 2);
	/* TODO: have entries for all possible errno's */
} redirect_err_cnt SEC(".maps");
#define XDP_UNKNOWN	XDP_REDIRECT + 1
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, XDP_UNKNOWN + 1);
} exception_cnt SEC(".maps");
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int prog_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int ifindex;		//	offset:16; size:4; signed:1;
	int err;		//	offset:20; size:4; signed:1;
	int to_ifindex;		//	offset:24; size:4; signed:1;
	u32 map_id;		//	offset:28; size:4; signed:0;
	int map_index;		//	offset:32; size:4; signed:1;
};
enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};
static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well
	 * in practice, as stopping perf-record also unloads this
	 * bpf_prog. Plus, there is additional overhead of doing so.
	 */
}
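/* Reading these counters from userspace: a PERCPU_ARRAY lookup returns one
 * value per possible CPU, which the reader must sum itself. A minimal sketch,
 * assuming libbpf; the helper name is illustrative, not part of this file:
 *
 *	__u64 sum_percpu_u64(int map_fd, __u32 key)
 *	{
 *		int i, nr_cpus = libbpf_num_possible_cpus();
 *		__u64 values[nr_cpus], sum = 0;
 *
 *		if (bpf_map_lookup_elem(map_fd, &key, values) < 0)
 *			return 0;
 *		for (i = 0; i < nr_cpus; i++)
 *			sum += values[i];
 *		return sum;
 *	}
 */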
SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
};
SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
	u64 *cnt;
	u32 key;

	key = ctx->act;
	if (key > XDP_REDIRECT)
		key = XDP_UNKNOWN;

	cnt = bpf_map_lookup_elem(&exception_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0;
}
/* Common stats data record shared with _user.c */
struct datarec {
	u64 processed;
	u64 dropped;
	u64 info;
	u64 err;
};
#define MAX_CPUS 64
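/* How the fields are used below: processed/dropped mirror each tracepoint's
 * packet counters; info counts bulk events (cpumap enqueue, devmap xmit) or
 * kthread schedule events, letting userspace derive the average bulk size
 * as processed / info; err counts devmap xmit error cases.
 */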
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, MAX_CPUS);
} cpumap_enqueue_cnt SEC(".maps");
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} cpumap_kthread_cnt SEC(".maps");
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int to_cpu;		//	offset:28; size:4; signed:1;
};
SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
	u32 to_cpu = ctx->to_cpu;
	struct datarec *rec;

	if (to_cpu >= MAX_CPUS)
		return 1;

	rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	if (ctx->processed > 0)
		rec->info += 1;

	return 0;
}
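/* Userspace side: the info counter turns into an average enqueue bulk size.
 * A minimal sketch, assuming the per-CPU values have already been summed
 * into a local struct datarec named 'rec' (illustrative, not in this file):
 *
 *	double avg_bulk = rec.info ? (double)rec.processed / rec.info : 0.0;
 */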
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int sched;		//	offset:28; size:4; signed:1;
};
SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Count times the kthread yielded the CPU via a schedule call */
	if (ctx->sched)
		rec->info++;

	return 0;
}
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} devmap_xmit_cnt SEC(".maps");
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct devmap_xmit_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int from_ifindex;	//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int to_ifindex;		//	offset:16; size:4; signed:1;
	int drops;		//	offset:20; size:4; signed:1;
	int sent;		//	offset:24; size:4; signed:1;
	int err;		//	offset:28; size:4; signed:1;
};
SEC("tracepoint/xdp/xdp_devmap_xmit")
int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->sent;
	rec->dropped   += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	rec->info += 1;

	/* Record error cases, where no frames were sent */
	if (ctx->err)
		rec->err++;

	/* Catch API error where the driver's ndo_xdp_xmit sent more frames
	 * than requested (drops is count minus sent, so it goes negative)
	 */
	if (ctx->drops < 0)
		rec->err++;

	return 0;
}
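/* The companion userspace tool in this sample (xdp_monitor_user.c, the
 * "_user.c" referenced above) attaches these tracepoint programs and
 * periodically reads the maps above to print the statistics.
 */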