/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

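/* Note: "bpf_helpers.h" is the samples' local helper header; it supplies
 * the SEC() section-placement macro and prototypes for BPF helpers such
 * as bpf_map_lookup_elem() used below.
 */
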
struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(u32),
	.value_size = sizeof(u64),
	.max_entries = 2,
	/* TODO: have entries for all possible errno's */
};

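/* Illustrative sketch (userspace side, not compiled into this BPF object):
 * a BPF_MAP_TYPE_PERCPU_ARRAY lookup returns one value slot per possible
 * CPU, so the consumer must sum the slots itself.  Assumes libbpf's
 * bpf_map_lookup_elem() and a map_fd for redirect_err_cnt:
 *
 *	unsigned int nr_cpus = bpf_num_possible_cpus(); // from "bpf_util.h"
 *	__u64 values[nr_cpus], sum = 0;
 *	__u32 key = XDP_REDIRECT_ERROR;
 *	unsigned int i;
 *
 *	if (bpf_map_lookup_elem(map_fd, &key, values) == 0)
 *		for (i = 0; i < nr_cpus; i++)
 *			sum += values[i];
 */
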
#define XDP_UNKNOWN	(XDP_REDIRECT + 1)
struct bpf_map_def SEC("maps") exception_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u64),
	.max_entries	= XDP_UNKNOWN + 1,
};

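/* Keys mirror the uapi enum xdp_action: XDP_ABORTED=0, XDP_DROP=1,
 * XDP_PASS=2, XDP_TX=3, XDP_REDIRECT=4, with XDP_UNKNOWN=5 as the
 * catch-all, giving the six entries sized above.
 */
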
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in:           kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
	int err;	//	offset:20; size:4; signed:1;
	int to_ifindex;	//	offset:24; size:4; signed:1;
	u32 map_id;	//	offset:28; size:4; signed:0;
	int map_index;	//	offset:32; size:4; signed:1;
};

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well in
	 * practice, as stopping perf-record also unloads this bpf_prog.
	 * Plus, there is additional overhead of doing so.
	 */
}

68 SEC("tracepoint/xdp/xdp_redirect_err")
69 int trace_xdp_redirect_err(struct xdp_redirect_ctx
*ctx
)
71 return xdp_redirect_collect_stat(ctx
);
75 SEC("tracepoint/xdp/xdp_redirect_map_err")
76 int trace_xdp_redirect_map_err(struct xdp_redirect_ctx
*ctx
)
78 return xdp_redirect_collect_stat(ctx
);
/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

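/* All four redirect tracepoints above share the xdp_redirect_ctx layout;
 * the _err variants fire only on failure while the plain variants fire on
 * success, so a single collector can classify every event via ctx->err.
 */
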
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in:           kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
	u64 *cnt;
	u32 key;

	key = ctx->act;
	if (key > XDP_REDIRECT)
		key = XDP_UNKNOWN;

	cnt = bpf_map_lookup_elem(&exception_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0;
}

/* Common stats data record shared with _user.c */
struct datarec {
	u64 processed;
	u64 dropped;
	u64 info;
};
#define MAX_CPUS 64

struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= MAX_CPUS,
};

struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size	= sizeof(u32),
	.value_size	= sizeof(struct datarec),
	.max_entries	= 1,
};

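/* Single-entry array: the kthread handler below always writes key 0; the
 * per-CPU separation comes from the PERCPU map type itself.
 */
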
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in:    kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int to_cpu;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
	u32 to_cpu = ctx->to_cpu;
	struct datarec *rec;

	if (to_cpu >= MAX_CPUS)
		return 1;

	rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Record bulk events, then userspace can calc average bulk size */
	if (ctx->processed > 0)
		rec->info++;

	return 0;
}

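/* Given the counters above, userspace can derive the average enqueue bulk
 * size as processed / info, since info only counts enqueue events that
 * moved at least one packet.
 */
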
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in:    kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int sched;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
	struct datarec *rec;
	u32 key = 0;

	rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
	if (!rec)
		return 0;
	rec->processed += ctx->processed;
	rec->dropped   += ctx->drops;

	/* Count times kthread yielded CPU via schedule call */
	if (ctx->sched)
		rec->info++;

	return 0;
}

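/* Note the reuse of struct datarec: info counts schedule events here
 * rather than bulks, so the consumer must interpret the field per map.
 */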