Merge tag 'drm-fixes-for-v4.16-rc6' of git://people.freedesktop.org/~airlied/linux
[linux/fpc-iii.git] / samples / bpf / xdp_monitor_kern.c
blob211db8ded0de36e8514d3b91af0587cba78d89c2
1 /* SPDX-License-Identifier: GPL-2.0
2 * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
4 * XDP monitor tool, based on tracepoints
5 */
6 #include <uapi/linux/bpf.h>
7 #include "bpf_helpers.h"
9 struct bpf_map_def SEC("maps") redirect_err_cnt = {
10 .type = BPF_MAP_TYPE_PERCPU_ARRAY,
11 .key_size = sizeof(u32),
12 .value_size = sizeof(u64),
13 .max_entries = 2,
14 /* TODO: have entries for all possible errno's */
/* Bucket for out-of-range XDP action codes: one past the highest
 * known action (XDP_REDIRECT).  Parenthesized so the expansion is
 * safe in any expression context (e.g. multiplication or comparison).
 */
#define XDP_UNKNOWN	(XDP_REDIRECT + 1)
18 struct bpf_map_def SEC("maps") exception_cnt = {
19 .type = BPF_MAP_TYPE_PERCPU_ARRAY,
20 .key_size = sizeof(u32),
21 .value_size = sizeof(u64),
22 .max_entries = XDP_UNKNOWN + 1,
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in:           kernel/include/trace/events/xdp.h
 *
 * Field layout mirrors the tracepoint's raw record; offsets in the
 * comments come from the tracefs "format" file above.
 */
struct xdp_redirect_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int prog_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12  size:4; signed:0;
	int ifindex;		//	offset:16  size:4; signed:1;
	int err;		//	offset:20  size:4; signed:1;
	int to_ifindex;		//	offset:24  size:4; signed:1;
	u32 map_id;		//	offset:28  size:4; signed:0;
	int map_index;		//	offset:32  size:4; signed:1;
};				//	offset:36
/* Key values for redirect_err_cnt */
enum {
	XDP_REDIRECT_SUCCESS	= 0,
	XDP_REDIRECT_ERROR	= 1
};
44 static __always_inline
45 int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
47 u32 key = XDP_REDIRECT_ERROR;
48 int err = ctx->err;
49 u64 *cnt;
51 if (!err)
52 key = XDP_REDIRECT_SUCCESS;
54 cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
55 if (!cnt)
56 return 1;
57 *cnt += 1;
59 return 0; /* Indicate event was filtered (no further processing)*/
61 * Returning 1 here would allow e.g. a perf-record tracepoint
62 * to see and record these events, but it doesn't work well
63 * in-practice as stopping perf-record also unload this
64 * bpf_prog. Plus, there is additional overhead of doing so.
68 SEC("tracepoint/xdp/xdp_redirect_err")
69 int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
71 return xdp_redirect_collect_stat(ctx);
75 SEC("tracepoint/xdp/xdp_redirect_map_err")
76 int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
78 return xdp_redirect_collect_stat(ctx);
81 /* Likely unloaded when prog starts */
82 SEC("tracepoint/xdp/xdp_redirect")
83 int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
85 return xdp_redirect_collect_stat(ctx);
88 /* Likely unloaded when prog starts */
89 SEC("tracepoint/xdp/xdp_redirect_map")
90 int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
92 return xdp_redirect_collect_stat(ctx);
95 /* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
96 * Code in: kernel/include/trace/events/xdp.h
98 struct xdp_exception_ctx {
99 u64 __pad; // First 8 bytes are not accessible by bpf code
100 int prog_id; // offset:8; size:4; signed:1;
101 u32 act; // offset:12; size:4; signed:0;
102 int ifindex; // offset:16; size:4; signed:1;
105 SEC("tracepoint/xdp/xdp_exception")
106 int trace_xdp_exception(struct xdp_exception_ctx *ctx)
108 u64 *cnt;
109 u32 key;
111 key = ctx->act;
112 if (key > XDP_REDIRECT)
113 key = XDP_UNKNOWN;
115 cnt = bpf_map_lookup_elem(&exception_cnt, &key);
116 if (!cnt)
117 return 1;
118 *cnt += 1;
120 return 0;
123 /* Common stats data record shared with _user.c */
124 struct datarec {
125 u64 processed;
126 u64 dropped;
127 u64 info;
129 #define MAX_CPUS 64
131 struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
132 .type = BPF_MAP_TYPE_PERCPU_ARRAY,
133 .key_size = sizeof(u32),
134 .value_size = sizeof(struct datarec),
135 .max_entries = MAX_CPUS,
138 struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
139 .type = BPF_MAP_TYPE_PERCPU_ARRAY,
140 .key_size = sizeof(u32),
141 .value_size = sizeof(struct datarec),
142 .max_entries = 1,
145 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
146 * Code in: kernel/include/trace/events/xdp.h
148 struct cpumap_enqueue_ctx {
149 u64 __pad; // First 8 bytes are not accessible by bpf code
150 int map_id; // offset:8; size:4; signed:1;
151 u32 act; // offset:12; size:4; signed:0;
152 int cpu; // offset:16; size:4; signed:1;
153 unsigned int drops; // offset:20; size:4; signed:0;
154 unsigned int processed; // offset:24; size:4; signed:0;
155 int to_cpu; // offset:28; size:4; signed:1;
158 SEC("tracepoint/xdp/xdp_cpumap_enqueue")
159 int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
161 u32 to_cpu = ctx->to_cpu;
162 struct datarec *rec;
164 if (to_cpu >= MAX_CPUS)
165 return 1;
167 rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
168 if (!rec)
169 return 0;
170 rec->processed += ctx->processed;
171 rec->dropped += ctx->drops;
173 /* Record bulk events, then userspace can calc average bulk size */
174 if (ctx->processed > 0)
175 rec->info += 1;
177 return 0;
180 /* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
181 * Code in: kernel/include/trace/events/xdp.h
183 struct cpumap_kthread_ctx {
184 u64 __pad; // First 8 bytes are not accessible by bpf code
185 int map_id; // offset:8; size:4; signed:1;
186 u32 act; // offset:12; size:4; signed:0;
187 int cpu; // offset:16; size:4; signed:1;
188 unsigned int drops; // offset:20; size:4; signed:0;
189 unsigned int processed; // offset:24; size:4; signed:0;
190 int sched; // offset:28; size:4; signed:1;
193 SEC("tracepoint/xdp/xdp_cpumap_kthread")
194 int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
196 struct datarec *rec;
197 u32 key = 0;
199 rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
200 if (!rec)
201 return 0;
202 rec->processed += ctx->processed;
203 rec->dropped += ctx->drops;
205 /* Count times kthread yielded CPU via schedule call */
206 if (ctx->sched)
207 rec->info++;
209 return 0;