// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}
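
/* Read the PCI vendor ID of the device backing @ifindex from sysfs.  The
 * "vendor" attribute is a hex string (e.g. 0x19ee for Netronome NICs,
 * which bpf_probe_helper() below special-cases), so strtol() is called
 * with base 0 to honour the "0x" prefix.
 */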
static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}
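
/* Encode the running kernel's "major.minor.patch" release the same way
 * LINUX_VERSION_CODE does; for example (illustrative), a 5.4.0 kernel
 * yields (5 << 16) + (4 << 8) + 0 == 0x050400.
 */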
static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}
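
/* Load a minimal program of @prog_type through the bpf() syscall purely to
 * observe how the kernel reacts: callers inspect errno and/or the verifier
 * log written into @buf.  The program fd is closed right away if the load
 * succeeds.
 */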
static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_EXT:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}
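
/* Probe whether the kernel (or, for a non-zero @ifindex, the offloading
 * device) accepts programs of @prog_type.  A minimal usage sketch, not
 * part of the original file:
 *
 *	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
 *		printf("XDP program type supported\n");
 */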
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}
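
/* Build an in-memory BTF blob -- header, then type section, then string
 * section -- and load it with bpf_load_btf().  Returns the new BTF fd on
 * success, a negative value on failure.
 */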
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);

	free(raw_btf);
	return btf_fd;
}
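
/* BPF_MAP_TYPE_SK_STORAGE maps require BTF describing their key and value
 * types.  The name offsets used below index into @strs: 1 is
 * "bpf_spin_lock", 15 is "val", 19 is "cnt" and 23 is "l".
 */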
static int load_sk_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
}
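
/* Probe whether the kernel (or the device at @ifindex) supports maps of
 * @map_type.  Illustrative usage, not part of the original file:
 *
 *	if (!bpf_probe_map_type(BPF_MAP_TYPE_SK_STORAGE, 0))
 *		fprintf(stderr, "sk_storage maps not supported\n");
 */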
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;
	map_flags = 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size = sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size = sizeof(__u64);
		value_size = sizeof(__u64);
		map_flags = BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size = sizeof(struct bpf_cgroup_storage_key);
		value_size = sizeof(__u64);
		max_entries = 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size = 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_sk_storage_btf();
		if (btf_fd < 0)
			return false;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STRUCT_OPS:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
}
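
/* Probe whether helper @id can be called from programs of @prog_type by
 * checking the verifier log for "invalid func"/"unknown func" complaints.
 * Illustrative usage, not part of the original file:
 *
 *	if (bpf_probe_helper(BPF_FUNC_map_lookup_elem, BPF_PROG_TYPE_XDP, 0))
 *		printf("bpf_map_lookup_elem() usable from XDP\n");
 */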
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
			      !grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}
/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
bool bpf_probe_large_insn_limit(__u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
		   ifindex);

	return errno != E2BIG && errno != EINVAL;
}
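
/* Illustrative feature-detection sketch, not part of the original file: a
 * loader could combine these probes before picking a program layout, e.g.
 *
 *	bool big_progs = bpf_probe_large_insn_limit(0);
 *	bool sk_storage = bpf_probe_map_type(BPF_MAP_TYPE_SK_STORAGE, 0);
 */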