// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/version.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
 * but Ubuntu provides /proc/version_signature file, as described at
 * https://ubuntu.com/kernel, with an example contents below, which we
 * can use to get a proper LINUX_VERSION_CODE.
 *
 *   Ubuntu 5.4.0-12.15-generic 5.4.8
 *
 * In the above, 5.4.8 is what kernel is actually expecting, while
 * uname() call will return 5.4.0 in info.release.
 */
static __u32 get_ubuntu_kernel_version(void)
{
	const char *ubuntu_kver_file = "/proc/version_signature";
	__u32 major, minor, patch;
	int ret;
	FILE *f;

	if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) != 0)
		return 0;

	f = fopen(ubuntu_kver_file, "re");
	if (!f)
		return 0;

	ret = fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch);
	fclose(f);
	if (ret != 3)
		return 0;

	return KERNEL_VERSION(major, minor, patch);
}

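/* Example (illustrative): for a /proc/version_signature line of
 * "Ubuntu 5.4.0-12.15-generic 5.4.8" the parse above yields
 * KERNEL_VERSION(5, 4, 8), i.e. (5 << 16) + (4 << 8) + 8 == 0x050408.
 */
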
/* On Debian LINUX_VERSION_CODE doesn't correspond to info.release.
 * Instead, it is provided in info.version. An example content of
 * Debian 10 looks like the below.
 *
 *   utsname::release   4.19.0-22-amd64
 *   utsname::version   #1 SMP Debian 4.19.260-1 (2022-09-29)
 *
 * In the above, 4.19.260 is what kernel is actually expecting, while
 * uname() call will return 4.19.0 in info.release.
 */
static __u32 get_debian_kernel_version(struct utsname *info)
{
	__u32 major, minor, patch;
	char *p;

	p = strstr(info->version, "Debian ");
	if (!p) {
		/* This is not a Debian kernel. */
		return 0;
	}

	if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3)
		return 0;

	return KERNEL_VERSION(major, minor, patch);
}

__u32 get_kernel_version(void)
{
	__u32 major, minor, patch, version;
	struct utsname info;

	/* Check if this is an Ubuntu kernel. */
	version = get_ubuntu_kernel_version();
	if (version != 0)
		return version;

	uname(&info);

	/* Check if this is a Debian kernel. */
	version = get_debian_kernel_version(&info);
	if (version != 0)
		return version;

	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;

	return KERNEL_VERSION(major, minor, patch);
}

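/* Note (illustrative): the value computed here is plugged into
 * opts.kern_version below for BPF_PROG_TYPE_KPROBE probes; kernels
 * older than 5.0 reject kprobe programs whose kern_version does not
 * match the running kernel, which is why the distro-specific fixups
 * above matter.
 */
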
static int probe_prog_load(enum bpf_prog_type prog_type,
			   const struct bpf_insn *insns, size_t insns_cnt,
			   char *log_buf, size_t log_buf_sz)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_buf_sz,
		.log_level = log_buf ? 1 : 0,
	);
	int fd, err, exp_err = 0;
	const char *exp_msg = NULL;
	char buf[4096];

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
		break;
	case BPF_PROG_TYPE_SK_LOOKUP:
		opts.expected_attach_type = BPF_SK_LOOKUP;
		break;
	case BPF_PROG_TYPE_KPROBE:
		opts.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_LIRC_MODE2:
		opts.expected_attach_type = BPF_LIRC_MODE2;
		break;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		if (prog_type == BPF_PROG_TYPE_TRACING)
			opts.expected_attach_type = BPF_TRACE_FENTRY;
		else
			opts.expected_attach_type = BPF_MODIFY_RETURN;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "attach_btf_id 1 is not a function";
		break;
	case BPF_PROG_TYPE_EXT:
		opts.log_buf = buf;
		opts.log_size = sizeof(buf);
		opts.log_level = 1;
		opts.attach_btf_id = 1;

		exp_err = -EINVAL;
		exp_msg = "Cannot replace kernel functions";
		break;
	case BPF_PROG_TYPE_SYSCALL:
		opts.prog_flags = BPF_F_SLEEPABLE;
		break;
	case BPF_PROG_TYPE_STRUCT_OPS:
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
		break;
	case BPF_PROG_TYPE_NETFILTER:
		opts.expected_attach_type = BPF_NETFILTER;
		break;
	default:
		return -EOPNOTSUPP;
	}

	fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
	err = -errno;
	if (fd >= 0)
		close(fd);
	if (exp_err) {
		if (fd >= 0 || err != exp_err)
			return 0;
		if (exp_msg && !strstr(buf, exp_msg))
			return 0;
		return 1;
	}
	return fd >= 0 ? 1 : 0;
}

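/* Return convention of probe_prog_load(), summarized for readability:
 * 1 means the kernel accepted the minimal program (or, for the
 * expected-error cases above, rejected it in exactly the expected way),
 * 0 means the program type is not supported, and a negative value
 * (-EOPNOTSUPP) means the probe cannot be performed for this type.
 */
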
int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0);
	return libbpf_err(ret);
}

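/* Usage sketch (illustrative): callers typically feature-detect a program
 * type before relying on it, e.g.
 *
 *	if (libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL) == 1)
 *		... kernel supports XDP programs ...
 *
 * The result is 1 when supported, 0 when not supported, and a negative
 * error code if the probe itself failed; opts must currently be NULL.
 */
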
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len,
			 int token_fd)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		.token_fd = token_fd,
		.btf_flags = token_fd ? BPF_F_TOKEN_FD : 0,
	);
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_btf_load(raw_btf, btf_len, &opts);

	free(raw_btf);
	return btf_fd;
}

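/* Layout of the raw BTF blob assembled above (informational):
 *
 *	+------------+------------------+------------------+
 *	| btf_header | type data        | string data      |
 *	| hdr_len    | type_len bytes   | str_len bytes    |
 *	+------------+------------------+------------------+
 *
 * type_off and str_off are relative to the end of the header, which is
 * why .str_off is set to types_len while .type_off stays 0.
 */
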
static int load_local_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs), 0);
}

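/* String-offset map for the strs blob above (informational): offset 1 is
 * "bpf_spin_lock", 15 is "val", 19 is "cnt" and 23 is "l", which is how
 * the BTF_TYPE_ENC()/BTF_MEMBER_ENC() name_off arguments resolve to the
 * names in the commented-out C definitions.
 */
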
static int probe_map_create(enum bpf_map_type map_type)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int key_size, value_size, max_entries;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err = 0;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size = sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size = sizeof(__u64);
		value_size = sizeof(__u64);
		opts.map_flags = BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size = sizeof(struct bpf_cgroup_storage_key);
		value_size = sizeof(__u64);
		max_entries = 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size = 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
	case BPF_MAP_TYPE_INODE_STORAGE:
	case BPF_MAP_TYPE_TASK_STORAGE:
	case BPF_MAP_TYPE_CGRP_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		opts.map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_local_storage_btf();
		if (btf_fd < 0)
			return btf_fd;
		break;
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_USER_RINGBUF:
		key_size = 0;
		value_size = 0;
		max_entries = sysconf(_SC_PAGE_SIZE);
		break;
	case BPF_MAP_TYPE_STRUCT_OPS:
		/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
		opts.btf_vmlinux_value_type_id = 1;
		opts.value_type_btf_obj_fd = -1;
		exp_err = -524; /* -ENOTSUPP */
		break;
	case BPF_MAP_TYPE_BLOOM_FILTER:
		key_size = 0;
		max_entries = 1;
		break;
	case BPF_MAP_TYPE_ARENA:
		key_size = 0;
		value_size = 0;
		max_entries = 1; /* one page */
		opts.map_extra = 0; /* can mmap() at any address */
		opts.map_flags = BPF_F_MMAPABLE;
		break;
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
		break;
	case BPF_MAP_TYPE_UNSPEC:
	default:
		return -EOPNOTSUPP;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
					  sizeof(__u32), sizeof(__u32), 1, NULL);
		if (fd_inner < 0)
			goto cleanup;

		opts.inner_map_fd = fd_inner;
	}

	if (btf_fd >= 0) {
		opts.btf_fd = btf_fd;
		opts.btf_key_type_id = btf_key_type_id;
		opts.btf_value_type_id = btf_value_type_id;
	}

	fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
	err = -errno;

cleanup:
	if (fd >= 0)
		close(fd);
	if (fd_inner >= 0)
		close(fd_inner);
	if (btf_fd >= 0)
		close(btf_fd);

	if (exp_err)
		return fd < 0 && err == exp_err ? 1 : 0;
	else
		return fd >= 0 ? 1 : 0;
}

int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
{
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	ret = probe_map_create(map_type);
	return libbpf_err(ret);
}

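/* Usage sketch (illustrative): e.g. decide between a BPF ring buffer and
 * a perf buffer based on kernel support:
 *
 *	bool has_ringbuf;
 *
 *	has_ringbuf = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL) == 1;
 *
 * As above, opts is reserved and must be NULL for now.
 */
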
int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
			    const void *opts)
{
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL((__u32)helper_id),
		BPF_EXIT_INSN(),
	};
	const size_t insn_cnt = ARRAY_SIZE(insns);
	char buf[4096];
	int ret;

	if (opts)
		return libbpf_err(-EINVAL);

	/* we can't successfully load all prog types to check for BPF helper
	 * support, so bail out with -EOPNOTSUPP error
	 */
	switch (prog_type) {
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_EXT:
	case BPF_PROG_TYPE_LSM:
	case BPF_PROG_TYPE_STRUCT_OPS:
		return -EOPNOTSUPP;
	default:
		break;
	}

	buf[0] = '\0';
	ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf));
	if (ret < 0)
		return libbpf_err(ret);

	/* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
	 * at all, it will emit something like "invalid func unknown#181".
	 * If BPF verifier recognizes BPF helper but it's not supported for
	 * given BPF program type, it will emit "unknown func bpf_sys_bpf#166"
	 * or "program of this type cannot use helper bpf_sys_bpf#166".
	 * In both cases, provided combination of BPF program type and BPF
	 * helper is not supported by the kernel.
	 * In all other cases, probe_prog_load() above will either succeed (e.g.,
	 * because BPF helper happens to accept no input arguments or it
	 * accepts one input argument and initial PTR_TO_CTX is fine for
	 * that), or we'll get some more specific BPF verifier error about
	 * some unsatisfied conditions.
	 */
	if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ") ||
			 strstr(buf, "program of this type cannot use helper ")))
		return 0;
	return 1; /* assume supported */
}
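
/* Usage sketch (illustrative): check whether bpf_ringbuf_output() can be
 * called from a tracepoint program on the running kernel:
 *
 *	libbpf_probe_bpf_helper(BPF_PROG_TYPE_TRACEPOINT,
 *				BPF_FUNC_ringbuf_output, NULL);
 *
 * 1 means the helper is supported for that program type, 0 means it is
 * not, and a negative value means the probe itself failed.
 */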