// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2019 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/utsname.h>

#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"

/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
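
/*
 * Feature probes: each bpf_probe_*() entry point below attempts the relevant
 * BPF operation against the running kernel (or a device, when ifindex is
 * non-zero) and reports whether it appears to be supported. Illustrative
 * usage from an application linked against libbpf:
 *
 *	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
 *		printf("XDP programs are supported\n");
 *	if (bpf_probe_map_type(BPF_MAP_TYPE_SK_STORAGE, 0))
 *		printf("sk_storage maps are supported\n");
 */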
static bool grep(const char *buffer, const char *pattern)
{
	return !!strstr(buffer, pattern);
}
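
/* Read the PCI vendor ID of the device backing ifindex from sysfs, so that
 * probes can account for vendor-specific verifier/driver behaviour.
 */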
static int get_vendor_id(int ifindex)
{
	char ifname[IF_NAMESIZE], path[64], buf[8];
	ssize_t len;
	int fd;

	if (!if_indextoname(ifindex, ifname))
		return -1;

	snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	/* sysfs exposes the vendor as a small hex string, e.g. "0x19ee" */
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len < 0)
		return -1;
	if (len >= (ssize_t)sizeof(buf))
		return -1;
	buf[len] = '\0';

	return strtol(buf, NULL, 0);
}
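
/* Encode the running kernel version the way KERNEL_VERSION() does,
 * e.g. 5.4.0 becomes (5 << 16) + (4 << 8) + 0 = 0x050400. Older kernels
 * require a matching kern_version when loading BPF_PROG_TYPE_KPROBE
 * programs, hence the helper below.
 */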
static int get_kernel_version(void)
{
	int version, subversion, patchlevel;
	struct utsname utsn;

	/* Return 0 on failure, and attempt to probe with empty kversion */
	if (uname(&utsn))
		return 0;

	if (sscanf(utsn.release, "%d.%d.%d",
		   &version, &subversion, &patchlevel) != 3)
		return 0;

	return (version << 16) + (subversion << 8) + patchlevel;
}
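
/* Try to load a minimal program of the given type; callers observe the
 * result through errno and, optionally, through the verifier log in buf.
 * Any fd obtained for the probe program is closed before returning.
 */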
static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
	   size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
	struct bpf_load_program_attr xattr = {};
	int fd;

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
		break;
	case BPF_PROG_TYPE_KPROBE:
		xattr.kern_version = get_kernel_version();
		break;
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_EXT:
	default:
		break;
	}

	xattr.prog_type = prog_type;
	xattr.insns = insns;
	xattr.insns_cnt = insns_cnt;
	xattr.license = "GPL";
	xattr.prog_ifindex = ifindex;

	fd = bpf_load_program_xattr(&xattr, buf, buf_len);
	if (fd >= 0)
		close(fd);
}
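
/* A program type is considered available unless the probe load fails with
 * EINVAL (type unknown to this kernel) or EOPNOTSUPP (e.g. not supported
 * for offload to the device behind ifindex).
 */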
bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN()
	};

	if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
		/* nfp returns -EINVAL on exit(0) with TC offload */
		insns[0].imm = 2;

	errno = 0;
	probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);

	return errno != EINVAL && errno != EOPNOTSUPP;
}
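
/* Glue a raw BTF blob together in memory (header, then type section, then
 * string section) and try to load it into the kernel. Returns the BTF fd on
 * success or a negative value on failure.
 */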
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = types_len,
		.str_off = types_len,
		.str_len = str_len,
	};
	int btf_fd, btf_len;
	__u8 *raw_btf;

	btf_len = hdr.hdr_len + hdr.type_len + hdr.str_len;
	raw_btf = malloc(btf_len);
	if (!raw_btf)
		return -ENOMEM;

	memcpy(raw_btf, &hdr, sizeof(hdr));
	memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
	memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);

	btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);

	free(raw_btf);
	return btf_fd;
}
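
/* BPF_MAP_TYPE_SK_STORAGE can only be created with BTF describing its key
 * and value, so build a minimal BTF (an int key, plus a value struct holding
 * an int and a bpf_spin_lock) to probe it with.
 */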
static int load_sk_storage_btf(void)
{
	const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
	/* struct bpf_spin_lock {
	 *   int val;
	 * };
	 * struct val {
	 *   int cnt;
	 *   struct bpf_spin_lock l;
	 * };
	 */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
		/* struct bpf_spin_lock */                      /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
		BTF_MEMBER_ENC(15, 1, 0), /* int val; */
		/* struct val */                                /* [3] */
		BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
		BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
		BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	};

	return libbpf__load_raw_btf((char *)types, sizeof(types),
				    strs, sizeof(strs));
}
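
/* Probe for map type support by attempting to create a small map of that
 * type, adjusting key/value sizes, flags and BTF where the type has extra
 * requirements.
 */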
bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
	int key_size, value_size, max_entries, map_flags;
	__u32 btf_key_type_id = 0, btf_value_type_id = 0;
	struct bpf_create_map_attr attr = {};
	int fd = -1, btf_fd = -1, fd_inner;

	key_size = sizeof(__u32);
	value_size = sizeof(__u32);
	max_entries = 1;
	map_flags = 0;

	switch (map_type) {
	case BPF_MAP_TYPE_STACK_TRACE:
		value_size = sizeof(__u64);
		break;
	case BPF_MAP_TYPE_LPM_TRIE:
		key_size = sizeof(__u64);
		value_size = sizeof(__u64);
		map_flags = BPF_F_NO_PREALLOC;
		break;
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
		key_size = sizeof(struct bpf_cgroup_storage_key);
		value_size = sizeof(__u64);
		max_entries = 0;
		break;
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
		key_size = 0;
		break;
	case BPF_MAP_TYPE_SK_STORAGE:
		btf_key_type_id = 1;
		btf_value_type_id = 3;
		value_size = 8;
		max_entries = 0;
		map_flags = BPF_F_NO_PREALLOC;
		btf_fd = load_sk_storage_btf();
		if (btf_fd < 0)
			return false;
		break;
	case BPF_MAP_TYPE_UNSPEC:
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_LRU_HASH:
	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
	case BPF_MAP_TYPE_STRUCT_OPS:
	default:
		break;
	}

	if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		/* TODO: probe for device, once libbpf has a function to create
		 * map-in-map for offload
		 */
		if (ifindex)
			return false;

		fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
					  sizeof(__u32), sizeof(__u32), 1, 0);
		if (fd_inner < 0)
			return false;
		fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
					   fd_inner, 1, 0);
		close(fd_inner);
	} else {
		/* Note: No other restriction on map type probes for offload */
		attr.map_type = map_type;
		attr.key_size = key_size;
		attr.value_size = value_size;
		attr.max_entries = max_entries;
		attr.map_flags = map_flags;
		attr.map_ifindex = ifindex;
		if (btf_fd >= 0) {
			attr.btf_fd = btf_fd;
			attr.btf_key_type_id = btf_key_type_id;
			attr.btf_value_type_id = btf_value_type_id;
		}

		fd = bpf_create_map_xattr(&attr);
	}
	if (fd >= 0)
		close(fd);
	if (btf_fd >= 0)
		close(btf_fd);

	return fd >= 0;
}
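
/* Probe for helper availability by loading a single-call program of the
 * given type and checking whether the verifier log complains about an
 * invalid or unknown function.
 */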
bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
		      __u32 ifindex)
{
	struct bpf_insn insns[2] = {
		BPF_EMIT_CALL(id),
		BPF_EXIT_INSN()
	};
	char buf[4096] = {};
	bool res;

	probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
		   ifindex);
	res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");

	if (ifindex) {
		switch (get_vendor_id(ifindex)) {
		case 0x19ee: /* Netronome specific */
			res = res && !grep(buf, "not supported by FW") &&
				!grep(buf, "unsupported function id");
			break;
		default:
			break;
		}
	}

	return res;
}
/*
 * Probe for availability of kernel commit (5.3):
 *
 * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
 */
bool bpf_probe_large_insn_limit(__u32 ifindex)
{
	struct bpf_insn insns[BPF_MAXINSNS + 1];
	int i;

	for (i = 0; i < BPF_MAXINSNS; i++)
		insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insns[BPF_MAXINSNS] = BPF_EXIT_INSN();

	errno = 0;
	probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
		   ifindex);

	return errno != E2BIG && errno != EINVAL;
}