1 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2 /* Copyright (c) 2021 Facebook */
3 #ifndef __SKEL_INTERNAL_H
4 #define __SKEL_INTERNAL_H
7 #include <linux/fdtable.h>
9 #include <linux/mman.h>
10 #include <linux/slab.h>
11 #include <linux/bpf.h>
14 #include <sys/syscall.h>
/* Fallback __NR_bpf syscall numbers for the three MIPS ABIs (o32/n32/n64),
 * used when <sys/syscall.h> does not already define __NR_bpf.
 * NOTE(review): the surrounding #ifndef __NR_bpf / #endif lines are not
 * visible in this excerpt — confirm against the full header. */
21 # if defined(__mips__) && defined(_ABIO32)
22 # define __NR_bpf 4355
23 # elif defined(__mips__) && defined(_ABIN32)
24 # define __NR_bpf 6319
25 # elif defined(__mips__) && defined(_ABI64)
26 # define __NR_bpf 5315
30 /* This file is a base header for auto-generated *.lskel.h files.
31 * Its contents will change and may become part of auto-generation in the future.
33 * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent
34 * and will change from one version of libbpf to another and features
35 * requested during loader program generation.
38 /* output of the loader prog */
40 /* input for the loader prog */
/* NOTE(review): this excerpt is heavily truncated — most struct members are
 * missing.  Comments below describe only what is visible. */
/* Tail of a map descriptor: initial_value holds the address of the data used
 * to populate the map (see the rodata/bss population comment further down). */
42 __aligned_u64 initial_value
;
/* Descriptor of one loader-managed BPF program (members not shown here). */
44 struct bpf_prog_desc
{
/* Flag bit set on bpf_loader_ctx->flags when the skeleton runs inside the
 * kernel (see skel_alloc below). */
49 BPF_SKEL_KERNEL
= (1ULL << 0),
/* Context handed to the generated loader program; its layout is feature and
 * libbpf-version dependent (see the file-top comment). */
52 struct bpf_loader_ctx
{
/* Options for bpf_load_and_run(); ctx points at the loader context above. */
60 struct bpf_load_and_run_opts
{
61 struct bpf_loader_ctx
*ctx
;
/* In-kernel entry point for the bpf() syscall; skel_sys_bpf() below calls
 * this instead of syscall(__NR_bpf) when built into the kernel. */
69 long kern_sys_bpf(__u32 cmd
, void *attr
, __u32 attr_size
);
/* Thin wrapper over the bpf() syscall: kern_sys_bpf() for the in-kernel
 * build, syscall(__NR_bpf) for userspace.
 * NOTE(review): the #ifdef __KERNEL__ / #else / #endif lines and the 'size'
 * parameter declaration are missing from this excerpt — confirm against the
 * full header. */
71 static inline int skel_sys_bpf(enum bpf_cmd cmd
, union bpf_attr
*attr
,
75 return kern_sys_bpf(cmd
, attr
, size
);
77 return syscall(__NR_bpf
, cmd
, attr
, size
);
/* In-kernel replacement for close(2) so generated lskel code can call
 * close() unconditionally.  Body not visible in this excerpt. */
82 static inline int close(int fd
)
/* Kernel-side skeleton allocation: zero-allocate the skeleton and tag its
 * loader context with BPF_SKEL_KERNEL so the loader prog uses the kernel
 * data path (see the rodata/bss comment below). */
87 static inline void *skel_alloc(size_t size
)
89 struct bpf_loader_ctx
*ctx
= kzalloc(size
, GFP_KERNEL
);
/* NOTE(review): the NULL check after kzalloc() is missing from this
 * excerpt — confirm against the full header. */
93 ctx
->flags
|= BPF_SKEL_KERNEL
;
/* Kernel-side skeleton free (counterpart of skel_alloc above).
 * Body not visible in this excerpt. */
97 static inline void skel_free(const void *p
)
102 /* skel->bss/rodata maps are populated the following way:
105 * skel_prep_map_data() allocates kernel memory that kernel module can directly access.
106 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
107 * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
108 * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
109 * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
113 * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
114 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
115 * The loader program will perform copy_from_user() from maps.rodata.initial_value.
116 * skel_finalize_map_data() remaps bpf array map value from the kernel memory into
117 * skel->rodata address.
119 * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for
120 * both kernel and user space. The generated loader program does
121 * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
122 * depending on bpf_loader_ctx->flags.
/* Kernel side: release the buffer allocated by skel_prep_map_data() unless
 * ownership moved into the bpf map, which skel_finalize_map_data() signals
 * by setting initial_value (addr) to ~0ULL — see the big comment above. */
124 static inline void skel_free_map_data(void *p
, __u64 addr
, size_t sz
)
128 /* When addr == ~0ULL the 'p' points to
129 * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
/* Kernel side: allocate kernel memory and copy the map's initial value into
 * it; the loader prog later probe_read_kernel()s from this address (per the
 * comment block above).
 * NOTE(review): the 'addr' declaration, the kvmalloc() failure check and the
 * return statements are missing from this excerpt. */
133 static inline void *skel_prep_map_data(const void *val
, size_t mmap_sz
, size_t val_sz
)
137 addr
= kvmalloc(val_sz
, GFP_KERNEL
);
140 memcpy(addr
, val
, val_sz
);
/* Kernel side: after the loader prog ran, free the staging buffer and point
 * skel->rodata/bss at the live array map value; per the comment block above,
 * *init_val is set to ~0ULL so skel_free_map_data() knows not to kvfree.
 * NOTE(review): this excerpt is truncated — the assignment of ~0ULL, the
 * bpf_map_get() error check, the non-array cleanup path and the returns are
 * not visible; comments annotate only the visible lines. */
144 static inline void *skel_finalize_map_data(__u64
*init_val
, size_t mmap_sz
, int flags
, int fd
)
/* Drop the staging buffer allocated in skel_prep_map_data(). */
149 kvfree((void *) (long) *init_val
);
152 /* At this point bpf_load_and_run() finished without error and
153 * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
155 map
= bpf_map_get(fd
);
158 if (map
->map_type
!= BPF_MAP_TYPE_ARRAY
)
/* Point directly at the array map's value storage. */
160 addr
= ((struct bpf_array
*)map
)->value
;
161 /* the addr stays valid, since FD is not closed */
/* Userspace skeleton allocation: zero-initialized so every field of the
 * generated skeleton starts out NULL/0. */
static inline void *skel_alloc(size_t size)
{
	void *mem = calloc(1, size);

	return mem;
}
/* Userspace skeleton free (counterpart of the calloc in skel_alloc above).
 * Body not visible in this excerpt. */
174 static inline void skel_free(void *p
)
/* Userspace: release the mapping created by skel_prep_map_data() below.
 * Body not visible in this excerpt — presumably munmap; confirm against the
 * full header. */
179 static inline void skel_free_map_data(void *p
, __u64 addr
, size_t sz
)
/* Userspace: mmap anonymous memory for the map's initial value and copy
 * 'val' into it; the loader prog later copy_from_user()s from this address
 * (per the comment block above).
 * NOTE(review): the 'addr' declaration and the return statements are missing
 * from this excerpt. */
184 static inline void *skel_prep_map_data(const void *val
, size_t mmap_sz
, size_t val_sz
)
188 addr
= mmap(NULL
, mmap_sz
, PROT_READ
| PROT_WRITE
,
189 MAP_SHARED
| MAP_ANONYMOUS
, -1, 0);
/* mmap failure is reported as MAP_FAILED, i.e. (void *)-1, not NULL. */
190 if (addr
== (void *) -1)
192 memcpy(addr
, val
, val_sz
);
/* Userspace: remap the bpf array map's value (via its fd) over the address
 * that currently holds the initial data, so skel->rodata ends up pointing at
 * the live map value.  MAP_FIXED reuses the exact address stored in
 * *init_val by skel_prep_map_data().
 * NOTE(review): the 'addr' declaration and the return statements are missing
 * from this excerpt. */
196 static inline void *skel_finalize_map_data(__u64
*init_val
, size_t mmap_sz
, int flags
, int fd
)
200 addr
= mmap((void *) (long) *init_val
, mmap_sz
, flags
, MAP_SHARED
| MAP_FIXED
, fd
, 0);
/* MAP_FAILED check — mmap does not return NULL on error. */
201 if (addr
== (void *) -1)
/* Close-if-nonzero helper for skeleton teardown.  Body not visible in this
 * excerpt — presumably closes fd only when it is a valid (> 0) descriptor;
 * confirm against the full header. */
207 static inline int skel_closenz(int fd
)
/* Offset of the first byte past TYPE::MEMBER.  Used below to size
 * union bpf_attr only up to the last field a given bpf command reads,
 * keeping the syscall compatible with older kernels. */
215 #define offsetofend(TYPE, MEMBER) \
216 (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
/* Issue BPF_MAP_CREATE with attr initialized only up to map_extra.
 * NOTE(review): the key_size/value_size/max_entries parameter declarations
 * and the 'union bpf_attr attr;' local are missing from this excerpt.
 * NOTE(review): strncpy() can leave attr.map_name unterminated when map_name
 * exactly fills the buffer — the kernel treats map_name as a bounded field,
 * but worth confirming. */
219 static inline int skel_map_create(enum bpf_map_type map_type
,
220 const char *map_name
,
225 const size_t attr_sz
= offsetofend(union bpf_attr
, map_extra
);
228 memset(&attr
, 0, attr_sz
);
230 attr
.map_type
= map_type
;
231 strncpy(attr
.map_name
, map_name
, sizeof(attr
.map_name
));
232 attr
.key_size
= key_size
;
233 attr
.value_size
= value_size
;
234 attr
.max_entries
= max_entries
;
236 return skel_sys_bpf(BPF_MAP_CREATE
, &attr
, attr_sz
);
/* Issue BPF_MAP_UPDATE_ELEM; key/value pointers are passed to the kernel as
 * u64 via the (long) casts.
 * NOTE(review): the 'union bpf_attr attr;' local and the attr.map_fd /
 * attr.flags assignments are missing from this excerpt. */
239 static inline int skel_map_update_elem(int fd
, const void *key
,
240 const void *value
, __u64 flags
)
242 const size_t attr_sz
= offsetofend(union bpf_attr
, flags
);
245 memset(&attr
, 0, attr_sz
);
247 attr
.key
= (long) key
;
248 attr
.value
= (long) value
;
251 return skel_sys_bpf(BPF_MAP_UPDATE_ELEM
, &attr
, attr_sz
);
/* Issue BPF_MAP_DELETE_ELEM for 'key' on map 'fd'.
 * NOTE(review): the 'union bpf_attr attr;' local and the attr.map_fd
 * assignment are missing from this excerpt. */
254 static inline int skel_map_delete_elem(int fd
, const void *key
)
256 const size_t attr_sz
= offsetofend(union bpf_attr
, flags
);
259 memset(&attr
, 0, attr_sz
);
261 attr
.key
= (long)key
;
263 return skel_sys_bpf(BPF_MAP_DELETE_ELEM
, &attr
, attr_sz
);
/* Issue BPF_MAP_GET_FD_BY_ID; returns a new fd for the map with 'id'.
 * NOTE(review): the 'union bpf_attr attr;' local and the attr.map_id = id
 * assignment are missing from this excerpt. */
266 static inline int skel_map_get_fd_by_id(__u32 id
)
268 const size_t attr_sz
= offsetofend(union bpf_attr
, flags
);
271 memset(&attr
, 0, attr_sz
);
274 return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID
, &attr
, attr_sz
);
/* Issue BPF_RAW_TRACEPOINT_OPEN: attach prog_fd to the raw tracepoint named
 * 'name'; returns a link fd on success.
 * NOTE(review): the 'union bpf_attr attr;' local is missing from this
 * excerpt. */
277 static inline int skel_raw_tracepoint_open(const char *name
, int prog_fd
)
279 const size_t attr_sz
= offsetofend(union bpf_attr
, raw_tracepoint
.prog_fd
);
282 memset(&attr
, 0, attr_sz
);
283 attr
.raw_tracepoint
.name
= (long) name
;
284 attr
.raw_tracepoint
.prog_fd
= prog_fd
;
286 return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN
, &attr
, attr_sz
);
/* Issue BPF_LINK_CREATE: attach prog_fd to target_fd with the given attach
 * type; returns a link fd on success.
 * NOTE(review): the 'union bpf_attr attr;' local is missing from this
 * excerpt. */
289 static inline int skel_link_create(int prog_fd
, int target_fd
,
290 enum bpf_attach_type attach_type
)
292 const size_t attr_sz
= offsetofend(union bpf_attr
, link_create
.iter_info_len
);
295 memset(&attr
, 0, attr_sz
);
296 attr
.link_create
.prog_fd
= prog_fd
;
297 attr
.link_create
.target_fd
= target_fd
;
298 attr
.link_create
.attach_type
= attach_type
;
300 return skel_sys_bpf(BPF_LINK_CREATE
, &attr
, attr_sz
);
/* Record the failure as -errno in the local 'err' after a failed bpf()
 * call (userspace build; in-kernel bpf() already returns the error). */
306 #define set_err err = -errno
/* Core lskel bootstrap: create a one-element "__loader.map" holding
 * opts->data, load the generated loader program (BPF_PROG_TYPE_SYSCALL),
 * then execute it once via BPF_PROG_RUN with opts->ctx as its context; the
 * loader prog creates the skeleton's real maps and programs.
 * NOTE(review): this excerpt is truncated — the 'union bpf_attr attr;'
 * local, the error-branch control flow after each set of opts->errstr
 * assignments, and the function tail (fd cleanup, return) are missing, and
 * the function continues past the end of the visible chunk.  Comments below
 * annotate only the visible lines. */
309 static inline int bpf_load_and_run(struct bpf_load_and_run_opts
*opts
)
/* Size attr only up to the last field each command needs (see offsetofend). */
311 const size_t prog_load_attr_sz
= offsetofend(union bpf_attr
, fd_array
);
312 const size_t test_run_attr_sz
= offsetofend(union bpf_attr
, test
);
313 int map_fd
= -1, prog_fd
= -1, key
= 0, err
;
/* Single-entry array map whose value is the loader blob (opts->data). */
316 err
= map_fd
= skel_map_create(BPF_MAP_TYPE_ARRAY
, "__loader.map", 4, opts
->data_sz
, 1);
318 opts
->errstr
= "failed to create loader map";
/* Populate slot 0 with the loader data. */
323 err
= skel_map_update_elem(map_fd
, &key
, opts
->data
, 0);
325 opts
->errstr
= "failed to update loader map";
/* Load the loader program itself. */
330 memset(&attr
, 0, prog_load_attr_sz
);
331 attr
.prog_type
= BPF_PROG_TYPE_SYSCALL
;
332 attr
.insns
= (long) opts
->insns
;
333 attr
.insn_cnt
= opts
->insns_sz
/ sizeof(struct bpf_insn
);
334 attr
.license
= (long) "Dual BSD/GPL";
335 memcpy(attr
.prog_name
, "__loader.prog", sizeof("__loader.prog"));
/* fd_array lets loader insns reference the map by index 0. */
336 attr
.fd_array
= (long) &map_fd
;
337 attr
.log_level
= opts
->ctx
->log_level
;
338 attr
.log_size
= opts
->ctx
->log_size
;
339 attr
.log_buf
= opts
->ctx
->log_buf
;
340 attr
.prog_flags
= BPF_F_SLEEPABLE
;
341 err
= prog_fd
= skel_sys_bpf(BPF_PROG_LOAD
, &attr
, prog_load_attr_sz
);
343 opts
->errstr
= "failed to load loader prog";
/* Run the loader prog once; opts->ctx is its input/output context. */
348 memset(&attr
, 0, test_run_attr_sz
);
349 attr
.test
.prog_fd
= prog_fd
;
350 attr
.test
.ctx_in
= (long) opts
->ctx
;
351 attr
.test
.ctx_size_in
= opts
->ctx
->sz
;
352 err
= skel_sys_bpf(BPF_PROG_RUN
, &attr
, test_run_attr_sz
);
/* Failure either from the syscall itself or a negative retval from the
 * loader prog; retval is cast to int because attr.test.retval is __u32. */
353 if (err
< 0 || (int)attr
.test
.retval
< 0) {
355 opts
->errstr
= "failed to execute loader prog";
358 opts
->errstr
= "error returned by loader prog";
359 err
= (int)attr
.test
.retval
;