// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include "libbpf_internal.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>
#include "str_error.h"

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * stack layout:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that the loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
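/* For illustration: with the layout above, sizeof(struct loader_stack) is
 * 4 + 4 + 32 * 4 = 136 bytes, so
 *
 *	stack_off(btf_fd)        == (__s16)(-136 + 0) == -136
 *	stack_off(inner_map_fd)  == (__s16)(-136 + 4) == -132
 *	stack_off(prog_fd[0])    == (__s16)(-136 + 8) == -128
 *
 * i.e. every field is addressed as a negative offset from R10 (the frame
 * pointer). attr_field(A, map_fd) similarly yields the blob offset of
 * attr.map_fd when A is the blob offset of a union bpf_attr.
 */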
static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);

void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}
/* Add data to the data blob (zero-padded to 8 bytes) and return its offset
 * relative to the start of the blob. If data is NULL, reserve zeroed space.
 */
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}
/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
 * to start of fd_array. Caller can decide if it is usable or not.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}
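/* A sketch of what the ld_imm64 pair above becomes at load time (assuming the
 * single blob map set up by skel_internal.h as fd_array[0]):
 * BPF_PSEUDO_MAP_IDX_VALUE tells the kernel to rewrite the 64-bit immediate
 * with the address of map index 0 (the loader's data blob) plus the given
 * offset, so the store amounts to
 *
 *	*(u64 *)(blob + off) = (u64)(blob + data);
 *
 * i.e. a pointer field inside a blob-resident union bpf_attr is patched to
 * point at another region of the same blob.
 */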
static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}
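/* Illustrative arithmetic: insn offsets are byte-based, so dividing by 8
 * (sizeof(struct bpf_insn)) converts to an instruction count, and the extra
 * -1 accounts for the branch insn itself. E.g. if the cleanup label is 40
 * insns (320 bytes) behind the current position, off == -320 / 8 - 1 == -41,
 * which fits the signed 16-bit jump offset that is_simm16() checks for.
 */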
/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns
			       * 6 is additional insns in debug_regs
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}
static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %s\n", errstr(gen->error));
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;

		/* use target endianness for embedded loader */
		if (gen->swapped_endian) {
			struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
			int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);

			for (i = 0; i < insn_cnt; i++)
				bpf_insn_bswap(insn++);
		}
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

/*
 * Fields of bpf_attr are set to values in native byte-order before being
 * written to the target-bound data blob, and may need endian conversion.
 * This macro allows providing the correct value in situ more simply than
 * writing a separate converter for *all fields* of *all records* included
 * in union bpf_attr. Note that sizeof(rval) should match the assignment
 * target to avoid runtime problems.
 */
#define tgt_endian(rval) ({					\
	typeof(rval) _val = (rval);				\
	if (gen->swapped_endian) {				\
		switch (sizeof(_val)) {				\
		case 1: break;					\
		case 2: _val = bswap_16(_val); break;		\
		case 4: _val = bswap_32(_val); break;		\
		case 8: _val = bswap_64(_val); break;		\
		default: pr_warn("unsupported bswap size!\n");	\
		}						\
	}							\
	_val;							\
})
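/* Usage sketch (hypothetical values): on a host generating a loader for an
 * opposite-endian target,
 *
 *	attr.max_entries = tgt_endian((__u32)128);
 *
 * stores 128 byte-swapped via bswap_32(), while for a same-endian target the
 * value passes through untouched. The switch is on sizeof(), so the macro
 * must be fed an rvalue of the same width as the field being assigned.
 */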
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = tgt_endian(btf_raw_size);
	btf_load_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: load_btf: off %d size %d, attr: off %d size %d\n",
		 btf_data, btf_raw_size, btf_load_attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = tgt_endian(map_type);
	attr.key_size = tgt_endian(key_size);
	attr.value_size = tgt_endian(value_size);
	attr.map_flags = tgt_endian(map_attr->map_flags);
	attr.map_extra = tgt_endian(map_attr->map_extra);
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = tgt_endian(map_attr->numa_node);
	attr.map_ifindex = tgt_endian(map_attr->map_ifindex);
	attr.max_entries = tgt_endian(max_entries);
	attr.btf_key_type_id = tgt_endian(map_attr->btf_key_type_id);
	attr.btf_value_type_id = tgt_endian(map_attr->btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d, attr: off %d size %d\n",
		 map_name, map_idx, map_type, map_attr->btf_value_type_id,
		 map_create_attr, attr_size);

	if (map_attr->btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  map_name, map_idx, map_type, value_size,
		  map_attr->btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}
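/* For illustration (prefix/kind values as assumed from libbpf's
 * btf_get_kernel_prefix_kind()): for a raw tracepoint attach the prefix is
 * "btf_trace_", so attaching to "sched_switch" records the target
 * "btf_trace_sched_switch", which emit_find_attach_target() below resolves
 * at load time via bpf_btf_find_by_name_kind().
 */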
static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, bool is_ld64, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->is_ld64 = is_ld64;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	int i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 &&
		    !strcmp(kdesc->name, relo->name)) {
			kdesc->ref++;
			return kdesc;
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	kdesc->is_ld64 = relo->is_ld64;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot, 0 for vmlinux */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* jump to insn[insn_idx].off store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(struct bpf_gen *gen)
{
#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
	return gen->swapped_endian ? 0xf0 : 0x0f;
#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
	return gen->swapped_endian ? 0x0f : 0xf0;
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}
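/* Worked example (little-endian host, non-swapped output): in struct bpf_insn
 * the dst_reg/src_reg nibbles share the byte after 'code', with dst_reg in
 * the low nibble. A ld_imm64 with dst_reg == 2 and src_reg ==
 * BPF_PSEUDO_BTF_ID (3) has regs byte 0x32; AND-ing with the 0x0f mask
 * returned above clears src_reg and leaves 0x02, turning the insn back into
 * a plain 64-bit immediate load.
 */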
/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
		 * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
		 */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask(gen);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d %s\n",
		 relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call");
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	if (relo->is_ld64) {
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
	} else {
		emit_relo_kfunc_btf(gen, relo, insn);
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	struct ksym_desc *kdesc;
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		kdesc = &gen->ksyms[i];
		/* only close fds for typed ksyms and kfuncs */
		if (kdesc->is_ld64 && !kdesc->typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = kdesc->insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (!kdesc->is_ld64) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off));
			if (kdesc->off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

/* Convert func, line, and core relo info blobs to target endianness */
static void info_blob_bswap(struct bpf_gen *gen, int func_info, int line_info,
			    int core_relos, struct bpf_prog_load_opts *load_attr)
{
	struct bpf_func_info *fi = gen->data_start + func_info;
	struct bpf_line_info *li = gen->data_start + line_info;
	struct bpf_core_relo *cr = gen->data_start + core_relos;
	int i;

	for (i = 0; i < load_attr->func_info_cnt; i++)
		bpf_func_info_bswap(fi++);
	for (i = 0; i < load_attr->line_info_cnt; i++)
		bpf_line_info_bswap(li++);
	for (i = 0; i < gen->core_relo_cnt; i++)
		bpf_core_relo_bswap(cr++);
}

void bpf_gen__prog_load(struct bpf_gen *gen,
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int func_info_tot_sz = load_attr->func_info_cnt *
			       load_attr->func_info_rec_size;
	int line_info_tot_sz = load_attr->line_info_cnt *
			       load_attr->line_info_rec_size;
	int core_relo_tot_sz = gen->core_relo_cnt *
			       sizeof(struct bpf_core_relo);
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	/* add license string to blob of bytes */
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
	pr_debug("gen: prog_load: prog_idx %d type %d insn off %d insns_cnt %zd license off %d\n",
		 prog_idx, prog_type, insns_off, insn_cnt, license_off);

	/* convert blob insns to target endianness */
	if (gen->swapped_endian) {
		struct bpf_insn *insn = gen->data_start + insns_off;
		int i;

		for (i = 0; i < insn_cnt; i++, insn++)
			bpf_insn_bswap(insn);
	}

	attr.prog_type = tgt_endian(prog_type);
	attr.expected_attach_type = tgt_endian(load_attr->expected_attach_type);
	attr.attach_btf_id = tgt_endian(load_attr->attach_btf_id);
	attr.prog_ifindex = tgt_endian(load_attr->prog_ifindex);
	attr.kern_version = 0;
	attr.insn_cnt = tgt_endian((__u32)insn_cnt);
	attr.prog_flags = tgt_endian(load_attr->prog_flags);

	attr.func_info_rec_size = tgt_endian(load_attr->func_info_rec_size);
	attr.func_info_cnt = tgt_endian(load_attr->func_info_cnt);
	func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
	pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
		 func_info, load_attr->func_info_cnt,
		 load_attr->func_info_rec_size);

	attr.line_info_rec_size = tgt_endian(load_attr->line_info_rec_size);
	attr.line_info_cnt = tgt_endian(load_attr->line_info_cnt);
	line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
	pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
		 line_info, load_attr->line_info_cnt,
		 load_attr->line_info_rec_size);

	attr.core_relo_rec_size = tgt_endian((__u32)sizeof(struct bpf_core_relo));
	attr.core_relo_cnt = tgt_endian(gen->core_relo_cnt);
	core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
	pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
		 core_relos, gen->core_relo_cnt,
		 sizeof(struct bpf_core_relo));

	/* convert all info blobs to target endianness */
	if (gen->swapped_endian)
		info_blob_bswap(gen, func_info, line_info, core_relos, load_attr);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: prog_load: attr: off %d size %d\n",
		 prog_load_attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
		gen->attach_kind = 0;
	}
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value) {
	 *    if (ctx->flags & BPF_SKEL_KERNEL)
	 *        bpf_probe_read_kernel(value, value_size, initial_value);
	 *    else
	 *        bpf_copy_from_user(value, value_size, initial_value);
	 * }
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
			      offsetof(struct bpf_loader_ctx, flags)));
	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	map_update_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_update_elem: idx %d, value: off %d size %d, attr: off %d size %d\n",
		 map_idx, value, value_size, map_update_attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;
	int tgt_slot;

	memset(&attr, 0, attr_size);

	tgt_slot = tgt_endian(slot);
	key = add_data(gen, &tgt_slot, sizeof(tgt_slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d, attr: off %d size %d\n",
		 outer_map_idx, slot, inner_map_idx, map_update_attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	pr_debug("gen: map_freeze: idx %d, attr: off %d size %d\n",
		 map_idx, map_freeze_attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}
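/* Rough calling sequence (a sketch of how libbpf's light-skeleton path drives
 * this API; names are from bpf_gen_internal.h, the exact flow lives in
 * libbpf.c):
 *
 *	bpf_gen__init(gen, log_level, nr_progs, nr_maps);
 *	bpf_gen__load_btf(gen, btf_data, btf_size);
 *	bpf_gen__map_create(gen, ...);		// once per map
 *	bpf_gen__map_update_elem(gen, ...);	// pre-populate map values
 *	bpf_gen__map_freeze(gen, ...);		// for read-only maps
 *	bpf_gen__prog_load(gen, ...);		// once per program
 *	err = bpf_gen__finish(gen, nr_progs, nr_maps);
 *
 * On success gen->opts carries the generated loader insns and data blob that
 * skel_internal.h executes at skeleton load time.
 */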