// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>

#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/sizes.h>

#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

const char * const prog_type_name[] = {
        [BPF_PROG_TYPE_UNSPEC]                  = "unspec",
        [BPF_PROG_TYPE_SOCKET_FILTER]           = "socket_filter",
        [BPF_PROG_TYPE_KPROBE]                  = "kprobe",
        [BPF_PROG_TYPE_SCHED_CLS]               = "sched_cls",
        [BPF_PROG_TYPE_SCHED_ACT]               = "sched_act",
        [BPF_PROG_TYPE_TRACEPOINT]              = "tracepoint",
        [BPF_PROG_TYPE_XDP]                     = "xdp",
        [BPF_PROG_TYPE_PERF_EVENT]              = "perf_event",
        [BPF_PROG_TYPE_CGROUP_SKB]              = "cgroup_skb",
        [BPF_PROG_TYPE_CGROUP_SOCK]             = "cgroup_sock",
        [BPF_PROG_TYPE_LWT_IN]                  = "lwt_in",
        [BPF_PROG_TYPE_LWT_OUT]                 = "lwt_out",
        [BPF_PROG_TYPE_LWT_XMIT]                = "lwt_xmit",
        [BPF_PROG_TYPE_SOCK_OPS]                = "sock_ops",
        [BPF_PROG_TYPE_SK_SKB]                  = "sk_skb",
        [BPF_PROG_TYPE_CGROUP_DEVICE]           = "cgroup_device",
        [BPF_PROG_TYPE_SK_MSG]                  = "sk_msg",
        [BPF_PROG_TYPE_RAW_TRACEPOINT]          = "raw_tracepoint",
        [BPF_PROG_TYPE_CGROUP_SOCK_ADDR]        = "cgroup_sock_addr",
        [BPF_PROG_TYPE_LWT_SEG6LOCAL]           = "lwt_seg6local",
        [BPF_PROG_TYPE_LIRC_MODE2]              = "lirc_mode2",
        [BPF_PROG_TYPE_SK_REUSEPORT]            = "sk_reuseport",
        [BPF_PROG_TYPE_FLOW_DISSECTOR]          = "flow_dissector",
        [BPF_PROG_TYPE_CGROUP_SYSCTL]           = "cgroup_sysctl",
        [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
        [BPF_PROG_TYPE_CGROUP_SOCKOPT]          = "cgroup_sockopt",
        [BPF_PROG_TYPE_TRACING]                 = "tracing",
        [BPF_PROG_TYPE_STRUCT_OPS]              = "struct_ops",
        [BPF_PROG_TYPE_EXT]                     = "ext",
        [BPF_PROG_TYPE_LSM]                     = "lsm",
        [BPF_PROG_TYPE_SK_LOOKUP]               = "sk_lookup",
};

const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);

static const char * const attach_type_strings[] = {
        [BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
        [BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
        [BPF_SK_MSG_VERDICT] = "msg_verdict",
        [BPF_FLOW_DISSECTOR] = "flow_dissector",
        [__MAX_BPF_ATTACH_TYPE] = NULL,
};

static enum bpf_attach_type parse_attach_type(const char *str)
{
        enum bpf_attach_type type;

        for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
                if (attach_type_strings[type] &&
                    is_prefix(str, attach_type_strings[type]))
                        return type;
        }

        return __MAX_BPF_ATTACH_TYPE;
}

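/* Convert a program's load_time (nanoseconds since system boot) into a
 * human-readable wall-clock timestamp for the "loaded_at" field, falling
 * back to raw seconds if the clocks cannot be read.
 */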
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
        struct timespec real_time_ts, boot_time_ts;
        time_t wallclock_secs;
        struct tm load_tm;

        if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
            clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
                perror("Can't read clocks");
                snprintf(buf, size, "%llu", nsecs / 1000000000);
                return;
        }

        wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
                (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
                1000000000;

        if (!localtime_r(&wallclock_secs, &load_tm)) {
                snprintf(buf, size, "%llu", nsecs / 1000000000);
                return;
        }

        if (json_output)
                strftime(buf, size, "%s", &load_tm);
        else
                strftime(buf, size, "%FT%T%z", &load_tm);
}

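/* Query the IDs of the maps used by a program and print them, either as a
 * JSON array or as a comma-separated list in plain output.
 */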
static void show_prog_maps(int fd, __u32 num_maps)
{
        struct bpf_prog_info info = {};
        __u32 len = sizeof(info);
        __u32 map_ids[num_maps];
        unsigned int i;
        int err;

        info.nr_map_ids = num_maps;
        info.map_ids = ptr_to_u64(map_ids);

        err = bpf_obj_get_info_by_fd(fd, &info, &len);
        if (err || !info.nr_map_ids)
                return;

        if (json_output) {
                jsonw_name(json_wtr, "map_ids");
                jsonw_start_array(json_wtr);
                for (i = 0; i < info.nr_map_ids; i++)
                        jsonw_uint(json_wtr, map_ids[i]);
                jsonw_end_array(json_wtr);
        } else {
                printf("  map_ids ");
                for (i = 0; i < info.nr_map_ids; i++)
                        printf("%u%s", map_ids[i],
                               i == info.nr_map_ids - 1 ? "" : ",");
        }
}

static void print_prog_header_json(struct bpf_prog_info *info)
{
        jsonw_uint_field(json_wtr, "id", info->id);
        if (info->type < ARRAY_SIZE(prog_type_name))
                jsonw_string_field(json_wtr, "type",
                                   prog_type_name[info->type]);
        else
                jsonw_uint_field(json_wtr, "type", info->type);

        if (*info->name)
                jsonw_string_field(json_wtr, "name", info->name);

        jsonw_name(json_wtr, "tag");
        jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
                     info->tag[0], info->tag[1], info->tag[2], info->tag[3],
                     info->tag[4], info->tag[5], info->tag[6], info->tag[7]);

        jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
        if (info->run_time_ns) {
                jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
                jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
        }
}

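/* Print one program as a JSON object: header fields, load time and UID,
 * translated/JITed sizes, memlock, associated map IDs, BTF ID, pin paths
 * and the PIDs holding references to the program.
 */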
static void print_prog_json(struct bpf_prog_info *info, int fd)
{
        char *memlock;

        jsonw_start_object(json_wtr);
        print_prog_header_json(info);
        print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);

        if (info->load_time) {
                char buf[32];

                print_boot_time(info->load_time, buf, sizeof(buf));

                /* Piggy back on load_time, since 0 uid is a valid one */
                jsonw_name(json_wtr, "loaded_at");
                jsonw_printf(json_wtr, "%s", buf);
                jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
        }

        jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);

        if (info->jited_prog_len) {
                jsonw_bool_field(json_wtr, "jited", true);
                jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
        } else {
                jsonw_bool_field(json_wtr, "jited", false);
        }

        memlock = get_fdinfo(fd, "memlock");
        if (memlock)
                jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
        free(memlock);

        if (info->nr_map_ids)
                show_prog_maps(fd, info->nr_map_ids);

        if (info->btf_id)
                jsonw_int_field(json_wtr, "btf_id", info->btf_id);

        if (!hash_empty(prog_table.table)) {
                struct pinned_obj *obj;

                jsonw_name(json_wtr, "pinned");
                jsonw_start_array(json_wtr);
                hash_for_each_possible(prog_table.table, obj, hash, info->id) {
                        if (obj->id == info->id)
                                jsonw_string(json_wtr, obj->path);
                }
                jsonw_end_array(json_wtr);
        }

        emit_obj_refs_json(&refs_table, info->id, json_wtr);

        jsonw_end_object(json_wtr);
}

static void print_prog_header_plain(struct bpf_prog_info *info)
{
        printf("%u: ", info->id);
        if (info->type < ARRAY_SIZE(prog_type_name))
                printf("%s  ", prog_type_name[info->type]);
        else
                printf("type %u  ", info->type);

        if (*info->name)
                printf("name %s  ", info->name);

        printf("tag ");
        fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
        print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
        printf("%s", info->gpl_compatible ? "  gpl" : "");
        if (info->run_time_ns)
                printf(" run_time_ns %lld run_cnt %lld",
                       info->run_time_ns, info->run_cnt);
        printf("\n");
}

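/* Plain-text counterpart of print_prog_json(): print one block of output
 * per program, including load time, sizes, memlock, maps and pin paths.
 */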
static void print_prog_plain(struct bpf_prog_info *info, int fd)
{
        char *memlock;

        print_prog_header_plain(info);

        if (info->load_time) {
                char buf[32];

                print_boot_time(info->load_time, buf, sizeof(buf));

                /* Piggy back on load_time, since 0 uid is a valid one */
                printf("\tloaded_at %s  uid %u\n", buf, info->created_by_uid);
        }

        printf("\txlated %uB", info->xlated_prog_len);

        if (info->jited_prog_len)
                printf("  jited %uB", info->jited_prog_len);
        else
                printf("  not jited");

        memlock = get_fdinfo(fd, "memlock");
        if (memlock)
                printf("  memlock %sB", memlock);
        free(memlock);

        if (info->nr_map_ids)
                show_prog_maps(fd, info->nr_map_ids);

        if (!hash_empty(prog_table.table)) {
                struct pinned_obj *obj;

                hash_for_each_possible(prog_table.table, obj, hash, info->id) {
                        if (obj->id == info->id)
                                printf("\n\tpinned %s", obj->path);
                }
        }

        if (info->btf_id)
                printf("\n\tbtf_id %d", info->btf_id);

        emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");

        printf("\n");
}

static int show_prog(int fd)
{
        struct bpf_prog_info info = {};
        __u32 len = sizeof(info);
        int err;

        err = bpf_obj_get_info_by_fd(fd, &info, &len);
        if (err) {
                p_err("can't get prog info: %s", strerror(errno));
                return -1;
        }

        if (json_output)
                print_prog_json(&info, fd);
        else
                print_prog_plain(&info, fd);

        return 0;
}

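/* Show only the programs matching the PROG argument(s) given on the command
 * line, one show_prog() call per matched file descriptor.
 */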
static int do_show_subset(int argc, char **argv)
{
        int *fds = NULL;
        int nb_fds, i;
        int err = -1;

        fds = malloc(sizeof(int));
        if (!fds) {
                p_err("mem alloc failed");
                return -1;
        }
        nb_fds = prog_parse_fds(&argc, &argv, &fds);
        if (nb_fds < 1)
                goto exit_free;

        if (json_output && nb_fds > 1)
                jsonw_start_array(json_wtr);    /* root array */
        for (i = 0; i < nb_fds; i++) {
                err = show_prog(fds[i]);
                if (err) {
                        for (; i < nb_fds; i++)
                                close(fds[i]);
                        break;
                }
                close(fds[i]);
        }
        if (json_output && nb_fds > 1)
                jsonw_end_array(json_wtr);      /* root array */

exit_free:
        free(fds);
        return err;
}

static int do_show(int argc, char **argv)
{
        __u32 id = 0;
        int err;
        int fd;

        if (show_pinned)
                build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
        build_obj_refs_table(&refs_table, BPF_OBJ_PROG);

        if (argc == 2)
                return do_show_subset(argc, argv);

        if (json_output)
                jsonw_start_array(json_wtr);
        while (true) {
                err = bpf_prog_get_next_id(id, &id);
                if (err) {
                        if (errno == ENOENT) {
                                err = 0;
                                break;
                        }
                        p_err("can't get next program: %s%s", strerror(errno),
                              errno == EINVAL ? " -- kernel too old?" : "");
                        err = -1;
                        break;
                }

                fd = bpf_prog_get_fd_by_id(id);
                if (fd < 0) {
                        if (errno == ENOENT)
                                continue;
                        p_err("can't get prog by id (%u): %s",
                              id, strerror(errno));
                        err = -1;
                        break;
                }

                err = show_prog(fd);
                close(fd);
                if (err)
                        break;
        }

        if (json_output)
                jsonw_end_array(json_wtr);

        delete_obj_refs_table(&refs_table);

        return err;
}

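/* Dump a program's instructions, either JITed or translated (xlated),
 * to a file, as a disassembly listing, as a control flow graph ("visual"),
 * or as structured JSON output.
 */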
static int
prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
          char *filepath, bool opcodes, bool visual, bool linum)
{
        struct bpf_prog_linfo *prog_linfo = NULL;
        const char *disasm_opt = NULL;
        struct dump_data dd = {};
        void *func_info = NULL;
        struct btf *btf = NULL;
        char func_sig[1024];
        unsigned char *buf;
        __u32 member_len;
        ssize_t n;
        int fd;

        if (mode == DUMP_JITED) {
                if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
                        p_info("no instructions returned");
                        return -1;
                }
                buf = (unsigned char *)(info->jited_prog_insns);
                member_len = info->jited_prog_len;
        } else {        /* DUMP_XLATED */
                if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
                        p_err("error retrieving insn dump: kernel.kptr_restrict set?");
                        return -1;
                }
                buf = (unsigned char *)info->xlated_prog_insns;
                member_len = info->xlated_prog_len;
        }

        if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
                p_err("failed to get btf");
                return -1;
        }

        func_info = (void *)info->func_info;

        if (info->nr_line_info) {
                prog_linfo = bpf_prog_linfo__new(info);
                if (!prog_linfo)
                        p_info("error in processing bpf_line_info. continue without it.");
        }

        if (filepath) {
                fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
                if (fd < 0) {
                        p_err("can't open file %s: %s", filepath,
                              strerror(errno));
                        return -1;
                }

                n = write(fd, buf, member_len);
                close(fd);
                if (n != member_len) {
                        p_err("error writing output file: %s",
                              n < 0 ? strerror(errno) : "short write");
                        return -1;
                }

                if (json_output)
                        jsonw_null(json_wtr);
        } else if (mode == DUMP_JITED) {
                const char *name = NULL;

                if (info->ifindex) {
                        name = ifindex_to_bfd_params(info->ifindex,
                                                     info->netns_dev,
                                                     info->netns_ino,
                                                     &disasm_opt);
                        if (!name)
                                return -1;
                }

                if (info->nr_jited_func_lens && info->jited_func_lens) {
                        struct kernel_sym *sym = NULL;
                        struct bpf_func_info *record;
                        char sym_name[SYM_MAX_NAME];
                        unsigned char *img = buf;
                        __u64 *ksyms = NULL;
                        __u32 *lens;
                        __u32 i;

                        if (info->nr_jited_ksyms) {
                                kernel_syms_load(&dd);
                                ksyms = (__u64 *) info->jited_ksyms;
                        }

                        if (json_output)
                                jsonw_start_array(json_wtr);

                        lens = (__u32 *) info->jited_func_lens;
                        for (i = 0; i < info->nr_jited_func_lens; i++) {
                                if (ksyms) {
                                        sym = kernel_syms_search(&dd, ksyms[i]);
                                        if (sym)
                                                sprintf(sym_name, "%s", sym->name);
                                        else
                                                sprintf(sym_name, "0x%016llx", ksyms[i]);
                                } else {
                                        strcpy(sym_name, "unknown");
                                }

                                if (func_info) {
                                        record = func_info + i * info->func_info_rec_size;
                                        btf_dumper_type_only(btf, record->type_id,
                                                             func_sig,
                                                             sizeof(func_sig));
                                }

                                if (json_output) {
                                        jsonw_start_object(json_wtr);
                                        if (func_info && func_sig[0] != '\0') {
                                                jsonw_name(json_wtr, "proto");
                                                jsonw_string(json_wtr, func_sig);
                                        }
                                        jsonw_name(json_wtr, "name");
                                        jsonw_string(json_wtr, sym_name);
                                        jsonw_name(json_wtr, "insns");
                                } else {
                                        if (func_info && func_sig[0] != '\0')
                                                printf("%s:\n", func_sig);
                                        printf("%s:\n", sym_name);
                                }

                                disasm_print_insn(img, lens[i], opcodes,
                                                  name, disasm_opt, btf,
                                                  prog_linfo, ksyms[i], i,
                                                  linum);

                                img += lens[i];

                                if (json_output)
                                        jsonw_end_object(json_wtr);
                                else
                                        printf("\n");
                        }

                        if (json_output)
                                jsonw_end_array(json_wtr);
                } else {
                        disasm_print_insn(buf, member_len, opcodes, name,
                                          disasm_opt, btf, NULL, 0, 0, false);
                }
        } else if (visual) {
                if (json_output)
                        jsonw_null(json_wtr);
                else
                        dump_xlated_cfg(buf, member_len);
        } else {
                kernel_syms_load(&dd);
                dd.nr_jited_ksyms = info->nr_jited_ksyms;
                dd.jited_ksyms = (__u64 *) info->jited_ksyms;
                dd.btf = btf;
                dd.func_info = func_info;
                dd.finfo_rec_size = info->func_info_rec_size;
                dd.prog_linfo = prog_linfo;

                if (json_output)
                        dump_xlated_json(&dd, buf, member_len, opcodes,
                                         linum);
                else
                        dump_xlated_plain(&dd, buf, member_len, opcodes,
                                          linum);
                kernel_syms_destroy(&dd);
        }

        return 0;
}

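/* Parse the "dump xlated|jited" arguments, fetch program info with the
 * relevant arrays populated, and hand each matched program to prog_dump().
 */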
static int do_dump(int argc, char **argv)
{
        struct bpf_prog_info_linear *info_linear;
        char *filepath = NULL;
        bool opcodes = false;
        bool visual = false;
        enum dump_mode mode;
        bool linum = false;
        int *fds = NULL;
        int nb_fds, i = 0;
        int err = -1;
        __u64 arrays;

        if (is_prefix(*argv, "jited")) {
                if (disasm_init())
                        return -1;
                mode = DUMP_JITED;
        } else if (is_prefix(*argv, "xlated")) {
                mode = DUMP_XLATED;
        } else {
                p_err("expected 'xlated' or 'jited', got: %s", *argv);
                return -1;
        }
        NEXT_ARG();

        if (argc < 1)
                usage();

        fds = malloc(sizeof(int));
        if (!fds) {
                p_err("mem alloc failed");
                return -1;
        }
        nb_fds = prog_parse_fds(&argc, &argv, &fds);
        if (nb_fds < 1)
                goto exit_free;

        if (is_prefix(*argv, "file")) {
                NEXT_ARG();
                if (!argc) {
                        p_err("expected file path");
                        goto exit_close;
                }
                if (nb_fds > 1) {
                        p_err("several programs matched");
                        goto exit_close;
                }

                filepath = *argv;
                NEXT_ARG();
        } else if (is_prefix(*argv, "opcodes")) {
                opcodes = true;
                NEXT_ARG();
        } else if (is_prefix(*argv, "visual")) {
                if (nb_fds > 1) {
                        p_err("several programs matched");
                        goto exit_close;
                }

                visual = true;
                NEXT_ARG();
        } else if (is_prefix(*argv, "linum")) {
                linum = true;
                NEXT_ARG();
        }

        if (argc) {
                usage();
                goto exit_close;
        }

        if (mode == DUMP_JITED)
                arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
        else
                arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;

        arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
        arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
        arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
        arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

        if (json_output && nb_fds > 1)
                jsonw_start_array(json_wtr);    /* root array */
        for (i = 0; i < nb_fds; i++) {
                info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
                if (IS_ERR_OR_NULL(info_linear)) {
                        p_err("can't get prog info: %s", strerror(errno));
                        break;
                }

                if (json_output && nb_fds > 1) {
                        jsonw_start_object(json_wtr);   /* prog object */
                        print_prog_header_json(&info_linear->info);
                        jsonw_name(json_wtr, "insns");
                } else if (nb_fds > 1) {
                        print_prog_header_plain(&info_linear->info);
                }

                err = prog_dump(&info_linear->info, mode, filepath, opcodes,
                                visual, linum);

                if (json_output && nb_fds > 1)
                        jsonw_end_object(json_wtr);     /* prog object */
                else if (i != nb_fds - 1 && nb_fds > 1)
                        printf("\n");

                free(info_linear);
                if (err)
                        break;
                close(fds[i]);
        }
        if (json_output && nb_fds > 1)
                jsonw_end_array(json_wtr);      /* root array */

exit_close:
        for (; i < nb_fds; i++)
                close(fds[i]);
exit_free:
        free(fds);
        return err;
}

static int do_pin(int argc, char **argv)
{
        int err;

        err = do_pin_any(argc, argv, prog_parse_fd);
        if (!err && json_output)
                jsonw_null(json_wtr);
        return err;
}

static int map_replace_compar(const void *p1, const void *p2)
{
        const struct map_replace *a = p1, *b = p2;

        return a->idx - b->idx;
}

static int parse_attach_detach_args(int argc, char **argv, int *progfd,
                                    enum bpf_attach_type *attach_type,
                                    int *mapfd)
{
        if (!REQ_ARGS(3))
                return -EINVAL;

        *progfd = prog_parse_fd(&argc, &argv);
        if (*progfd < 0)
                return *progfd;

        *attach_type = parse_attach_type(*argv);
        if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
                p_err("invalid attach/detach type");
                return -EINVAL;
        }

        if (*attach_type == BPF_FLOW_DISSECTOR) {
                *mapfd = 0;
                return 0;
        }

        NEXT_ARG();
        if (!REQ_ARGS(2))
                return -EINVAL;

        *mapfd = map_parse_fd(&argc, &argv);
        if (*mapfd < 0)
                return *mapfd;

        return 0;
}

static int do_attach(int argc, char **argv)
{
        enum bpf_attach_type attach_type;
        int err, progfd;
        int mapfd;

        err = parse_attach_detach_args(argc, argv,
                                       &progfd, &attach_type, &mapfd);
        if (err)
                return err;

        err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
        if (err) {
                p_err("failed prog attach to map");
                return -EINVAL;
        }

        if (json_output)
                jsonw_null(json_wtr);
        return 0;
}

static int do_detach(int argc, char **argv)
{
        enum bpf_attach_type attach_type;
        int err, progfd;
        int mapfd;

        err = parse_attach_detach_args(argc, argv,
                                       &progfd, &attach_type, &mapfd);
        if (err)
                return err;

        err = bpf_prog_detach2(progfd, mapfd, attach_type);
        if (err) {
                p_err("failed prog detach from map");
                return -EINVAL;
        }

        if (json_output)
                jsonw_null(json_wtr);
        return 0;
}

static int check_single_stdin(char *file_data_in, char *file_ctx_in)
{
        if (file_data_in && file_ctx_in &&
            !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
                p_err("cannot use standard input for both data_in and ctx_in");
                return -1;
        }

        return 0;
}

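/* Read data_in/ctx_in for "prog run" from a file or from standard input
 * ("-"), growing the buffer as more input becomes available.
 */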
static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
{
        size_t block_size = 256;
        size_t buf_size = block_size;
        size_t nb_read = 0;
        void *tmp;
        FILE *f;

        if (!fname) {
                *data_ptr = NULL;
                *size = 0;
                return 0;
        }

        if (!strcmp(fname, "-"))
                f = stdin;
        else
                f = fopen(fname, "r");
        if (!f) {
                p_err("failed to open %s: %s", fname, strerror(errno));
                return -1;
        }

        *data_ptr = malloc(block_size);
        if (!*data_ptr) {
                p_err("failed to allocate memory for data_in/ctx_in: %s",
                      strerror(errno));
                goto err_fclose;
        }

        while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
                if (feof(f))
                        break;
                if (ferror(f)) {
                        p_err("failed to read data_in/ctx_in from %s: %s",
                              fname, strerror(errno));
                        goto err_free;
                }
                if (nb_read > buf_size - block_size) {
                        if (buf_size == UINT32_MAX) {
                                p_err("data_in/ctx_in is too long (max: %d)",
                                      UINT32_MAX);
                                goto err_free;
                        }
                        /* No space for fread()-ing next chunk; realloc() */
                        buf_size *= 2;
                        tmp = realloc(*data_ptr, buf_size);
                        if (!tmp) {
                                p_err("failed to reallocate data_in/ctx_in: %s",
                                      strerror(errno));
                                goto err_free;
                        }
                        *data_ptr = tmp;
                }
        }
        if (f != stdin)
                fclose(f);

        *size = nb_read;
        return 0;

err_free:
        free(*data_ptr);
        *data_ptr = NULL;
err_fclose:
        if (f != stdin)
                fclose(f);
        return -1;
}

static void hex_print(void *data, unsigned int size, FILE *f)
{
        size_t i, j;
        char c;

        for (i = 0; i < size; i += 16) {
                /* Row offset */
                fprintf(f, "%07zx\t", i);

                /* Hexadecimal values */
                for (j = i; j < i + 16 && j < size; j++)
                        fprintf(f, "%02x%s", *(uint8_t *)(data + j),
                                j % 2 ? " " : "");
                for (; j < i + 16; j++)
                        fprintf(f, "  %s", j % 2 ? " " : "");

                /* ASCII values (if relevant), '.' otherwise */
                fprintf(f, "| ");
                for (j = i; j < i + 16 && j < size; j++) {
                        c = *(char *)(data + j);
                        if (c < ' ' || c > '~')
                                c = '.';
                        fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
                }

                fprintf(f, "\n");
        }
}

static int
print_run_output(void *data, unsigned int size, const char *fname,
                 const char *json_key)
{
        size_t nb_written;
        FILE *f;

        if (!fname)
                return 0;

        if (!strcmp(fname, "-")) {
                f = stdout;
                if (json_output) {
                        jsonw_name(json_wtr, json_key);
                        print_data_json(data, size);
                } else {
                        hex_print(data, size, f);
                }
                return 0;
        }

        f = fopen(fname, "w");
        if (!f) {
                p_err("failed to open %s: %s", fname, strerror(errno));
                return -1;
        }

        nb_written = fwrite(data, 1, size, f);
        fclose(f);
        if (nb_written != size) {
                p_err("failed to write output data/ctx: %s", strerror(errno));
                return -1;
        }

        return 0;
}

static int alloc_run_data(void **data_ptr, unsigned int size_out)
{
        *data_ptr = calloc(size_out, 1);
        if (!*data_ptr) {
                p_err("failed to allocate memory for output data/ctx: %s",
                      strerror(errno));
                return -1;
        }

        return 0;
}

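/* Implement "bpftool prog run": load input data and context, invoke
 * BPF_PROG_TEST_RUN on the program and print the resulting data, context,
 * return value and duration.
 */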
static int do_run(int argc, char **argv)
{
        char *data_fname_in = NULL, *data_fname_out = NULL;
        char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
        struct bpf_prog_test_run_attr test_attr = {0};
        const unsigned int default_size = SZ_32K;
        void *data_in = NULL, *data_out = NULL;
        void *ctx_in = NULL, *ctx_out = NULL;
        unsigned int repeat = 1;
        int fd, err;

        if (!REQ_ARGS(4))
                return -1;

        fd = prog_parse_fd(&argc, &argv);
        if (fd < 0)
                return -1;

        while (argc) {
                if (detect_common_prefix(*argv, "data_in", "data_out",
                                         "data_size_out", NULL))
                        return -1;
                if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
                                         "ctx_size_out", NULL))
                        return -1;

                if (is_prefix(*argv, "data_in")) {
                        NEXT_ARG();
                        if (!REQ_ARGS(1))
                                return -1;

                        data_fname_in = GET_ARG();
                        if (check_single_stdin(data_fname_in, ctx_fname_in))
                                return -1;
                } else if (is_prefix(*argv, "data_out")) {
                        NEXT_ARG();
                        if (!REQ_ARGS(1))
                                return -1;

                        data_fname_out = GET_ARG();
                } else if (is_prefix(*argv, "data_size_out")) {
                        char *endptr;

                        NEXT_ARG();
                        if (!REQ_ARGS(1))
                                return -1;

                        test_attr.data_size_out = strtoul(*argv, &endptr, 0);
                        if (*endptr) {
                                p_err("can't parse %s as output data size",
                                      *argv);
                                return -1;
                        }
                        NEXT_ARG();
                } else if (is_prefix(*argv, "ctx_in")) {
                        NEXT_ARG();
                        if (!REQ_ARGS(1))
                                return -1;

                        ctx_fname_in = GET_ARG();
                        if (check_single_stdin(data_fname_in, ctx_fname_in))
                                return -1;
                } else if (is_prefix(*argv, "ctx_out")) {
                        NEXT_ARG();
                        if (!REQ_ARGS(1))
                                return -1;

                        ctx_fname_out = GET_ARG();
                } else if (is_prefix(*argv, "ctx_size_out")) {
                        char *endptr;

                        NEXT_ARG();
                        if (!REQ_ARGS(1))
                                return -1;

                        test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
                        if (*endptr) {
                                p_err("can't parse %s as output context size",
                                      *argv);
                                return -1;
                        }
                        NEXT_ARG();
                } else if (is_prefix(*argv, "repeat")) {
                        char *endptr;

                        NEXT_ARG();
                        if (!REQ_ARGS(1))
                                return -1;

                        repeat = strtoul(*argv, &endptr, 0);
                        if (*endptr) {
                                p_err("can't parse %s as repeat number",
                                      *argv);
                                return -1;
                        }
                        NEXT_ARG();
                } else {
                        p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
                              *argv);
                        return -1;
                }
        }

        err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
        if (err)
                return -1;

        if (data_in) {
                if (!test_attr.data_size_out)
                        test_attr.data_size_out = default_size;
                err = alloc_run_data(&data_out, test_attr.data_size_out);
                if (err)
                        goto free_data_in;
        }

        err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
        if (err)
                goto free_data_out;

        if (ctx_in) {
                if (!test_attr.ctx_size_out)
                        test_attr.ctx_size_out = default_size;
                err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
                if (err)
                        goto free_ctx_in;
        }

        test_attr.prog_fd       = fd;
        test_attr.repeat        = repeat;
        test_attr.data_in       = data_in;
        test_attr.data_out      = data_out;
        test_attr.ctx_in        = ctx_in;
        test_attr.ctx_out       = ctx_out;

        err = bpf_prog_test_run_xattr(&test_attr);
        if (err) {
                p_err("failed to run program: %s", strerror(errno));
                goto free_ctx_out;
        }

        err = 0;

        if (json_output)
                jsonw_start_object(json_wtr);   /* root */

        /* Do not exit on errors occurring when printing output data/context,
         * we still want to print return value and duration for program run.
         */
        if (test_attr.data_size_out)
                err += print_run_output(test_attr.data_out,
                                        test_attr.data_size_out,
                                        data_fname_out, "data_out");
        if (test_attr.ctx_size_out)
                err += print_run_output(test_attr.ctx_out,
                                        test_attr.ctx_size_out,
                                        ctx_fname_out, "ctx_out");

        if (json_output) {
                jsonw_uint_field(json_wtr, "retval", test_attr.retval);
                jsonw_uint_field(json_wtr, "duration", test_attr.duration);
                jsonw_end_object(json_wtr);     /* root */
        } else {
                fprintf(stdout, "Return value: %u, duration%s: %uns\n",
                        test_attr.retval,
                        repeat > 1 ? " (average)" : "", test_attr.duration);
        }

free_ctx_out:
        free(ctx_out);
free_ctx_in:
        free(ctx_in);
free_data_out:
        free(data_out);
free_data_in:
        free(data_in);

        return err;
}

static int
get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
                      enum bpf_attach_type *expected_attach_type)
{
        libbpf_print_fn_t print_backup;
        int ret;

        ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
        if (!ret)
                return ret;

        /* libbpf_prog_type_by_name() failed, let's re-run with debug level */
        print_backup = libbpf_set_print(print_all_levels);
        ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
        libbpf_set_print(print_backup);

        return ret;
}

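/* Shared implementation for "prog load" (pin the first program only) and
 * "prog loadall" (pin every program): parse the type/map/dev/pinmaps
 * options, open and load the object, set up map reuse and pin the result.
 */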
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
        enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
                .relaxed_maps = relaxed_maps,
        );
        struct bpf_object_load_attr load_attr = { 0 };
        enum bpf_attach_type expected_attach_type;
        struct map_replace *map_replace = NULL;
        struct bpf_program *prog = NULL, *pos;
        unsigned int old_map_fds = 0;
        const char *pinmaps = NULL;
        struct bpf_object *obj;
        struct bpf_map *map;
        const char *pinfile;
        unsigned int i, j;
        __u32 ifindex = 0;
        const char *file;
        int idx, err;

        if (!REQ_ARGS(2))
                return -1;
        file = GET_ARG();
        pinfile = GET_ARG();

        while (argc) {
                if (is_prefix(*argv, "type")) {
                        char *type;

                        NEXT_ARG();

                        if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
                                p_err("program type already specified");
                                goto err_free_reuse_maps;
                        }
                        if (!REQ_ARGS(1))
                                goto err_free_reuse_maps;

                        /* Put a '/' at the end of type to appease libbpf */
                        type = malloc(strlen(*argv) + 2);
                        if (!type) {
                                p_err("mem alloc failed");
                                goto err_free_reuse_maps;
                        }
                        *type = 0;
                        strcat(type, *argv);
                        strcat(type, "/");

                        err = get_prog_type_by_name(type, &common_prog_type,
                                                    &expected_attach_type);
                        free(type);
                        if (err < 0)
                                goto err_free_reuse_maps;

                        NEXT_ARG();
                } else if (is_prefix(*argv, "map")) {
                        void *new_map_replace;
                        char *endptr, *name;
                        int fd;

                        NEXT_ARG();

                        if (!REQ_ARGS(4))
                                goto err_free_reuse_maps;

                        if (is_prefix(*argv, "idx")) {
                                NEXT_ARG();

                                idx = strtoul(*argv, &endptr, 0);
                                if (*endptr) {
                                        p_err("can't parse %s as IDX", *argv);
                                        goto err_free_reuse_maps;
                                }
                                name = NULL;
                        } else if (is_prefix(*argv, "name")) {
                                NEXT_ARG();

                                name = *argv;
                                idx = -1;
                        } else {
                                p_err("expected 'idx' or 'name', got: '%s'?",
                                      *argv);
                                goto err_free_reuse_maps;
                        }
                        NEXT_ARG();

                        fd = map_parse_fd(&argc, &argv);
                        if (fd < 0)
                                goto err_free_reuse_maps;

                        new_map_replace = reallocarray(map_replace,
                                                       old_map_fds + 1,
                                                       sizeof(*map_replace));
                        if (!new_map_replace) {
                                p_err("mem alloc failed");
                                goto err_free_reuse_maps;
                        }
                        map_replace = new_map_replace;

                        map_replace[old_map_fds].idx = idx;
                        map_replace[old_map_fds].name = name;
                        map_replace[old_map_fds].fd = fd;
                        old_map_fds++;
                } else if (is_prefix(*argv, "dev")) {
                        NEXT_ARG();

                        if (ifindex) {
                                p_err("offload device already specified");
                                goto err_free_reuse_maps;
                        }
                        if (!REQ_ARGS(1))
                                goto err_free_reuse_maps;

                        ifindex = if_nametoindex(*argv);
                        if (!ifindex) {
                                p_err("unrecognized netdevice '%s': %s",
                                      *argv, strerror(errno));
                                goto err_free_reuse_maps;
                        }
                        NEXT_ARG();
                } else if (is_prefix(*argv, "pinmaps")) {
                        NEXT_ARG();

                        if (!REQ_ARGS(1))
                                goto err_free_reuse_maps;

                        pinmaps = GET_ARG();
                } else {
                        p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
                              *argv);
                        goto err_free_reuse_maps;
                }
        }

        set_max_rlimit();

        obj = bpf_object__open_file(file, &open_opts);
        if (IS_ERR_OR_NULL(obj)) {
                p_err("failed to open object file");
                goto err_free_reuse_maps;
        }

        bpf_object__for_each_program(pos, obj) {
                enum bpf_prog_type prog_type = common_prog_type;

                if (prog_type == BPF_PROG_TYPE_UNSPEC) {
                        const char *sec_name = bpf_program__title(pos, false);

                        err = get_prog_type_by_name(sec_name, &prog_type,
                                                    &expected_attach_type);
                        if (err < 0)
                                goto err_close_obj;
                }

                bpf_program__set_ifindex(pos, ifindex);
                bpf_program__set_type(pos, prog_type);
                bpf_program__set_expected_attach_type(pos, expected_attach_type);
        }

        qsort(map_replace, old_map_fds, sizeof(*map_replace),
              map_replace_compar);

        /* After the sort maps by name will be first on the list, because they
         * have idx == -1.  Resolve them.
         */
        j = 0;
        while (j < old_map_fds && map_replace[j].name) {
                i = 0;
                bpf_object__for_each_map(map, obj) {
                        if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
                                map_replace[j].idx = i;
                                break;
                        }
                        i++;
                }
                if (map_replace[j].idx == -1) {
                        p_err("unable to find map '%s'", map_replace[j].name);
                        goto err_close_obj;
                }
                j++;
        }
        /* Resort if any names were resolved */
        if (j)
                qsort(map_replace, old_map_fds, sizeof(*map_replace),
                      map_replace_compar);

        /* Set ifindex and name reuse */
        j = 0;
        idx = 0;
        bpf_object__for_each_map(map, obj) {
                if (!bpf_map__is_offload_neutral(map))
                        bpf_map__set_ifindex(map, ifindex);

                if (j < old_map_fds && idx == map_replace[j].idx) {
                        err = bpf_map__reuse_fd(map, map_replace[j++].fd);
                        if (err) {
                                p_err("unable to set up map reuse: %d", err);
                                goto err_close_obj;
                        }

                        /* Next reuse wants to apply to the same map */
                        if (j < old_map_fds && map_replace[j].idx == idx) {
                                p_err("replacement for map idx %d specified more than once",
                                      idx);
                                goto err_close_obj;
                        }
                }

                idx++;
        }
        if (j < old_map_fds) {
                p_err("map idx '%d' not used", map_replace[j].idx);
                goto err_close_obj;
        }

        load_attr.obj = obj;
        if (verifier_logs)
                /* log_level1 + log_level2 + stats, but not stable UAPI */
                load_attr.log_level = 1 + 2 + 4;

        err = bpf_object__load_xattr(&load_attr);
        if (err) {
                p_err("failed to load object file");
                goto err_close_obj;
        }

        err = mount_bpffs_for_pin(pinfile);
        if (err)
                goto err_close_obj;

        if (first_prog_only) {
                prog = bpf_program__next(NULL, obj);
                if (!prog) {
                        p_err("object file doesn't contain any bpf program");
                        goto err_close_obj;
                }

                err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
                if (err) {
                        p_err("failed to pin program %s",
                              bpf_program__title(prog, false));
                        goto err_close_obj;
                }
        } else {
                err = bpf_object__pin_programs(obj, pinfile);
                if (err) {
                        p_err("failed to pin all programs");
                        goto err_close_obj;
                }
        }

        if (pinmaps) {
                err = bpf_object__pin_maps(obj, pinmaps);
                if (err) {
                        p_err("failed to pin all maps");
                        goto err_unpin;
                }
        }

        if (json_output)
                jsonw_null(json_wtr);

        bpf_object__close(obj);
        for (i = 0; i < old_map_fds; i++)
                close(map_replace[i].fd);
        free(map_replace);

        return 0;

err_unpin:
        if (first_prog_only)
                unlink(pinfile);
        else
                bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
        bpf_object__close(obj);
err_free_reuse_maps:
        for (i = 0; i < old_map_fds; i++)
                close(map_replace[i].fd);
        free(map_replace);
        return -1;
}

static int do_load(int argc, char **argv)
{
        return load_with_options(argc, argv, true);
}

static int do_loadall(int argc, char **argv)
{
        return load_with_options(argc, argv, false);
}

#ifdef BPFTOOL_WITHOUT_SKELETONS

static int do_profile(int argc, char **argv)
{
        p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
        return 0;
}

#else /* BPFTOOL_WITHOUT_SKELETONS */

#include "profiler.skel.h"

struct profile_metric {
        const char *name;
        struct bpf_perf_event_value val;
        struct perf_event_attr attr;
        bool selected;

        /* calculate ratios like instructions per cycle */
        const int ratio_metric; /* 0 for N/A, 1 for index 0 (cycles) */
        const char *ratio_desc;
        const float ratio_mul;
} metrics[] = {
        {
                .name = "cycles",
                .attr = {
                        .type = PERF_TYPE_HARDWARE,
                        .config = PERF_COUNT_HW_CPU_CYCLES,
                        .exclude_user = 1,
                },
        },
        {
                .name = "instructions",
                .attr = {
                        .type = PERF_TYPE_HARDWARE,
                        .config = PERF_COUNT_HW_INSTRUCTIONS,
                        .exclude_user = 1,
                },
                .ratio_metric = 1,
                .ratio_desc = "insns per cycle",
                .ratio_mul = 1.0,
        },
        {
                .name = "l1d_loads",
                .attr = {
                        .type = PERF_TYPE_HW_CACHE,
                        .config =
                                PERF_COUNT_HW_CACHE_L1D |
                                (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
                        .exclude_user = 1,
                },
        },
        {
                .name = "llc_misses",
                .attr = {
                        .type = PERF_TYPE_HW_CACHE,
                        .config =
                                PERF_COUNT_HW_CACHE_LL |
                                (PERF_COUNT_HW_CACHE_OP_READ << 8) |
                                (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
                        .exclude_user = 1,
                },
                .ratio_metric = 2,
                .ratio_desc = "LLC misses per million insns",
                .ratio_mul = 1e6,
        },
};

static __u64 profile_total_count;

#define MAX_NUM_PROFILE_METRICS 4

static int profile_parse_metrics(int argc, char **argv)
{
        unsigned int metric_cnt;
        int selected_cnt = 0;
        unsigned int i;

        metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);

        while (argc > 0) {
                for (i = 0; i < metric_cnt; i++) {
                        if (is_prefix(argv[0], metrics[i].name)) {
                                if (!metrics[i].selected)
                                        selected_cnt++;
                                metrics[i].selected = true;
                                break;
                        }
                }
                if (i == metric_cnt) {
                        p_err("unknown metric %s", argv[0]);
                        return -1;
                }
                NEXT_ARG();
        }
        if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
                p_err("too many (%d) metrics, please specify no more than %d metrics at a time",
                      selected_cnt, MAX_NUM_PROFILE_METRICS);
                return -1;
        }
        return selected_cnt;
}

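/* Accumulate the per-CPU counter readings from the profiler skeleton's maps
 * into the metrics[] array and into the total run count.
 */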
static void profile_read_values(struct profiler_bpf *obj)
{
        __u32 m, cpu, num_cpu = obj->rodata->num_cpu;
        int reading_map_fd, count_map_fd;
        __u64 counts[num_cpu];
        __u32 key = 0;
        int err;

        reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
        count_map_fd = bpf_map__fd(obj->maps.counts);
        if (reading_map_fd < 0 || count_map_fd < 0) {
                p_err("failed to get fd for map");
                return;
        }

        err = bpf_map_lookup_elem(count_map_fd, &key, counts);
        if (err) {
                p_err("failed to read count_map: %s", strerror(errno));
                return;
        }

        profile_total_count = 0;
        for (cpu = 0; cpu < num_cpu; cpu++)
                profile_total_count += counts[cpu];

        for (m = 0; m < ARRAY_SIZE(metrics); m++) {
                struct bpf_perf_event_value values[num_cpu];

                if (!metrics[m].selected)
                        continue;

                err = bpf_map_lookup_elem(reading_map_fd, &key, values);
                if (err) {
                        p_err("failed to read reading_map: %s",
                              strerror(errno));
                        return;
                }
                for (cpu = 0; cpu < num_cpu; cpu++) {
                        metrics[m].val.counter += values[cpu].counter;
                        metrics[m].val.enabled += values[cpu].enabled;
                        metrics[m].val.running += values[cpu].running;
                }
                key++;
        }
}

static void profile_print_readings_json(void)
{
        __u32 m;

        jsonw_start_array(json_wtr);
        for (m = 0; m < ARRAY_SIZE(metrics); m++) {
                if (!metrics[m].selected)
                        continue;
                jsonw_start_object(json_wtr);
                jsonw_string_field(json_wtr, "metric", metrics[m].name);
                jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
                jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
                jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
                jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);

                jsonw_end_object(json_wtr);
        }
        jsonw_end_array(json_wtr);
}

static void profile_print_readings_plain(void)
{
        __u32 m;

        printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
        for (m = 0; m < ARRAY_SIZE(metrics); m++) {
                struct bpf_perf_event_value *val = &metrics[m].val;
                int r;

                if (!metrics[m].selected)
                        continue;
                printf("%18llu %-20s", val->counter, metrics[m].name);

                r = metrics[m].ratio_metric - 1;
                if (r >= 0 && metrics[r].selected &&
                    metrics[r].val.counter > 0) {
                        printf("# %8.2f %-30s",
                               val->counter * metrics[m].ratio_mul /
                               metrics[r].val.counter,
                               metrics[m].ratio_desc);
                } else {
                        printf("%-41s", "");
                }

                if (val->enabled > val->running)
                        printf("(%4.2f%%)",
                               val->running * 100.0 / val->enabled);
                printf("\n");
        }
}

static void profile_print_readings(void)
{
        if (json_output)
                profile_print_readings_json();
        else
                profile_print_readings_plain();
}

static char *profile_target_name(int tgt_fd)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_func_info *func_info;
        const struct btf_type *t;
        struct btf *btf = NULL;
        char *name = NULL;

        info_linear = bpf_program__get_prog_info_linear(
                tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
        if (IS_ERR_OR_NULL(info_linear)) {
                p_err("failed to get info_linear for prog FD %d", tgt_fd);
                return NULL;
        }

        if (info_linear->info.btf_id == 0 ||
            btf__get_from_id(info_linear->info.btf_id, &btf)) {
                p_err("prog FD %d doesn't have valid btf", tgt_fd);
                goto out;
        }

        func_info = (struct bpf_func_info *)(info_linear->info.func_info);
        t = btf__type_by_id(btf, func_info[0].type_id);
        if (!t) {
                p_err("btf %d doesn't have type %d",
                      info_linear->info.btf_id, func_info[0].type_id);
                goto out;
        }
        name = strdup(btf__name_by_offset(btf, t->name_off));
out:
        free(info_linear);
        return name;
}

static struct profiler_bpf *profile_obj;
static int profile_tgt_fd = -1;
static char *profile_tgt_name;
static int *profile_perf_events;
static int profile_perf_event_cnt;

static void profile_close_perf_events(struct profiler_bpf *obj)
{
        int i;

        for (i = profile_perf_event_cnt - 1; i >= 0; i--)
                close(profile_perf_events[i]);

        free(profile_perf_events);
        profile_perf_event_cnt = 0;
}

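/* Open one perf event per selected metric and per CPU, store the file
 * descriptors in the skeleton's "events" map and enable the counters.
 */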
static int profile_open_perf_events(struct profiler_bpf *obj)
{
        unsigned int cpu, m;
        int map_fd, pmu_fd;

        profile_perf_events = calloc(
                sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
        if (!profile_perf_events) {
                p_err("failed to allocate memory for perf_event array: %s",
                      strerror(errno));
                return -1;
        }
        map_fd = bpf_map__fd(obj->maps.events);
        if (map_fd < 0) {
                p_err("failed to get fd for events map");
                return -1;
        }

        for (m = 0; m < ARRAY_SIZE(metrics); m++) {
                if (!metrics[m].selected)
                        continue;
                for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
                        pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
                                         -1/*pid*/, cpu, -1/*group_fd*/, 0);
                        if (pmu_fd < 0 ||
                            bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
                                                &pmu_fd, BPF_ANY) ||
                            ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
                                p_err("failed to create event %s on cpu %d",
                                      metrics[m].name, cpu);
                                return -1;
                        }
                        profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
                }
        }
        return 0;
}

static void profile_print_and_cleanup(void)
{
        profile_close_perf_events(profile_obj);
        profile_read_values(profile_obj);
        profile_print_readings();
        profiler_bpf__destroy(profile_obj);

        close(profile_tgt_fd);
        free(profile_tgt_name);
}

static void int_exit(int signo)
{
        profile_print_and_cleanup();
        exit(0);
}

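/* Implement "bpftool prog profile": attach the profiler skeleton to the
 * target program and report the selected metrics after the given duration
 * or when interrupted with SIGINT.
 */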
static int do_profile(int argc, char **argv)
{
        int num_metric, num_cpu, err = -1;
        struct bpf_program *prog;
        unsigned long duration;
        char *endptr;

        /* we at least need two args for the prog and one metric */
        if (!REQ_ARGS(3))
                return -EINVAL;

        /* parse target fd */
        profile_tgt_fd = prog_parse_fd(&argc, &argv);
        if (profile_tgt_fd < 0) {
                p_err("failed to parse fd");
                return -1;
        }

        /* parse profiling optional duration */
        if (argc > 2 && is_prefix(argv[0], "duration")) {
                NEXT_ARG();
                duration = strtoul(*argv, &endptr, 0);
                if (*endptr)
                        usage();
                NEXT_ARG();
        } else {
                duration = UINT_MAX;
        }

        num_metric = profile_parse_metrics(argc, argv);
        if (num_metric <= 0)
                goto out;

        num_cpu = libbpf_num_possible_cpus();
        if (num_cpu <= 0) {
                p_err("failed to identify number of CPUs");
                goto out;
        }

        profile_obj = profiler_bpf__open();
        if (!profile_obj) {
                p_err("failed to open and/or load BPF object");
                goto out;
        }

        profile_obj->rodata->num_cpu = num_cpu;
        profile_obj->rodata->num_metric = num_metric;

        /* adjust map sizes */
        bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
        bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
        bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
        bpf_map__resize(profile_obj->maps.counts, 1);

        /* change target name */
        profile_tgt_name = profile_target_name(profile_tgt_fd);
        if (!profile_tgt_name)
                goto out;

        bpf_object__for_each_program(prog, profile_obj->obj) {
                err = bpf_program__set_attach_target(prog, profile_tgt_fd,
                                                     profile_tgt_name);
                if (err) {
                        p_err("failed to set attach target\n");
                        goto out;
                }
        }

        set_max_rlimit();
        err = profiler_bpf__load(profile_obj);
        if (err) {
                p_err("failed to load profile_obj");
                goto out;
        }

        err = profile_open_perf_events(profile_obj);
        if (err)
                goto out;

        err = profiler_bpf__attach(profile_obj);
        if (err) {
                p_err("failed to attach profile_obj");
                goto out;
        }
        signal(SIGINT, int_exit);

        sleep(duration);
        profile_print_and_cleanup();
        return 0;

out:
        profile_close_perf_events(profile_obj);
        profiler_bpf__destroy(profile_obj);
        close(profile_tgt_fd);
        free(profile_tgt_name);
        return err;
}

#endif /* BPFTOOL_WITHOUT_SKELETONS */

static int do_help(int argc, char **argv)
{
        if (json_output) {
                jsonw_null(json_wtr);
                return 0;
        }

        fprintf(stderr,
                "Usage: %1$s %2$s { show | list } [PROG]\n"
                "       %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
                "       %1$s %2$s dump jited PROG [{ file FILE | opcodes | linum }]\n"
                "       %1$s %2$s pin PROG FILE\n"
                "       %1$s %2$s { load | loadall } OBJ PATH \\\n"
                "                         [type TYPE] [dev NAME] \\\n"
                "                         [map { idx IDX | name NAME } MAP]\\\n"
                "                         [pinmaps MAP_DIR]\n"
                "       %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
                "       %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
                "       %1$s %2$s run PROG \\\n"
                "                         data_in FILE \\\n"
                "                         [data_out FILE [data_size_out L]] \\\n"
                "                         [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
                "                         [repeat N]\n"
                "       %1$s %2$s profile PROG [duration DURATION] METRICs\n"
                "       %1$s %2$s tracelog\n"
                "       %1$s %2$s help\n"
                "\n"
                "       " HELP_SPEC_MAP "\n"
                "       " HELP_SPEC_PROGRAM "\n"
                "       TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
                "                 tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
                "                 cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
                "                 lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
                "                 sk_reuseport | flow_dissector | cgroup/sysctl |\n"
                "                 cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
                "                 cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
                "                 cgroup/getpeername4 | cgroup/getpeername6 |\n"
                "                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
                "                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
                "                 cgroup/getsockopt | cgroup/setsockopt |\n"
                "                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
                "       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
                "                        flow_dissector }\n"
                "       METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
                "       " HELP_SPEC_OPTIONS "\n"
                "",
                bin_name, argv[-2]);

        return 0;
}

static const struct cmd cmds[] = {
        { "show",       do_show },
        { "list",       do_show },
        { "help",       do_help },
        { "dump",       do_dump },
        { "pin",        do_pin },
        { "load",       do_load },
        { "loadall",    do_loadall },
        { "attach",     do_attach },
        { "detach",     do_detach },
        { "tracelog",   do_tracelog },
        { "run",        do_run },
        { "profile",    do_profile },
        { 0 }
};

int do_prog(int argc, char **argv)
{
        return cmd_select(cmds, argc, argv, do_help);
}