1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2020 Facebook */
6 #include <linux/netfilter.h>
7 #include <linux/netfilter_arp.h>
8 #include <linux/perf_event.h>
14 #include <bpf/hashmap.h>
16 #include "json_writer.h"
18 #include "xlated_dumper.h"
20 #define PERF_HW_CACHE_LEN 128
22 static struct hashmap
*link_table
;
23 static struct dump_data dd
;
25 static const char *perf_type_name
[PERF_TYPE_MAX
] = {
26 [PERF_TYPE_HARDWARE
] = "hardware",
27 [PERF_TYPE_SOFTWARE
] = "software",
28 [PERF_TYPE_TRACEPOINT
] = "tracepoint",
29 [PERF_TYPE_HW_CACHE
] = "hw-cache",
30 [PERF_TYPE_RAW
] = "raw",
31 [PERF_TYPE_BREAKPOINT
] = "breakpoint",
34 const char *event_symbols_hw
[PERF_COUNT_HW_MAX
] = {
35 [PERF_COUNT_HW_CPU_CYCLES
] = "cpu-cycles",
36 [PERF_COUNT_HW_INSTRUCTIONS
] = "instructions",
37 [PERF_COUNT_HW_CACHE_REFERENCES
] = "cache-references",
38 [PERF_COUNT_HW_CACHE_MISSES
] = "cache-misses",
39 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS
] = "branch-instructions",
40 [PERF_COUNT_HW_BRANCH_MISSES
] = "branch-misses",
41 [PERF_COUNT_HW_BUS_CYCLES
] = "bus-cycles",
42 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
] = "stalled-cycles-frontend",
43 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND
] = "stalled-cycles-backend",
44 [PERF_COUNT_HW_REF_CPU_CYCLES
] = "ref-cycles",
47 const char *event_symbols_sw
[PERF_COUNT_SW_MAX
] = {
48 [PERF_COUNT_SW_CPU_CLOCK
] = "cpu-clock",
49 [PERF_COUNT_SW_TASK_CLOCK
] = "task-clock",
50 [PERF_COUNT_SW_PAGE_FAULTS
] = "page-faults",
51 [PERF_COUNT_SW_CONTEXT_SWITCHES
] = "context-switches",
52 [PERF_COUNT_SW_CPU_MIGRATIONS
] = "cpu-migrations",
53 [PERF_COUNT_SW_PAGE_FAULTS_MIN
] = "minor-faults",
54 [PERF_COUNT_SW_PAGE_FAULTS_MAJ
] = "major-faults",
55 [PERF_COUNT_SW_ALIGNMENT_FAULTS
] = "alignment-faults",
56 [PERF_COUNT_SW_EMULATION_FAULTS
] = "emulation-faults",
57 [PERF_COUNT_SW_DUMMY
] = "dummy",
58 [PERF_COUNT_SW_BPF_OUTPUT
] = "bpf-output",
59 [PERF_COUNT_SW_CGROUP_SWITCHES
] = "cgroup-switches",
62 const char *evsel__hw_cache
[PERF_COUNT_HW_CACHE_MAX
] = {
63 [PERF_COUNT_HW_CACHE_L1D
] = "L1-dcache",
64 [PERF_COUNT_HW_CACHE_L1I
] = "L1-icache",
65 [PERF_COUNT_HW_CACHE_LL
] = "LLC",
66 [PERF_COUNT_HW_CACHE_DTLB
] = "dTLB",
67 [PERF_COUNT_HW_CACHE_ITLB
] = "iTLB",
68 [PERF_COUNT_HW_CACHE_BPU
] = "branch",
69 [PERF_COUNT_HW_CACHE_NODE
] = "node",
72 const char *evsel__hw_cache_op
[PERF_COUNT_HW_CACHE_OP_MAX
] = {
73 [PERF_COUNT_HW_CACHE_OP_READ
] = "load",
74 [PERF_COUNT_HW_CACHE_OP_WRITE
] = "store",
75 [PERF_COUNT_HW_CACHE_OP_PREFETCH
] = "prefetch",
78 const char *evsel__hw_cache_result
[PERF_COUNT_HW_CACHE_RESULT_MAX
] = {
79 [PERF_COUNT_HW_CACHE_RESULT_ACCESS
] = "refs",
80 [PERF_COUNT_HW_CACHE_RESULT_MISS
] = "misses",
/* Look up id in a name table; yields NULL when id is out of range.
 * GCC statement expression so it can be used as an rvalue.
 */
#define perf_event_name(array, id) ({			\
	const char *event_str = NULL;			\
							\
	if ((id) < ARRAY_SIZE(array))			\
		event_str = (array)[id];		\
	event_str;					\
})
91 static int link_parse_fd(int *argc
, char ***argv
)
95 if (is_prefix(**argv
, "id")) {
101 id
= strtoul(**argv
, &endptr
, 0);
103 p_err("can't parse %s as ID", **argv
);
108 fd
= bpf_link_get_fd_by_id(id
);
110 p_err("failed to get link with ID %d: %s", id
, strerror(errno
));
112 } else if (is_prefix(**argv
, "pinned")) {
120 return open_obj_pinned_any(path
, BPF_OBJ_LINK
);
123 p_err("expected 'id' or 'pinned', got: '%s'?", **argv
);
128 show_link_header_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
130 const char *link_type_str
;
132 jsonw_uint_field(wtr
, "id", info
->id
);
133 link_type_str
= libbpf_bpf_link_type_str(info
->type
);
135 jsonw_string_field(wtr
, "type", link_type_str
);
137 jsonw_uint_field(wtr
, "type", info
->type
);
139 jsonw_uint_field(json_wtr
, "prog_id", info
->prog_id
);
142 static void show_link_attach_type_json(__u32 attach_type
, json_writer_t
*wtr
)
144 const char *attach_type_str
;
146 attach_type_str
= libbpf_bpf_attach_type_str(attach_type
);
148 jsonw_string_field(wtr
, "attach_type", attach_type_str
);
150 jsonw_uint_field(wtr
, "attach_type", attach_type
);
153 static void show_link_ifindex_json(__u32 ifindex
, json_writer_t
*wtr
)
155 char devname
[IF_NAMESIZE
] = "(unknown)";
158 if_indextoname(ifindex
, devname
);
160 snprintf(devname
, sizeof(devname
), "(detached)");
161 jsonw_string_field(wtr
, "devname", devname
);
162 jsonw_uint_field(wtr
, "ifindex", ifindex
);
/* True when the iterator target walks a BPF map's elements. */
static bool is_iter_map_target(const char *target_name)
{
	return strcmp(target_name, "bpf_map_elem") == 0 ||
	       strcmp(target_name, "bpf_sk_storage_map") == 0;
}
/* True when the iterator target is the cgroup hierarchy. */
static bool is_iter_cgroup_target(const char *target_name)
{
	return strcmp(target_name, "cgroup") == 0;
}
176 static const char *cgroup_order_string(__u32 order
)
179 case BPF_CGROUP_ITER_ORDER_UNSPEC
:
180 return "order_unspec";
181 case BPF_CGROUP_ITER_SELF_ONLY
:
183 case BPF_CGROUP_ITER_DESCENDANTS_PRE
:
184 return "descendants_pre";
185 case BPF_CGROUP_ITER_DESCENDANTS_POST
:
186 return "descendants_post";
187 case BPF_CGROUP_ITER_ANCESTORS_UP
:
188 return "ancestors_up";
189 default: /* won't happen */
/* True when the iterator target walks tasks (or their files/VMAs). */
static bool is_iter_task_target(const char *target_name)
{
	return strcmp(target_name, "task") == 0 ||
	       strcmp(target_name, "task_file") == 0 ||
	       strcmp(target_name, "task_vma") == 0;
}
201 static void show_iter_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
203 const char *target_name
= u64_to_ptr(info
->iter
.target_name
);
205 jsonw_string_field(wtr
, "target_name", target_name
);
207 if (is_iter_map_target(target_name
))
208 jsonw_uint_field(wtr
, "map_id", info
->iter
.map
.map_id
);
209 else if (is_iter_task_target(target_name
)) {
210 if (info
->iter
.task
.tid
)
211 jsonw_uint_field(wtr
, "tid", info
->iter
.task
.tid
);
212 else if (info
->iter
.task
.pid
)
213 jsonw_uint_field(wtr
, "pid", info
->iter
.task
.pid
);
216 if (is_iter_cgroup_target(target_name
)) {
217 jsonw_lluint_field(wtr
, "cgroup_id", info
->iter
.cgroup
.cgroup_id
);
218 jsonw_string_field(wtr
, "order",
219 cgroup_order_string(info
->iter
.cgroup
.order
));
223 void netfilter_dump_json(const struct bpf_link_info
*info
, json_writer_t
*wtr
)
225 jsonw_uint_field(json_wtr
, "pf",
227 jsonw_uint_field(json_wtr
, "hook",
228 info
->netfilter
.hooknum
);
229 jsonw_int_field(json_wtr
, "prio",
230 info
->netfilter
.priority
);
231 jsonw_uint_field(json_wtr
, "flags",
232 info
->netfilter
.flags
);
235 static int get_prog_info(int prog_id
, struct bpf_prog_info
*info
)
237 __u32 len
= sizeof(*info
);
240 prog_fd
= bpf_prog_get_fd_by_id(prog_id
);
244 memset(info
, 0, sizeof(*info
));
245 err
= bpf_prog_get_info_by_fd(prog_fd
, info
, &len
);
247 p_err("can't get prog info: %s", strerror(errno
));
257 static int cmp_addr_cookie(const void *A
, const void *B
)
259 const struct addr_cookie
*a
= A
, *b
= B
;
261 if (a
->addr
== b
->addr
)
263 return a
->addr
< b
->addr
? -1 : 1;
266 static struct addr_cookie
*
267 get_addr_cookie_array(__u64
*addrs
, __u64
*cookies
, __u32 count
)
269 struct addr_cookie
*data
;
272 data
= calloc(count
, sizeof(data
[0]));
274 p_err("mem alloc failed");
277 for (i
= 0; i
< count
; i
++) {
278 data
[i
].addr
= addrs
[i
];
279 data
[i
].cookie
= cookies
[i
];
281 qsort(data
, count
, sizeof(data
[0]), cmp_addr_cookie
);
286 show_kprobe_multi_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
288 struct addr_cookie
*data
;
291 jsonw_bool_field(json_wtr
, "retprobe",
292 info
->kprobe_multi
.flags
& BPF_F_KPROBE_MULTI_RETURN
);
293 jsonw_uint_field(json_wtr
, "func_cnt", info
->kprobe_multi
.count
);
294 jsonw_uint_field(json_wtr
, "missed", info
->kprobe_multi
.missed
);
295 jsonw_name(json_wtr
, "funcs");
296 jsonw_start_array(json_wtr
);
297 data
= get_addr_cookie_array(u64_to_ptr(info
->kprobe_multi
.addrs
),
298 u64_to_ptr(info
->kprobe_multi
.cookies
),
299 info
->kprobe_multi
.count
);
303 /* Load it once for all. */
305 kernel_syms_load(&dd
);
309 for (i
= 0; i
< dd
.sym_count
; i
++) {
310 if (dd
.sym_mapping
[i
].address
!= data
[j
].addr
)
312 jsonw_start_object(json_wtr
);
313 jsonw_uint_field(json_wtr
, "addr", dd
.sym_mapping
[i
].address
);
314 jsonw_string_field(json_wtr
, "func", dd
.sym_mapping
[i
].name
);
315 /* Print null if it is vmlinux */
316 if (dd
.sym_mapping
[i
].module
[0] == '\0') {
317 jsonw_name(json_wtr
, "module");
318 jsonw_null(json_wtr
);
320 jsonw_string_field(json_wtr
, "module", dd
.sym_mapping
[i
].module
);
322 jsonw_uint_field(json_wtr
, "cookie", data
[j
].cookie
);
323 jsonw_end_object(json_wtr
);
324 if (j
++ == info
->kprobe_multi
.count
)
327 jsonw_end_array(json_wtr
);
332 static __u64
*u64_to_arr(__u64 val
)
334 return (__u64
*) u64_to_ptr(val
);
338 show_uprobe_multi_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
342 jsonw_bool_field(json_wtr
, "retprobe",
343 info
->uprobe_multi
.flags
& BPF_F_UPROBE_MULTI_RETURN
);
344 jsonw_string_field(json_wtr
, "path", (char *) u64_to_ptr(info
->uprobe_multi
.path
));
345 jsonw_uint_field(json_wtr
, "func_cnt", info
->uprobe_multi
.count
);
346 jsonw_int_field(json_wtr
, "pid", (int) info
->uprobe_multi
.pid
);
347 jsonw_name(json_wtr
, "funcs");
348 jsonw_start_array(json_wtr
);
350 for (i
= 0; i
< info
->uprobe_multi
.count
; i
++) {
351 jsonw_start_object(json_wtr
);
352 jsonw_uint_field(json_wtr
, "offset",
353 u64_to_arr(info
->uprobe_multi
.offsets
)[i
]);
354 jsonw_uint_field(json_wtr
, "ref_ctr_offset",
355 u64_to_arr(info
->uprobe_multi
.ref_ctr_offsets
)[i
]);
356 jsonw_uint_field(json_wtr
, "cookie",
357 u64_to_arr(info
->uprobe_multi
.cookies
)[i
]);
358 jsonw_end_object(json_wtr
);
360 jsonw_end_array(json_wtr
);
364 show_perf_event_kprobe_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
366 jsonw_bool_field(wtr
, "retprobe", info
->perf_event
.type
== BPF_PERF_EVENT_KRETPROBE
);
367 jsonw_uint_field(wtr
, "addr", info
->perf_event
.kprobe
.addr
);
368 jsonw_string_field(wtr
, "func",
369 u64_to_ptr(info
->perf_event
.kprobe
.func_name
));
370 jsonw_uint_field(wtr
, "offset", info
->perf_event
.kprobe
.offset
);
371 jsonw_uint_field(wtr
, "missed", info
->perf_event
.kprobe
.missed
);
372 jsonw_uint_field(wtr
, "cookie", info
->perf_event
.kprobe
.cookie
);
376 show_perf_event_uprobe_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
378 jsonw_bool_field(wtr
, "retprobe", info
->perf_event
.type
== BPF_PERF_EVENT_URETPROBE
);
379 jsonw_string_field(wtr
, "file",
380 u64_to_ptr(info
->perf_event
.uprobe
.file_name
));
381 jsonw_uint_field(wtr
, "offset", info
->perf_event
.uprobe
.offset
);
382 jsonw_uint_field(wtr
, "cookie", info
->perf_event
.uprobe
.cookie
);
386 show_perf_event_tracepoint_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
388 jsonw_string_field(wtr
, "tracepoint",
389 u64_to_ptr(info
->perf_event
.tracepoint
.tp_name
));
390 jsonw_uint_field(wtr
, "cookie", info
->perf_event
.tracepoint
.cookie
);
393 static char *perf_config_hw_cache_str(__u64 config
)
395 const char *hw_cache
, *result
, *op
;
396 char *str
= malloc(PERF_HW_CACHE_LEN
);
399 p_err("mem alloc failed");
403 hw_cache
= perf_event_name(evsel__hw_cache
, config
& 0xff);
405 snprintf(str
, PERF_HW_CACHE_LEN
, "%s-", hw_cache
);
407 snprintf(str
, PERF_HW_CACHE_LEN
, "%lld-", config
& 0xff);
409 op
= perf_event_name(evsel__hw_cache_op
, (config
>> 8) & 0xff);
411 snprintf(str
+ strlen(str
), PERF_HW_CACHE_LEN
- strlen(str
),
414 snprintf(str
+ strlen(str
), PERF_HW_CACHE_LEN
- strlen(str
),
415 "%lld-", (config
>> 8) & 0xff);
417 result
= perf_event_name(evsel__hw_cache_result
, config
>> 16);
419 snprintf(str
+ strlen(str
), PERF_HW_CACHE_LEN
- strlen(str
),
422 snprintf(str
+ strlen(str
), PERF_HW_CACHE_LEN
- strlen(str
),
423 "%lld", config
>> 16);
427 static const char *perf_config_str(__u32 type
, __u64 config
)
429 const char *perf_config
;
432 case PERF_TYPE_HARDWARE
:
433 perf_config
= perf_event_name(event_symbols_hw
, config
);
435 case PERF_TYPE_SOFTWARE
:
436 perf_config
= perf_event_name(event_symbols_sw
, config
);
438 case PERF_TYPE_HW_CACHE
:
439 perf_config
= perf_config_hw_cache_str(config
);
449 show_perf_event_event_json(struct bpf_link_info
*info
, json_writer_t
*wtr
)
451 __u64 config
= info
->perf_event
.event
.config
;
452 __u32 type
= info
->perf_event
.event
.type
;
453 const char *perf_type
, *perf_config
;
455 perf_type
= perf_event_name(perf_type_name
, type
);
457 jsonw_string_field(wtr
, "event_type", perf_type
);
459 jsonw_uint_field(wtr
, "event_type", type
);
461 perf_config
= perf_config_str(type
, config
);
463 jsonw_string_field(wtr
, "event_config", perf_config
);
465 jsonw_uint_field(wtr
, "event_config", config
);
467 jsonw_uint_field(wtr
, "cookie", info
->perf_event
.event
.cookie
);
469 if (type
== PERF_TYPE_HW_CACHE
&& perf_config
)
470 free((void *)perf_config
);
473 static int show_link_close_json(int fd
, struct bpf_link_info
*info
)
475 struct bpf_prog_info prog_info
;
476 const char *prog_type_str
;
479 jsonw_start_object(json_wtr
);
481 show_link_header_json(info
, json_wtr
);
483 switch (info
->type
) {
484 case BPF_LINK_TYPE_RAW_TRACEPOINT
:
485 jsonw_string_field(json_wtr
, "tp_name",
486 u64_to_ptr(info
->raw_tracepoint
.tp_name
));
488 case BPF_LINK_TYPE_TRACING
:
489 err
= get_prog_info(info
->prog_id
, &prog_info
);
493 prog_type_str
= libbpf_bpf_prog_type_str(prog_info
.type
);
494 /* libbpf will return NULL for variants unknown to it. */
496 jsonw_string_field(json_wtr
, "prog_type", prog_type_str
);
498 jsonw_uint_field(json_wtr
, "prog_type", prog_info
.type
);
500 show_link_attach_type_json(info
->tracing
.attach_type
,
502 jsonw_uint_field(json_wtr
, "target_obj_id", info
->tracing
.target_obj_id
);
503 jsonw_uint_field(json_wtr
, "target_btf_id", info
->tracing
.target_btf_id
);
505 case BPF_LINK_TYPE_CGROUP
:
506 jsonw_lluint_field(json_wtr
, "cgroup_id",
507 info
->cgroup
.cgroup_id
);
508 show_link_attach_type_json(info
->cgroup
.attach_type
, json_wtr
);
510 case BPF_LINK_TYPE_ITER
:
511 show_iter_json(info
, json_wtr
);
513 case BPF_LINK_TYPE_NETNS
:
514 jsonw_uint_field(json_wtr
, "netns_ino",
515 info
->netns
.netns_ino
);
516 show_link_attach_type_json(info
->netns
.attach_type
, json_wtr
);
518 case BPF_LINK_TYPE_NETFILTER
:
519 netfilter_dump_json(info
, json_wtr
);
521 case BPF_LINK_TYPE_TCX
:
522 show_link_ifindex_json(info
->tcx
.ifindex
, json_wtr
);
523 show_link_attach_type_json(info
->tcx
.attach_type
, json_wtr
);
525 case BPF_LINK_TYPE_NETKIT
:
526 show_link_ifindex_json(info
->netkit
.ifindex
, json_wtr
);
527 show_link_attach_type_json(info
->netkit
.attach_type
, json_wtr
);
529 case BPF_LINK_TYPE_SOCKMAP
:
530 jsonw_uint_field(json_wtr
, "map_id", info
->sockmap
.map_id
);
531 show_link_attach_type_json(info
->sockmap
.attach_type
, json_wtr
);
533 case BPF_LINK_TYPE_XDP
:
534 show_link_ifindex_json(info
->xdp
.ifindex
, json_wtr
);
536 case BPF_LINK_TYPE_STRUCT_OPS
:
537 jsonw_uint_field(json_wtr
, "map_id",
538 info
->struct_ops
.map_id
);
540 case BPF_LINK_TYPE_KPROBE_MULTI
:
541 show_kprobe_multi_json(info
, json_wtr
);
543 case BPF_LINK_TYPE_UPROBE_MULTI
:
544 show_uprobe_multi_json(info
, json_wtr
);
546 case BPF_LINK_TYPE_PERF_EVENT
:
547 switch (info
->perf_event
.type
) {
548 case BPF_PERF_EVENT_EVENT
:
549 show_perf_event_event_json(info
, json_wtr
);
551 case BPF_PERF_EVENT_TRACEPOINT
:
552 show_perf_event_tracepoint_json(info
, json_wtr
);
554 case BPF_PERF_EVENT_KPROBE
:
555 case BPF_PERF_EVENT_KRETPROBE
:
556 show_perf_event_kprobe_json(info
, json_wtr
);
558 case BPF_PERF_EVENT_UPROBE
:
559 case BPF_PERF_EVENT_URETPROBE
:
560 show_perf_event_uprobe_json(info
, json_wtr
);
570 if (!hashmap__empty(link_table
)) {
571 struct hashmap_entry
*entry
;
573 jsonw_name(json_wtr
, "pinned");
574 jsonw_start_array(json_wtr
);
575 hashmap__for_each_key_entry(link_table
, entry
, info
->id
)
576 jsonw_string(json_wtr
, entry
->pvalue
);
577 jsonw_end_array(json_wtr
);
580 emit_obj_refs_json(refs_table
, info
->id
, json_wtr
);
582 jsonw_end_object(json_wtr
);
587 static void show_link_header_plain(struct bpf_link_info
*info
)
589 const char *link_type_str
;
591 printf("%u: ", info
->id
);
592 link_type_str
= libbpf_bpf_link_type_str(info
->type
);
594 printf("%s ", link_type_str
);
596 printf("type %u ", info
->type
);
598 if (info
->type
== BPF_LINK_TYPE_STRUCT_OPS
)
599 printf("map %u ", info
->struct_ops
.map_id
);
601 printf("prog %u ", info
->prog_id
);
604 static void show_link_attach_type_plain(__u32 attach_type
)
606 const char *attach_type_str
;
608 attach_type_str
= libbpf_bpf_attach_type_str(attach_type
);
610 printf("attach_type %s ", attach_type_str
);
612 printf("attach_type %u ", attach_type
);
615 static void show_link_ifindex_plain(__u32 ifindex
)
617 char devname
[IF_NAMESIZE
* 2] = "(unknown)";
618 char tmpname
[IF_NAMESIZE
];
622 ret
= if_indextoname(ifindex
, tmpname
);
624 snprintf(devname
, sizeof(devname
), "(detached)");
626 snprintf(devname
, sizeof(devname
), "%s(%d)",
628 printf("ifindex %s ", devname
);
631 static void show_iter_plain(struct bpf_link_info
*info
)
633 const char *target_name
= u64_to_ptr(info
->iter
.target_name
);
635 printf("target_name %s ", target_name
);
637 if (is_iter_map_target(target_name
))
638 printf("map_id %u ", info
->iter
.map
.map_id
);
639 else if (is_iter_task_target(target_name
)) {
640 if (info
->iter
.task
.tid
)
641 printf("tid %u ", info
->iter
.task
.tid
);
642 else if (info
->iter
.task
.pid
)
643 printf("pid %u ", info
->iter
.task
.pid
);
646 if (is_iter_cgroup_target(target_name
)) {
647 printf("cgroup_id %llu ", info
->iter
.cgroup
.cgroup_id
);
649 cgroup_order_string(info
->iter
.cgroup
.order
));
653 static const char * const pf2name
[] = {
654 [NFPROTO_INET
] = "inet",
655 [NFPROTO_IPV4
] = "ip",
656 [NFPROTO_ARP
] = "arp",
657 [NFPROTO_NETDEV
] = "netdev",
658 [NFPROTO_BRIDGE
] = "bridge",
659 [NFPROTO_IPV6
] = "ip6",
662 static const char * const inethook2name
[] = {
663 [NF_INET_PRE_ROUTING
] = "prerouting",
664 [NF_INET_LOCAL_IN
] = "input",
665 [NF_INET_FORWARD
] = "forward",
666 [NF_INET_LOCAL_OUT
] = "output",
667 [NF_INET_POST_ROUTING
] = "postrouting",
670 static const char * const arphook2name
[] = {
671 [NF_ARP_IN
] = "input",
672 [NF_ARP_OUT
] = "output",
675 void netfilter_dump_plain(const struct bpf_link_info
*info
)
677 const char *hookname
= NULL
, *pfname
= NULL
;
678 unsigned int hook
= info
->netfilter
.hooknum
;
679 unsigned int pf
= info
->netfilter
.pf
;
681 if (pf
< ARRAY_SIZE(pf2name
))
682 pfname
= pf2name
[pf
];
685 case NFPROTO_BRIDGE
: /* bridge shares numbers with enum nf_inet_hooks */
689 if (hook
< ARRAY_SIZE(inethook2name
))
690 hookname
= inethook2name
[hook
];
693 if (hook
< ARRAY_SIZE(arphook2name
))
694 hookname
= arphook2name
[hook
];
700 printf("\n\t%s", pfname
);
702 printf("\n\tpf: %d", pf
);
705 printf(" %s", hookname
);
707 printf(", hook %u,", hook
);
709 printf(" prio %d", info
->netfilter
.priority
);
711 if (info
->netfilter
.flags
)
712 printf(" flags 0x%x", info
->netfilter
.flags
);
715 static void show_kprobe_multi_plain(struct bpf_link_info
*info
)
717 struct addr_cookie
*data
;
720 if (!info
->kprobe_multi
.count
)
723 if (info
->kprobe_multi
.flags
& BPF_F_KPROBE_MULTI_RETURN
)
724 printf("\n\tkretprobe.multi ");
726 printf("\n\tkprobe.multi ");
727 printf("func_cnt %u ", info
->kprobe_multi
.count
);
728 if (info
->kprobe_multi
.missed
)
729 printf("missed %llu ", info
->kprobe_multi
.missed
);
730 data
= get_addr_cookie_array(u64_to_ptr(info
->kprobe_multi
.addrs
),
731 u64_to_ptr(info
->kprobe_multi
.cookies
),
732 info
->kprobe_multi
.count
);
736 /* Load it once for all. */
738 kernel_syms_load(&dd
);
742 printf("\n\t%-16s %-16s %s", "addr", "cookie", "func [module]");
743 for (i
= 0; i
< dd
.sym_count
; i
++) {
744 if (dd
.sym_mapping
[i
].address
!= data
[j
].addr
)
746 printf("\n\t%016lx %-16llx %s",
747 dd
.sym_mapping
[i
].address
, data
[j
].cookie
, dd
.sym_mapping
[i
].name
);
748 if (dd
.sym_mapping
[i
].module
[0] != '\0')
749 printf(" [%s] ", dd
.sym_mapping
[i
].module
);
753 if (j
++ == info
->kprobe_multi
.count
)
760 static void show_uprobe_multi_plain(struct bpf_link_info
*info
)
764 if (!info
->uprobe_multi
.count
)
767 if (info
->uprobe_multi
.flags
& BPF_F_UPROBE_MULTI_RETURN
)
768 printf("\n\turetprobe.multi ");
770 printf("\n\tuprobe.multi ");
772 printf("path %s ", (char *) u64_to_ptr(info
->uprobe_multi
.path
));
773 printf("func_cnt %u ", info
->uprobe_multi
.count
);
775 if (info
->uprobe_multi
.pid
)
776 printf("pid %d ", info
->uprobe_multi
.pid
);
778 printf("\n\t%-16s %-16s %-16s", "offset", "ref_ctr_offset", "cookies");
779 for (i
= 0; i
< info
->uprobe_multi
.count
; i
++) {
780 printf("\n\t0x%-16llx 0x%-16llx 0x%-16llx",
781 u64_to_arr(info
->uprobe_multi
.offsets
)[i
],
782 u64_to_arr(info
->uprobe_multi
.ref_ctr_offsets
)[i
],
783 u64_to_arr(info
->uprobe_multi
.cookies
)[i
]);
787 static void show_perf_event_kprobe_plain(struct bpf_link_info
*info
)
791 buf
= u64_to_ptr(info
->perf_event
.kprobe
.func_name
);
792 if (buf
[0] == '\0' && !info
->perf_event
.kprobe
.addr
)
795 if (info
->perf_event
.type
== BPF_PERF_EVENT_KRETPROBE
)
796 printf("\n\tkretprobe ");
798 printf("\n\tkprobe ");
799 if (info
->perf_event
.kprobe
.addr
)
800 printf("%llx ", info
->perf_event
.kprobe
.addr
);
802 if (info
->perf_event
.kprobe
.offset
)
803 printf("+%#x", info
->perf_event
.kprobe
.offset
);
804 if (info
->perf_event
.kprobe
.missed
)
805 printf(" missed %llu", info
->perf_event
.kprobe
.missed
);
806 if (info
->perf_event
.kprobe
.cookie
)
807 printf(" cookie %llu", info
->perf_event
.kprobe
.cookie
);
811 static void show_perf_event_uprobe_plain(struct bpf_link_info
*info
)
815 buf
= u64_to_ptr(info
->perf_event
.uprobe
.file_name
);
819 if (info
->perf_event
.type
== BPF_PERF_EVENT_URETPROBE
)
820 printf("\n\turetprobe ");
822 printf("\n\tuprobe ");
823 printf("%s+%#x ", buf
, info
->perf_event
.uprobe
.offset
);
824 if (info
->perf_event
.uprobe
.cookie
)
825 printf("cookie %llu ", info
->perf_event
.uprobe
.cookie
);
828 static void show_perf_event_tracepoint_plain(struct bpf_link_info
*info
)
832 buf
= u64_to_ptr(info
->perf_event
.tracepoint
.tp_name
);
836 printf("\n\ttracepoint %s ", buf
);
837 if (info
->perf_event
.tracepoint
.cookie
)
838 printf("cookie %llu ", info
->perf_event
.tracepoint
.cookie
);
841 static void show_perf_event_event_plain(struct bpf_link_info
*info
)
843 __u64 config
= info
->perf_event
.event
.config
;
844 __u32 type
= info
->perf_event
.event
.type
;
845 const char *perf_type
, *perf_config
;
847 printf("\n\tevent ");
848 perf_type
= perf_event_name(perf_type_name
, type
);
850 printf("%s:", perf_type
);
852 printf("%u :", type
);
854 perf_config
= perf_config_str(type
, config
);
856 printf("%s ", perf_config
);
858 printf("%llu ", config
);
860 if (info
->perf_event
.event
.cookie
)
861 printf("cookie %llu ", info
->perf_event
.event
.cookie
);
863 if (type
== PERF_TYPE_HW_CACHE
&& perf_config
)
864 free((void *)perf_config
);
867 static int show_link_close_plain(int fd
, struct bpf_link_info
*info
)
869 struct bpf_prog_info prog_info
;
870 const char *prog_type_str
;
873 show_link_header_plain(info
);
875 switch (info
->type
) {
876 case BPF_LINK_TYPE_RAW_TRACEPOINT
:
877 printf("\n\ttp '%s' ",
878 (const char *)u64_to_ptr(info
->raw_tracepoint
.tp_name
));
880 case BPF_LINK_TYPE_TRACING
:
881 err
= get_prog_info(info
->prog_id
, &prog_info
);
885 prog_type_str
= libbpf_bpf_prog_type_str(prog_info
.type
);
886 /* libbpf will return NULL for variants unknown to it. */
888 printf("\n\tprog_type %s ", prog_type_str
);
890 printf("\n\tprog_type %u ", prog_info
.type
);
892 show_link_attach_type_plain(info
->tracing
.attach_type
);
893 if (info
->tracing
.target_obj_id
|| info
->tracing
.target_btf_id
)
894 printf("\n\ttarget_obj_id %u target_btf_id %u ",
895 info
->tracing
.target_obj_id
,
896 info
->tracing
.target_btf_id
);
898 case BPF_LINK_TYPE_CGROUP
:
899 printf("\n\tcgroup_id %zu ", (size_t)info
->cgroup
.cgroup_id
);
900 show_link_attach_type_plain(info
->cgroup
.attach_type
);
902 case BPF_LINK_TYPE_ITER
:
903 show_iter_plain(info
);
905 case BPF_LINK_TYPE_NETNS
:
906 printf("\n\tnetns_ino %u ", info
->netns
.netns_ino
);
907 show_link_attach_type_plain(info
->netns
.attach_type
);
909 case BPF_LINK_TYPE_NETFILTER
:
910 netfilter_dump_plain(info
);
912 case BPF_LINK_TYPE_TCX
:
914 show_link_ifindex_plain(info
->tcx
.ifindex
);
915 show_link_attach_type_plain(info
->tcx
.attach_type
);
917 case BPF_LINK_TYPE_NETKIT
:
919 show_link_ifindex_plain(info
->netkit
.ifindex
);
920 show_link_attach_type_plain(info
->netkit
.attach_type
);
922 case BPF_LINK_TYPE_SOCKMAP
:
924 printf("map_id %u ", info
->sockmap
.map_id
);
925 show_link_attach_type_plain(info
->sockmap
.attach_type
);
927 case BPF_LINK_TYPE_XDP
:
929 show_link_ifindex_plain(info
->xdp
.ifindex
);
931 case BPF_LINK_TYPE_KPROBE_MULTI
:
932 show_kprobe_multi_plain(info
);
934 case BPF_LINK_TYPE_UPROBE_MULTI
:
935 show_uprobe_multi_plain(info
);
937 case BPF_LINK_TYPE_PERF_EVENT
:
938 switch (info
->perf_event
.type
) {
939 case BPF_PERF_EVENT_EVENT
:
940 show_perf_event_event_plain(info
);
942 case BPF_PERF_EVENT_TRACEPOINT
:
943 show_perf_event_tracepoint_plain(info
);
945 case BPF_PERF_EVENT_KPROBE
:
946 case BPF_PERF_EVENT_KRETPROBE
:
947 show_perf_event_kprobe_plain(info
);
949 case BPF_PERF_EVENT_UPROBE
:
950 case BPF_PERF_EVENT_URETPROBE
:
951 show_perf_event_uprobe_plain(info
);
961 if (!hashmap__empty(link_table
)) {
962 struct hashmap_entry
*entry
;
964 hashmap__for_each_key_entry(link_table
, entry
, info
->id
)
965 printf("\n\tpinned %s", (char *)entry
->pvalue
);
967 emit_obj_refs_plain(refs_table
, info
->id
, "\n\tpids ");
974 static int do_show_link(int fd
)
976 __u64
*ref_ctr_offsets
= NULL
, *offsets
= NULL
, *cookies
= NULL
;
977 struct bpf_link_info info
;
978 __u32 len
= sizeof(info
);
979 char path_buf
[PATH_MAX
];
985 memset(&info
, 0, sizeof(info
));
988 err
= bpf_link_get_info_by_fd(fd
, &info
, &len
);
990 p_err("can't get link info: %s",
995 if (info
.type
== BPF_LINK_TYPE_RAW_TRACEPOINT
&&
996 !info
.raw_tracepoint
.tp_name
) {
997 info
.raw_tracepoint
.tp_name
= ptr_to_u64(&buf
);
998 info
.raw_tracepoint
.tp_name_len
= sizeof(buf
);
1001 if (info
.type
== BPF_LINK_TYPE_ITER
&&
1002 !info
.iter
.target_name
) {
1003 info
.iter
.target_name
= ptr_to_u64(&buf
);
1004 info
.iter
.target_name_len
= sizeof(buf
);
1007 if (info
.type
== BPF_LINK_TYPE_KPROBE_MULTI
&&
1008 !info
.kprobe_multi
.addrs
) {
1009 count
= info
.kprobe_multi
.count
;
1011 addrs
= calloc(count
, sizeof(__u64
));
1013 p_err("mem alloc failed");
1017 info
.kprobe_multi
.addrs
= ptr_to_u64(addrs
);
1018 cookies
= calloc(count
, sizeof(__u64
));
1020 p_err("mem alloc failed");
1025 info
.kprobe_multi
.cookies
= ptr_to_u64(cookies
);
1029 if (info
.type
== BPF_LINK_TYPE_UPROBE_MULTI
&&
1030 !info
.uprobe_multi
.offsets
) {
1031 count
= info
.uprobe_multi
.count
;
1033 offsets
= calloc(count
, sizeof(__u64
));
1035 p_err("mem alloc failed");
1039 info
.uprobe_multi
.offsets
= ptr_to_u64(offsets
);
1040 ref_ctr_offsets
= calloc(count
, sizeof(__u64
));
1041 if (!ref_ctr_offsets
) {
1042 p_err("mem alloc failed");
1047 info
.uprobe_multi
.ref_ctr_offsets
= ptr_to_u64(ref_ctr_offsets
);
1048 cookies
= calloc(count
, sizeof(__u64
));
1050 p_err("mem alloc failed");
1051 free(ref_ctr_offsets
);
1056 info
.uprobe_multi
.cookies
= ptr_to_u64(cookies
);
1057 info
.uprobe_multi
.path
= ptr_to_u64(path_buf
);
1058 info
.uprobe_multi
.path_size
= sizeof(path_buf
);
1062 if (info
.type
== BPF_LINK_TYPE_PERF_EVENT
) {
1063 switch (info
.perf_event
.type
) {
1064 case BPF_PERF_EVENT_TRACEPOINT
:
1065 if (!info
.perf_event
.tracepoint
.tp_name
) {
1066 info
.perf_event
.tracepoint
.tp_name
= ptr_to_u64(&buf
);
1067 info
.perf_event
.tracepoint
.name_len
= sizeof(buf
);
1071 case BPF_PERF_EVENT_KPROBE
:
1072 case BPF_PERF_EVENT_KRETPROBE
:
1073 if (!info
.perf_event
.kprobe
.func_name
) {
1074 info
.perf_event
.kprobe
.func_name
= ptr_to_u64(&buf
);
1075 info
.perf_event
.kprobe
.name_len
= sizeof(buf
);
1079 case BPF_PERF_EVENT_UPROBE
:
1080 case BPF_PERF_EVENT_URETPROBE
:
1081 if (!info
.perf_event
.uprobe
.file_name
) {
1082 info
.perf_event
.uprobe
.file_name
= ptr_to_u64(&buf
);
1083 info
.perf_event
.uprobe
.name_len
= sizeof(buf
);
1093 show_link_close_json(fd
, &info
);
1095 show_link_close_plain(fd
, &info
);
1097 free(ref_ctr_offsets
);
1105 static int do_show(int argc
, char **argv
)
1111 link_table
= hashmap__new(hash_fn_for_key_as_id
,
1112 equal_fn_for_key_as_id
, NULL
);
1113 if (IS_ERR(link_table
)) {
1114 p_err("failed to create hashmap for pinned paths");
1117 build_pinned_obj_table(link_table
, BPF_OBJ_LINK
);
1119 build_obj_refs_table(&refs_table
, BPF_OBJ_LINK
);
1122 fd
= link_parse_fd(&argc
, &argv
);
1133 jsonw_start_array(json_wtr
);
1135 err
= bpf_link_get_next_id(id
, &id
);
1137 if (errno
== ENOENT
)
1139 p_err("can't get next link: %s%s", strerror(errno
),
1140 errno
== EINVAL
? " -- kernel too old?" : "");
1144 fd
= bpf_link_get_fd_by_id(id
);
1146 if (errno
== ENOENT
)
1148 p_err("can't get link by id (%u): %s",
1149 id
, strerror(errno
));
1153 err
= do_show_link(fd
);
1158 jsonw_end_array(json_wtr
);
1160 delete_obj_refs_table(refs_table
);
1163 delete_pinned_obj_table(link_table
);
1167 kernel_syms_destroy(&dd
);
1168 return errno
== ENOENT
? 0 : -1;
1171 static int do_pin(int argc
, char **argv
)
1175 err
= do_pin_any(argc
, argv
, link_parse_fd
);
1176 if (!err
&& json_output
)
1177 jsonw_null(json_wtr
);
1181 static int do_detach(int argc
, char **argv
)
1186 p_err("link specifier is invalid or missing\n");
1190 fd
= link_parse_fd(&argc
, &argv
);
1194 err
= bpf_link_detach(fd
);
1199 p_err("failed link detach: %s", strerror(-err
));
1204 jsonw_null(json_wtr
);
1209 static int do_help(int argc
, char **argv
)
1212 jsonw_null(json_wtr
);
1217 "Usage: %1$s %2$s { show | list } [LINK]\n"
1218 " %1$s %2$s pin LINK FILE\n"
1219 " %1$s %2$s detach LINK\n"
1222 " " HELP_SPEC_LINK
"\n"
1223 " " HELP_SPEC_OPTIONS
" |\n"
1224 " {-f|--bpffs} | {-n|--nomount} }\n"
1226 bin_name
, argv
[-2]);
1231 static const struct cmd cmds
[] = {
1232 { "show", do_show
},
1233 { "list", do_show
},
1234 { "help", do_help
},
1236 { "detach", do_detach
},
1240 int do_link(int argc
, char **argv
)
1242 return cmd_select(cmds
, argc
, argv
, do_help
);