/*
 * bpftool: recognize BPF_PROG_TYPE_CGROUP_DEVICE programs
 * [linux/fpc-iii.git] / tools / bpf / bpftool / prog.c
 * blob 099e21cf1b5c7e5f5287890815e9265815541166
 */
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *      Redistribution and use in source and binary forms, with or
 *      without modification, are permitted provided that the following
 *      conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* Author: Jakub Kicinski <kubakici@wp.pl> */
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <bpf.h>
#include <libbpf.h>

#include "main.h"
#include "disasm.h"
53 static const char * const prog_type_name[] = {
54 [BPF_PROG_TYPE_UNSPEC] = "unspec",
55 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
56 [BPF_PROG_TYPE_KPROBE] = "kprobe",
57 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
58 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
59 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
60 [BPF_PROG_TYPE_XDP] = "xdp",
61 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
62 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
63 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
64 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
65 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
66 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
67 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
68 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
69 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
72 static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
74 struct timespec real_time_ts, boot_time_ts;
75 time_t wallclock_secs;
76 struct tm load_tm;
78 buf[--size] = '\0';
80 if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
81 clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
82 perror("Can't read clocks");
83 snprintf(buf, size, "%llu", nsecs / 1000000000);
84 return;
87 wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
88 nsecs / 1000000000;
90 if (!localtime_r(&wallclock_secs, &load_tm)) {
91 snprintf(buf, size, "%llu", nsecs / 1000000000);
92 return;
95 strftime(buf, size, "%b %d/%H:%M", &load_tm);
98 static int prog_fd_by_tag(unsigned char *tag)
100 struct bpf_prog_info info = {};
101 __u32 len = sizeof(info);
102 unsigned int id = 0;
103 int err;
104 int fd;
106 while (true) {
107 err = bpf_prog_get_next_id(id, &id);
108 if (err) {
109 p_err("%s", strerror(errno));
110 return -1;
113 fd = bpf_prog_get_fd_by_id(id);
114 if (fd < 0) {
115 p_err("can't get prog by id (%u): %s",
116 id, strerror(errno));
117 return -1;
120 err = bpf_obj_get_info_by_fd(fd, &info, &len);
121 if (err) {
122 p_err("can't get prog info (%u): %s",
123 id, strerror(errno));
124 close(fd);
125 return -1;
128 if (!memcmp(tag, info.tag, BPF_TAG_SIZE))
129 return fd;
131 close(fd);
135 int prog_parse_fd(int *argc, char ***argv)
137 int fd;
139 if (is_prefix(**argv, "id")) {
140 unsigned int id;
141 char *endptr;
143 NEXT_ARGP();
145 id = strtoul(**argv, &endptr, 0);
146 if (*endptr) {
147 p_err("can't parse %s as ID", **argv);
148 return -1;
150 NEXT_ARGP();
152 fd = bpf_prog_get_fd_by_id(id);
153 if (fd < 0)
154 p_err("get by id (%u): %s", id, strerror(errno));
155 return fd;
156 } else if (is_prefix(**argv, "tag")) {
157 unsigned char tag[BPF_TAG_SIZE];
159 NEXT_ARGP();
161 if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
162 tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
163 != BPF_TAG_SIZE) {
164 p_err("can't parse tag");
165 return -1;
167 NEXT_ARGP();
169 return prog_fd_by_tag(tag);
170 } else if (is_prefix(**argv, "pinned")) {
171 char *path;
173 NEXT_ARGP();
175 path = **argv;
176 NEXT_ARGP();
178 return open_obj_pinned_any(path, BPF_OBJ_PROG);
181 p_err("expected 'id', 'tag' or 'pinned', got: '%s'?", **argv);
182 return -1;
185 static void show_prog_maps(int fd, u32 num_maps)
187 struct bpf_prog_info info = {};
188 __u32 len = sizeof(info);
189 __u32 map_ids[num_maps];
190 unsigned int i;
191 int err;
193 info.nr_map_ids = num_maps;
194 info.map_ids = ptr_to_u64(map_ids);
196 err = bpf_obj_get_info_by_fd(fd, &info, &len);
197 if (err || !info.nr_map_ids)
198 return;
200 if (json_output) {
201 jsonw_name(json_wtr, "map_ids");
202 jsonw_start_array(json_wtr);
203 for (i = 0; i < info.nr_map_ids; i++)
204 jsonw_uint(json_wtr, map_ids[i]);
205 jsonw_end_array(json_wtr);
206 } else {
207 printf(" map_ids ");
208 for (i = 0; i < info.nr_map_ids; i++)
209 printf("%u%s", map_ids[i],
210 i == info.nr_map_ids - 1 ? "" : ",");
214 static void print_prog_json(struct bpf_prog_info *info, int fd)
216 char *memlock;
218 jsonw_start_object(json_wtr);
219 jsonw_uint_field(json_wtr, "id", info->id);
220 if (info->type < ARRAY_SIZE(prog_type_name))
221 jsonw_string_field(json_wtr, "type",
222 prog_type_name[info->type]);
223 else
224 jsonw_uint_field(json_wtr, "type", info->type);
226 if (*info->name)
227 jsonw_string_field(json_wtr, "name", info->name);
229 jsonw_name(json_wtr, "tag");
230 jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
231 info->tag[0], info->tag[1], info->tag[2], info->tag[3],
232 info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
234 print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
236 if (info->load_time) {
237 char buf[32];
239 print_boot_time(info->load_time, buf, sizeof(buf));
241 /* Piggy back on load_time, since 0 uid is a valid one */
242 jsonw_string_field(json_wtr, "loaded_at", buf);
243 jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
246 jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
248 if (info->jited_prog_len) {
249 jsonw_bool_field(json_wtr, "jited", true);
250 jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
251 } else {
252 jsonw_bool_field(json_wtr, "jited", false);
255 memlock = get_fdinfo(fd, "memlock");
256 if (memlock)
257 jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
258 free(memlock);
260 if (info->nr_map_ids)
261 show_prog_maps(fd, info->nr_map_ids);
263 if (!hash_empty(prog_table.table)) {
264 struct pinned_obj *obj;
266 jsonw_name(json_wtr, "pinned");
267 jsonw_start_array(json_wtr);
268 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
269 if (obj->id == info->id)
270 jsonw_string(json_wtr, obj->path);
272 jsonw_end_array(json_wtr);
275 jsonw_end_object(json_wtr);
278 static void print_prog_plain(struct bpf_prog_info *info, int fd)
280 char *memlock;
282 printf("%u: ", info->id);
283 if (info->type < ARRAY_SIZE(prog_type_name))
284 printf("%s ", prog_type_name[info->type]);
285 else
286 printf("type %u ", info->type);
288 if (*info->name)
289 printf("name %s ", info->name);
291 printf("tag ");
292 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
293 print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
294 printf("\n");
296 if (info->load_time) {
297 char buf[32];
299 print_boot_time(info->load_time, buf, sizeof(buf));
301 /* Piggy back on load_time, since 0 uid is a valid one */
302 printf("\tloaded_at %s uid %u\n", buf, info->created_by_uid);
305 printf("\txlated %uB", info->xlated_prog_len);
307 if (info->jited_prog_len)
308 printf(" jited %uB", info->jited_prog_len);
309 else
310 printf(" not jited");
312 memlock = get_fdinfo(fd, "memlock");
313 if (memlock)
314 printf(" memlock %sB", memlock);
315 free(memlock);
317 if (info->nr_map_ids)
318 show_prog_maps(fd, info->nr_map_ids);
320 if (!hash_empty(prog_table.table)) {
321 struct pinned_obj *obj;
323 printf("\n");
324 hash_for_each_possible(prog_table.table, obj, hash, info->id) {
325 if (obj->id == info->id)
326 printf("\tpinned %s\n", obj->path);
330 printf("\n");
333 static int show_prog(int fd)
335 struct bpf_prog_info info = {};
336 __u32 len = sizeof(info);
337 int err;
339 err = bpf_obj_get_info_by_fd(fd, &info, &len);
340 if (err) {
341 p_err("can't get prog info: %s", strerror(errno));
342 return -1;
345 if (json_output)
346 print_prog_json(&info, fd);
347 else
348 print_prog_plain(&info, fd);
350 return 0;
353 static int do_show(int argc, char **argv)
355 __u32 id = 0;
356 int err;
357 int fd;
359 if (show_pinned)
360 build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
362 if (argc == 2) {
363 fd = prog_parse_fd(&argc, &argv);
364 if (fd < 0)
365 return -1;
367 return show_prog(fd);
370 if (argc)
371 return BAD_ARG();
373 if (json_output)
374 jsonw_start_array(json_wtr);
375 while (true) {
376 err = bpf_prog_get_next_id(id, &id);
377 if (err) {
378 if (errno == ENOENT) {
379 err = 0;
380 break;
382 p_err("can't get next program: %s%s", strerror(errno),
383 errno == EINVAL ? " -- kernel too old?" : "");
384 err = -1;
385 break;
388 fd = bpf_prog_get_fd_by_id(id);
389 if (fd < 0) {
390 if (errno == ENOENT)
391 continue;
392 p_err("can't get prog by id (%u): %s",
393 id, strerror(errno));
394 err = -1;
395 break;
398 err = show_prog(fd);
399 close(fd);
400 if (err)
401 break;
404 if (json_output)
405 jsonw_end_array(json_wtr);
407 return err;
410 #define SYM_MAX_NAME 256
412 struct kernel_sym {
413 unsigned long address;
414 char name[SYM_MAX_NAME];
417 struct dump_data {
418 unsigned long address_call_base;
419 struct kernel_sym *sym_mapping;
420 __u32 sym_count;
421 char scratch_buff[SYM_MAX_NAME];
424 static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
426 return ((struct kernel_sym *)sym_a)->address -
427 ((struct kernel_sym *)sym_b)->address;
430 static void kernel_syms_load(struct dump_data *dd)
432 struct kernel_sym *sym;
433 char buff[256];
434 void *tmp, *address;
435 FILE *fp;
437 fp = fopen("/proc/kallsyms", "r");
438 if (!fp)
439 return;
441 while (!feof(fp)) {
442 if (!fgets(buff, sizeof(buff), fp))
443 break;
444 tmp = realloc(dd->sym_mapping,
445 (dd->sym_count + 1) *
446 sizeof(*dd->sym_mapping));
447 if (!tmp) {
448 out:
449 free(dd->sym_mapping);
450 dd->sym_mapping = NULL;
451 fclose(fp);
452 return;
454 dd->sym_mapping = tmp;
455 sym = &dd->sym_mapping[dd->sym_count];
456 if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
457 continue;
458 sym->address = (unsigned long)address;
459 if (!strcmp(sym->name, "__bpf_call_base")) {
460 dd->address_call_base = sym->address;
461 /* sysctl kernel.kptr_restrict was set */
462 if (!sym->address)
463 goto out;
465 if (sym->address)
466 dd->sym_count++;
469 fclose(fp);
471 qsort(dd->sym_mapping, dd->sym_count,
472 sizeof(*dd->sym_mapping), kernel_syms_cmp);
475 static void kernel_syms_destroy(struct dump_data *dd)
477 free(dd->sym_mapping);
480 static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
481 unsigned long key)
483 struct kernel_sym sym = {
484 .address = key,
487 return dd->sym_mapping ?
488 bsearch(&sym, dd->sym_mapping, dd->sym_count,
489 sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
/* print_bpf_insn() callback for plain output: printf-style to stdout.
 * @env is unused (signature dictated by the disasm callback interface).
 */
static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
501 static const char *print_call_pcrel(struct dump_data *dd,
502 struct kernel_sym *sym,
503 unsigned long address,
504 const struct bpf_insn *insn)
506 if (sym)
507 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
508 "%+d#%s", insn->off, sym->name);
509 else
510 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
511 "%+d#0x%lx", insn->off, address);
512 return dd->scratch_buff;
515 static const char *print_call_helper(struct dump_data *dd,
516 struct kernel_sym *sym,
517 unsigned long address)
519 if (sym)
520 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
521 "%s", sym->name);
522 else
523 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
524 "0x%lx", address);
525 return dd->scratch_buff;
528 static const char *print_call(void *private_data,
529 const struct bpf_insn *insn)
531 struct dump_data *dd = private_data;
532 unsigned long address = dd->address_call_base + insn->imm;
533 struct kernel_sym *sym;
535 sym = kernel_syms_search(dd, address);
536 if (insn->src_reg == BPF_PSEUDO_CALL)
537 return print_call_pcrel(dd, sym, address, insn);
538 else
539 return print_call_helper(dd, sym, address);
542 static const char *print_imm(void *private_data,
543 const struct bpf_insn *insn,
544 __u64 full_imm)
546 struct dump_data *dd = private_data;
548 if (insn->src_reg == BPF_PSEUDO_MAP_FD)
549 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
550 "map[id:%u]", insn->imm);
551 else
552 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
553 "0x%llx", (unsigned long long)full_imm);
554 return dd->scratch_buff;
557 static void dump_xlated_plain(struct dump_data *dd, void *buf,
558 unsigned int len, bool opcodes)
560 const struct bpf_insn_cbs cbs = {
561 .cb_print = print_insn,
562 .cb_call = print_call,
563 .cb_imm = print_imm,
564 .private_data = dd,
566 struct bpf_insn *insn = buf;
567 bool double_insn = false;
568 unsigned int i;
570 for (i = 0; i < len / sizeof(*insn); i++) {
571 if (double_insn) {
572 double_insn = false;
573 continue;
576 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
578 printf("% 4d: ", i);
579 print_bpf_insn(&cbs, NULL, insn + i, true);
581 if (opcodes) {
582 printf(" ");
583 fprint_hex(stdout, insn + i, 8, " ");
584 if (double_insn && i < len - 1) {
585 printf(" ");
586 fprint_hex(stdout, insn + i + 1, 8, " ");
588 printf("\n");
593 static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
595 unsigned int l = strlen(fmt);
596 char chomped_fmt[l];
597 va_list args;
599 va_start(args, fmt);
600 if (l > 0) {
601 strncpy(chomped_fmt, fmt, l - 1);
602 chomped_fmt[l - 1] = '\0';
604 jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
605 va_end(args);
608 static void dump_xlated_json(struct dump_data *dd, void *buf,
609 unsigned int len, bool opcodes)
611 const struct bpf_insn_cbs cbs = {
612 .cb_print = print_insn_json,
613 .cb_call = print_call,
614 .cb_imm = print_imm,
615 .private_data = dd,
617 struct bpf_insn *insn = buf;
618 bool double_insn = false;
619 unsigned int i;
621 jsonw_start_array(json_wtr);
622 for (i = 0; i < len / sizeof(*insn); i++) {
623 if (double_insn) {
624 double_insn = false;
625 continue;
627 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
629 jsonw_start_object(json_wtr);
630 jsonw_name(json_wtr, "disasm");
631 print_bpf_insn(&cbs, NULL, insn + i, true);
633 if (opcodes) {
634 jsonw_name(json_wtr, "opcodes");
635 jsonw_start_object(json_wtr);
637 jsonw_name(json_wtr, "code");
638 jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
640 jsonw_name(json_wtr, "src_reg");
641 jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
643 jsonw_name(json_wtr, "dst_reg");
644 jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
646 jsonw_name(json_wtr, "off");
647 print_hex_data_json((uint8_t *)(&insn[i].off), 2);
649 jsonw_name(json_wtr, "imm");
650 if (double_insn && i < len - 1)
651 print_hex_data_json((uint8_t *)(&insn[i].imm),
652 12);
653 else
654 print_hex_data_json((uint8_t *)(&insn[i].imm),
656 jsonw_end_object(json_wtr);
658 jsonw_end_object(json_wtr);
660 jsonw_end_array(json_wtr);
663 static int do_dump(int argc, char **argv)
665 struct bpf_prog_info info = {};
666 struct dump_data dd = {};
667 __u32 len = sizeof(info);
668 unsigned int buf_size;
669 char *filepath = NULL;
670 bool opcodes = false;
671 unsigned char *buf;
672 __u32 *member_len;
673 __u64 *member_ptr;
674 ssize_t n;
675 int err;
676 int fd;
678 if (is_prefix(*argv, "jited")) {
679 member_len = &info.jited_prog_len;
680 member_ptr = &info.jited_prog_insns;
681 } else if (is_prefix(*argv, "xlated")) {
682 member_len = &info.xlated_prog_len;
683 member_ptr = &info.xlated_prog_insns;
684 } else {
685 p_err("expected 'xlated' or 'jited', got: %s", *argv);
686 return -1;
688 NEXT_ARG();
690 if (argc < 2)
691 usage();
693 fd = prog_parse_fd(&argc, &argv);
694 if (fd < 0)
695 return -1;
697 if (is_prefix(*argv, "file")) {
698 NEXT_ARG();
699 if (!argc) {
700 p_err("expected file path");
701 return -1;
704 filepath = *argv;
705 NEXT_ARG();
706 } else if (is_prefix(*argv, "opcodes")) {
707 opcodes = true;
708 NEXT_ARG();
711 if (argc) {
712 usage();
713 return -1;
716 err = bpf_obj_get_info_by_fd(fd, &info, &len);
717 if (err) {
718 p_err("can't get prog info: %s", strerror(errno));
719 return -1;
722 if (!*member_len) {
723 p_info("no instructions returned");
724 close(fd);
725 return 0;
728 buf_size = *member_len;
730 buf = malloc(buf_size);
731 if (!buf) {
732 p_err("mem alloc failed");
733 close(fd);
734 return -1;
737 memset(&info, 0, sizeof(info));
739 *member_ptr = ptr_to_u64(buf);
740 *member_len = buf_size;
742 err = bpf_obj_get_info_by_fd(fd, &info, &len);
743 close(fd);
744 if (err) {
745 p_err("can't get prog info: %s", strerror(errno));
746 goto err_free;
749 if (*member_len > buf_size) {
750 p_err("too many instructions returned");
751 goto err_free;
754 if ((member_len == &info.jited_prog_len &&
755 info.jited_prog_insns == 0) ||
756 (member_len == &info.xlated_prog_len &&
757 info.xlated_prog_insns == 0)) {
758 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
759 goto err_free;
762 if (filepath) {
763 fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
764 if (fd < 0) {
765 p_err("can't open file %s: %s", filepath,
766 strerror(errno));
767 goto err_free;
770 n = write(fd, buf, *member_len);
771 close(fd);
772 if (n != *member_len) {
773 p_err("error writing output file: %s",
774 n < 0 ? strerror(errno) : "short write");
775 goto err_free;
777 } else {
778 if (member_len == &info.jited_prog_len) {
779 disasm_print_insn(buf, *member_len, opcodes);
780 } else {
781 kernel_syms_load(&dd);
782 if (json_output)
783 dump_xlated_json(&dd, buf, *member_len, opcodes);
784 else
785 dump_xlated_plain(&dd, buf, *member_len, opcodes);
786 kernel_syms_destroy(&dd);
790 free(buf);
791 return 0;
793 err_free:
794 free(buf);
795 return -1;
798 static int do_pin(int argc, char **argv)
800 int err;
802 err = do_pin_any(argc, argv, bpf_prog_get_fd_by_id);
803 if (!err && json_output)
804 jsonw_null(json_wtr);
805 return err;
808 static int do_load(int argc, char **argv)
810 struct bpf_object *obj;
811 int prog_fd;
813 if (argc != 2)
814 usage();
816 if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) {
817 p_err("failed to load program");
818 return -1;
821 if (do_pin_fd(prog_fd, argv[1])) {
822 p_err("failed to pin program");
823 return -1;
826 if (json_output)
827 jsonw_null(json_wtr);
829 return 0;
832 static int do_help(int argc, char **argv)
834 if (json_output) {
835 jsonw_null(json_wtr);
836 return 0;
839 fprintf(stderr,
840 "Usage: %s %s { show | list } [PROG]\n"
841 " %s %s dump xlated PROG [{ file FILE | opcodes }]\n"
842 " %s %s dump jited PROG [{ file FILE | opcodes }]\n"
843 " %s %s pin PROG FILE\n"
844 " %s %s load OBJ FILE\n"
845 " %s %s help\n"
846 "\n"
847 " " HELP_SPEC_PROGRAM "\n"
848 " " HELP_SPEC_OPTIONS "\n"
850 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
851 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
853 return 0;
856 static const struct cmd cmds[] = {
857 { "show", do_show },
858 { "list", do_show },
859 { "help", do_help },
860 { "dump", do_dump },
861 { "pin", do_pin },
862 { "load", do_load },
863 { 0 }
866 int do_prog(int argc, char **argv)
868 return cmd_select(cmds, argc, argv, do_help);