// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * BTF-to-C type converter.
 *
 * Copyright (c) 2019 Facebook
 */
17 #include <linux/err.h>
18 #include <linux/btf.h>
19 #include <linux/kernel.h>
23 #include "libbpf_internal.h"
24 #include "str_error.h"
26 static const char PREFIXES
[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
27 static const size_t PREFIX_CNT
= sizeof(PREFIXES
) - 1;
29 static const char *pfx(int lvl
)
31 return lvl
>= PREFIX_CNT
? PREFIXES
: &PREFIXES
[PREFIX_CNT
- lvl
];
34 enum btf_dump_type_order_state
{
40 enum btf_dump_type_emit_state
{
46 /* per-type auxiliary state */
47 struct btf_dump_type_aux_state
{
48 /* topological sorting state */
49 enum btf_dump_type_order_state order_state
: 2;
50 /* emitting state used to determine the need for forward declaration */
51 enum btf_dump_type_emit_state emit_state
: 2;
52 /* whether forward declaration was already emitted */
54 /* whether unique non-duplicate name was already assigned */
55 __u8 name_resolved
: 1;
56 /* whether type is referenced from any other type */
60 /* indent string length; one indent string is added for each indent level */
61 #define BTF_DATA_INDENT_STR_LEN 32
64 * Common internal data for BTF type data dump operations.
66 struct btf_dump_data
{
67 const void *data_end
; /* end of valid data to show */
71 __u8 indent_lvl
; /* base indent level */
72 char indent_str
[BTF_DATA_INDENT_STR_LEN
];
73 /* below are used during iteration */
76 bool is_array_terminated
;
81 const struct btf
*btf
;
82 btf_dump_printf_fn_t printf_fn
;
89 /* per-type auxiliary state */
90 struct btf_dump_type_aux_state
*type_states
;
91 size_t type_states_cap
;
92 /* per-type optional cached unique name, must be freed, if present */
93 const char **cached_names
;
94 size_t cached_names_cap
;
96 /* topo-sorted list of dependent type definitions */
102 * stack of type declarations (e.g., chain of modifiers, arrays,
109 /* maps struct/union/enum name to a number of name occurrences */
110 struct hashmap
*type_names
;
112 * maps typedef identifiers and enum value names to a number of such
115 struct hashmap
*ident_names
;
117 * data for typed display; allocated if needed.
119 struct btf_dump_data
*typed_dump
;
122 static size_t str_hash_fn(long key
, void *ctx
)
124 return str_hash((void *)key
);
/* hashmap callback: compare two string keys for equality (keys are
 * char* values cast to long).
 */
static bool str_equal_fn(long a, long b, void *ctx)
{
	const char *s1 = (void *)a;
	const char *s2 = (void *)b;

	return strcmp(s1, s2) == 0;
}
132 static const char *btf_name_of(const struct btf_dump
*d
, __u32 name_off
)
134 return btf__name_by_offset(d
->btf
, name_off
);
137 static void btf_dump_printf(const struct btf_dump
*d
, const char *fmt
, ...)
142 d
->printf_fn(d
->cb_ctx
, fmt
, args
);
146 static int btf_dump_mark_referenced(struct btf_dump
*d
);
147 static int btf_dump_resize(struct btf_dump
*d
);
149 struct btf_dump
*btf_dump__new(const struct btf
*btf
,
150 btf_dump_printf_fn_t printf_fn
,
152 const struct btf_dump_opts
*opts
)
157 if (!OPTS_VALID(opts
, btf_dump_opts
))
158 return libbpf_err_ptr(-EINVAL
);
161 return libbpf_err_ptr(-EINVAL
);
163 d
= calloc(1, sizeof(struct btf_dump
));
165 return libbpf_err_ptr(-ENOMEM
);
168 d
->printf_fn
= printf_fn
;
170 d
->ptr_sz
= btf__pointer_size(btf
) ? : sizeof(void *);
172 d
->type_names
= hashmap__new(str_hash_fn
, str_equal_fn
, NULL
);
173 if (IS_ERR(d
->type_names
)) {
174 err
= PTR_ERR(d
->type_names
);
175 d
->type_names
= NULL
;
178 d
->ident_names
= hashmap__new(str_hash_fn
, str_equal_fn
, NULL
);
179 if (IS_ERR(d
->ident_names
)) {
180 err
= PTR_ERR(d
->ident_names
);
181 d
->ident_names
= NULL
;
185 err
= btf_dump_resize(d
);
192 return libbpf_err_ptr(err
);
195 static int btf_dump_resize(struct btf_dump
*d
)
197 int err
, last_id
= btf__type_cnt(d
->btf
) - 1;
199 if (last_id
<= d
->last_id
)
202 if (libbpf_ensure_mem((void **)&d
->type_states
, &d
->type_states_cap
,
203 sizeof(*d
->type_states
), last_id
+ 1))
205 if (libbpf_ensure_mem((void **)&d
->cached_names
, &d
->cached_names_cap
,
206 sizeof(*d
->cached_names
), last_id
+ 1))
209 if (d
->last_id
== 0) {
210 /* VOID is special */
211 d
->type_states
[0].order_state
= ORDERED
;
212 d
->type_states
[0].emit_state
= EMITTED
;
215 /* eagerly determine referenced types for anon enums */
216 err
= btf_dump_mark_referenced(d
);
220 d
->last_id
= last_id
;
224 static void btf_dump_free_names(struct hashmap
*map
)
227 struct hashmap_entry
*cur
;
229 hashmap__for_each_entry(map
, cur
, bkt
)
230 free((void *)cur
->pkey
);
235 void btf_dump__free(struct btf_dump
*d
)
239 if (IS_ERR_OR_NULL(d
))
242 free(d
->type_states
);
243 if (d
->cached_names
) {
244 /* any set cached name is owned by us and should be freed */
245 for (i
= 0; i
<= d
->last_id
; i
++) {
246 if (d
->cached_names
[i
])
247 free((void *)d
->cached_names
[i
]);
250 free(d
->cached_names
);
253 btf_dump_free_names(d
->type_names
);
254 btf_dump_free_names(d
->ident_names
);
259 static int btf_dump_order_type(struct btf_dump
*d
, __u32 id
, bool through_ptr
);
260 static void btf_dump_emit_type(struct btf_dump
*d
, __u32 id
, __u32 cont_id
);
263 * Dump BTF type in a compilable C syntax, including all the necessary
264 * dependent types, necessary for compilation. If some of the dependent types
265 * were already emitted as part of previous btf_dump__dump_type() invocation
266 * for another type, they won't be emitted again. This API allows callers to
267 * filter out BTF types according to user-defined criterias and emitted only
268 * minimal subset of types, necessary to compile everything. Full struct/union
269 * definitions will still be emitted, even if the only usage is through
270 * pointer and could be satisfied with just a forward declaration.
272 * Dumping is done in two high-level passes:
273 * 1. Topologically sort type definitions to satisfy C rules of compilation.
274 * 2. Emit type definitions in C syntax.
276 * Returns 0 on success; <0, otherwise.
278 int btf_dump__dump_type(struct btf_dump
*d
, __u32 id
)
282 if (id
>= btf__type_cnt(d
->btf
))
283 return libbpf_err(-EINVAL
);
285 err
= btf_dump_resize(d
);
287 return libbpf_err(err
);
289 d
->emit_queue_cnt
= 0;
290 err
= btf_dump_order_type(d
, id
, false);
292 return libbpf_err(err
);
294 for (i
= 0; i
< d
->emit_queue_cnt
; i
++)
295 btf_dump_emit_type(d
, d
->emit_queue
[i
], 0 /*top-level*/);
301 * Mark all types that are referenced from any other type. This is used to
302 * determine top-level anonymous enums that need to be emitted as an
303 * independent type declarations.
304 * Anonymous enums come in two flavors: either embedded in a struct's field
305 * definition, in which case they have to be declared inline as part of field
306 * type declaration; or as a top-level anonymous enum, typically used for
307 * declaring global constants. It's impossible to distinguish between two
308 * without knowing whether given enum type was referenced from other type:
309 * top-level anonymous enum won't be referenced by anything, while embedded
312 static int btf_dump_mark_referenced(struct btf_dump
*d
)
314 int i
, j
, n
= btf__type_cnt(d
->btf
);
315 const struct btf_type
*t
;
318 for (i
= d
->last_id
+ 1; i
< n
; i
++) {
319 t
= btf__type_by_id(d
->btf
, i
);
322 switch (btf_kind(t
)) {
325 case BTF_KIND_ENUM64
:
330 case BTF_KIND_VOLATILE
:
332 case BTF_KIND_RESTRICT
:
334 case BTF_KIND_TYPEDEF
:
337 case BTF_KIND_DECL_TAG
:
338 case BTF_KIND_TYPE_TAG
:
339 d
->type_states
[t
->type
].referenced
= 1;
342 case BTF_KIND_ARRAY
: {
343 const struct btf_array
*a
= btf_array(t
);
345 d
->type_states
[a
->index_type
].referenced
= 1;
346 d
->type_states
[a
->type
].referenced
= 1;
349 case BTF_KIND_STRUCT
:
350 case BTF_KIND_UNION
: {
351 const struct btf_member
*m
= btf_members(t
);
353 for (j
= 0; j
< vlen
; j
++, m
++)
354 d
->type_states
[m
->type
].referenced
= 1;
357 case BTF_KIND_FUNC_PROTO
: {
358 const struct btf_param
*p
= btf_params(t
);
360 for (j
= 0; j
< vlen
; j
++, p
++)
361 d
->type_states
[p
->type
].referenced
= 1;
364 case BTF_KIND_DATASEC
: {
365 const struct btf_var_secinfo
*v
= btf_var_secinfos(t
);
367 for (j
= 0; j
< vlen
; j
++, v
++)
368 d
->type_states
[v
->type
].referenced
= 1;
378 static int btf_dump_add_emit_queue_id(struct btf_dump
*d
, __u32 id
)
383 if (d
->emit_queue_cnt
>= d
->emit_queue_cap
) {
384 new_cap
= max(16, d
->emit_queue_cap
* 3 / 2);
385 new_queue
= libbpf_reallocarray(d
->emit_queue
, new_cap
, sizeof(new_queue
[0]));
388 d
->emit_queue
= new_queue
;
389 d
->emit_queue_cap
= new_cap
;
392 d
->emit_queue
[d
->emit_queue_cnt
++] = id
;
397 * Determine order of emitting dependent types and specified type to satisfy
398 * C compilation rules. This is done through topological sorting with an
399 * additional complication which comes from C rules. The main idea for C is
400 * that if some type is "embedded" into a struct/union, it's size needs to be
401 * known at the time of definition of containing type. E.g., for:
404 * struct B { struct A x; }
406 * struct A *HAS* to be defined before struct B, because it's "embedded",
407 * i.e., it is part of struct B layout. But in the following case:
410 * struct B { struct A *x; }
413 * it's enough to just have a forward declaration of struct A at the time of
414 * struct B definition, as struct B has a pointer to struct A, so the size of
415 * field x is known without knowing struct A size: it's sizeof(void *).
417 * Unfortunately, there are some trickier cases we need to handle, e.g.:
419 * struct A {}; // if this was forward-declaration: compilation error
421 * struct { // anonymous struct
426 * In this case, struct B's field x is a pointer, so it's size is known
427 * regardless of the size of (anonymous) struct it points to. But because this
428 * struct is anonymous and thus defined inline inside struct B, *and* it
429 * embeds struct A, compiler requires full definition of struct A to be known
430 * before struct B can be defined. This creates a transitive dependency
431 * between struct A and struct B. If struct A was forward-declared before
432 * struct B definition and fully defined after struct B definition, that would
433 * trigger compilation error.
435 * All this means that while we are doing topological sorting on BTF type
436 * graph, we need to determine relationships between different types (graph
438 * - weak link (relationship) between X and Y, if Y *CAN* be
439 * forward-declared at the point of X definition;
440 * - strong link, if Y *HAS* to be fully-defined before X can be defined.
442 * The rule is as follows. Given a chain of BTF types from X to Y, if there is
443 * BTF_KIND_PTR type in the chain and at least one non-anonymous type
444 * Z (excluding X, including Y), then link is weak. Otherwise, it's strong.
445 * Weak/strong relationship is determined recursively during DFS traversal and
446 * is returned as a result from btf_dump_order_type().
448 * btf_dump_order_type() is trying to avoid unnecessary forward declarations,
449 * but it is not guaranteeing that no extraneous forward declarations will be
452 * To avoid extra work, algorithm marks some of BTF types as ORDERED, when
453 * it's done with them, but not for all (e.g., VOLATILE, CONST, RESTRICT,
454 * ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the
455 * entire graph path, so depending where from one came to that BTF type, it
456 * might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM,
457 * once they are processed, there is no need to do it again, so they are
458 * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces
459 * weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But
460 * in any case, once those are processed, no need to do it again, as the
461 * result won't change.
464 * - 1, if type is part of strong link (so there is strong topological
465 * ordering requirements);
466 * - 0, if type is part of weak link (so can be satisfied through forward
468 * - <0, on error (e.g., unsatisfiable type loop detected).
470 static int btf_dump_order_type(struct btf_dump
*d
, __u32 id
, bool through_ptr
)
473 * Order state is used to detect strong link cycles, but only for BTF
474 * kinds that are or could be an independent definition (i.e.,
475 * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
476 * func_protos, modifiers are just means to get to these definitions.
477 * Int/void don't need definitions, they are assumed to be always
478 * properly defined. We also ignore datasec, var, and funcs for now.
479 * So for all non-defining kinds, we never even set ordering state,
480 * for defining kinds we set ORDERING and subsequently ORDERED if it
481 * forms a strong link.
483 struct btf_dump_type_aux_state
*tstate
= &d
->type_states
[id
];
484 const struct btf_type
*t
;
488 /* return true, letting typedefs know that it's ok to be emitted */
489 if (tstate
->order_state
== ORDERED
)
492 t
= btf__type_by_id(d
->btf
, id
);
494 if (tstate
->order_state
== ORDERING
) {
495 /* type loop, but resolvable through fwd declaration */
496 if (btf_is_composite(t
) && through_ptr
&& t
->name_off
!= 0)
498 pr_warn("unsatisfiable type cycle, id:[%u]\n", id
);
502 switch (btf_kind(t
)) {
505 tstate
->order_state
= ORDERED
;
509 err
= btf_dump_order_type(d
, t
->type
, true);
510 tstate
->order_state
= ORDERED
;
514 return btf_dump_order_type(d
, btf_array(t
)->type
, false);
516 case BTF_KIND_STRUCT
:
517 case BTF_KIND_UNION
: {
518 const struct btf_member
*m
= btf_members(t
);
520 * struct/union is part of strong link, only if it's embedded
521 * (so no ptr in a path) or it's anonymous (so has to be
522 * defined inline, even if declared through ptr)
524 if (through_ptr
&& t
->name_off
!= 0)
527 tstate
->order_state
= ORDERING
;
530 for (i
= 0; i
< vlen
; i
++, m
++) {
531 err
= btf_dump_order_type(d
, m
->type
, false);
536 if (t
->name_off
!= 0) {
537 err
= btf_dump_add_emit_queue_id(d
, id
);
542 tstate
->order_state
= ORDERED
;
546 case BTF_KIND_ENUM64
:
549 * non-anonymous or non-referenced enums are top-level
550 * declarations and should be emitted. Same logic can be
551 * applied to FWDs, it won't hurt anyways.
553 if (t
->name_off
!= 0 || !tstate
->referenced
) {
554 err
= btf_dump_add_emit_queue_id(d
, id
);
558 tstate
->order_state
= ORDERED
;
561 case BTF_KIND_TYPEDEF
: {
564 is_strong
= btf_dump_order_type(d
, t
->type
, through_ptr
);
568 /* typedef is similar to struct/union w.r.t. fwd-decls */
569 if (through_ptr
&& !is_strong
)
572 /* typedef is always a named definition */
573 err
= btf_dump_add_emit_queue_id(d
, id
);
577 d
->type_states
[id
].order_state
= ORDERED
;
580 case BTF_KIND_VOLATILE
:
582 case BTF_KIND_RESTRICT
:
583 case BTF_KIND_TYPE_TAG
:
584 return btf_dump_order_type(d
, t
->type
, through_ptr
);
586 case BTF_KIND_FUNC_PROTO
: {
587 const struct btf_param
*p
= btf_params(t
);
590 err
= btf_dump_order_type(d
, t
->type
, through_ptr
);
596 for (i
= 0; i
< vlen
; i
++, p
++) {
597 err
= btf_dump_order_type(d
, p
->type
, through_ptr
);
607 case BTF_KIND_DATASEC
:
608 case BTF_KIND_DECL_TAG
:
609 d
->type_states
[id
].order_state
= ORDERED
;
617 static void btf_dump_emit_missing_aliases(struct btf_dump
*d
, __u32 id
,
618 const struct btf_type
*t
);
620 static void btf_dump_emit_struct_fwd(struct btf_dump
*d
, __u32 id
,
621 const struct btf_type
*t
);
622 static void btf_dump_emit_struct_def(struct btf_dump
*d
, __u32 id
,
623 const struct btf_type
*t
, int lvl
);
625 static void btf_dump_emit_enum_fwd(struct btf_dump
*d
, __u32 id
,
626 const struct btf_type
*t
);
627 static void btf_dump_emit_enum_def(struct btf_dump
*d
, __u32 id
,
628 const struct btf_type
*t
, int lvl
);
630 static void btf_dump_emit_fwd_def(struct btf_dump
*d
, __u32 id
,
631 const struct btf_type
*t
);
633 static void btf_dump_emit_typedef_def(struct btf_dump
*d
, __u32 id
,
634 const struct btf_type
*t
, int lvl
);
636 /* a local view into a shared stack */
642 static void btf_dump_emit_type_decl(struct btf_dump
*d
, __u32 id
,
643 const char *fname
, int lvl
);
644 static void btf_dump_emit_type_chain(struct btf_dump
*d
,
645 struct id_stack
*decl_stack
,
646 const char *fname
, int lvl
);
648 static const char *btf_dump_type_name(struct btf_dump
*d
, __u32 id
);
649 static const char *btf_dump_ident_name(struct btf_dump
*d
, __u32 id
);
650 static size_t btf_dump_name_dups(struct btf_dump
*d
, struct hashmap
*name_map
,
651 const char *orig_name
);
653 static bool btf_dump_is_blacklisted(struct btf_dump
*d
, __u32 id
)
655 const struct btf_type
*t
= btf__type_by_id(d
->btf
, id
);
657 /* __builtin_va_list is a compiler built-in, which causes compilation
658 * errors, when compiling w/ different compiler, then used to compile
659 * original code (e.g., GCC to compile kernel, Clang to use generated
660 * C header from BTF). As it is built-in, it should be already defined
661 * properly internally in compiler.
663 if (t
->name_off
== 0)
665 return strcmp(btf_name_of(d
, t
->name_off
), "__builtin_va_list") == 0;
669 * Emit C-syntax definitions of types from chains of BTF types.
671 * High-level handling of determining necessary forward declarations are handled
672 * by btf_dump_emit_type() itself, but all nitty-gritty details of emitting type
673 * declarations/definitions in C syntax are handled by a combo of
674 * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to
675 * corresponding btf_dump_emit_*_{def,fwd}() functions.
677 * We also keep track of "containing struct/union type ID" to determine when
678 * we reference it from inside and thus can avoid emitting unnecessary forward
681 * This algorithm is designed in such a way, that even if some error occurs
682 * (either technical, e.g., out of memory, or logical, i.e., malformed BTF
683 * that doesn't comply to C rules completely), algorithm will try to proceed
684 * and produce as much meaningful output as possible.
686 static void btf_dump_emit_type(struct btf_dump
*d
, __u32 id
, __u32 cont_id
)
688 struct btf_dump_type_aux_state
*tstate
= &d
->type_states
[id
];
689 bool top_level_def
= cont_id
== 0;
690 const struct btf_type
*t
;
693 if (tstate
->emit_state
== EMITTED
)
696 t
= btf__type_by_id(d
->btf
, id
);
699 if (tstate
->emit_state
== EMITTING
) {
700 if (tstate
->fwd_emitted
)
704 case BTF_KIND_STRUCT
:
707 * if we are referencing a struct/union that we are
708 * part of - then no need for fwd declaration
712 if (t
->name_off
== 0) {
713 pr_warn("anonymous struct/union loop, id:[%u]\n",
717 btf_dump_emit_struct_fwd(d
, id
, t
);
718 btf_dump_printf(d
, ";\n\n");
719 tstate
->fwd_emitted
= 1;
721 case BTF_KIND_TYPEDEF
:
723 * for typedef fwd_emitted means typedef definition
724 * was emitted, but it can be used only for "weak"
725 * references through pointer only, not for embedding
727 if (!btf_dump_is_blacklisted(d
, id
)) {
728 btf_dump_emit_typedef_def(d
, id
, t
, 0);
729 btf_dump_printf(d
, ";\n\n");
731 tstate
->fwd_emitted
= 1;
742 /* Emit type alias definitions if necessary */
743 btf_dump_emit_missing_aliases(d
, id
, t
);
745 tstate
->emit_state
= EMITTED
;
748 case BTF_KIND_ENUM64
:
750 btf_dump_emit_enum_def(d
, id
, t
, 0);
751 btf_dump_printf(d
, ";\n\n");
753 tstate
->emit_state
= EMITTED
;
756 case BTF_KIND_VOLATILE
:
758 case BTF_KIND_RESTRICT
:
759 case BTF_KIND_TYPE_TAG
:
760 btf_dump_emit_type(d
, t
->type
, cont_id
);
763 btf_dump_emit_type(d
, btf_array(t
)->type
, cont_id
);
766 btf_dump_emit_fwd_def(d
, id
, t
);
767 btf_dump_printf(d
, ";\n\n");
768 tstate
->emit_state
= EMITTED
;
770 case BTF_KIND_TYPEDEF
:
771 tstate
->emit_state
= EMITTING
;
772 btf_dump_emit_type(d
, t
->type
, id
);
774 * typedef can server as both definition and forward
775 * declaration; at this stage someone depends on
776 * typedef as a forward declaration (refers to it
777 * through pointer), so unless we already did it,
778 * emit typedef as a forward declaration
780 if (!tstate
->fwd_emitted
&& !btf_dump_is_blacklisted(d
, id
)) {
781 btf_dump_emit_typedef_def(d
, id
, t
, 0);
782 btf_dump_printf(d
, ";\n\n");
784 tstate
->emit_state
= EMITTED
;
786 case BTF_KIND_STRUCT
:
788 tstate
->emit_state
= EMITTING
;
789 /* if it's a top-level struct/union definition or struct/union
790 * is anonymous, then in C we'll be emitting all fields and
791 * their types (as opposed to just `struct X`), so we need to
792 * make sure that all types, referenced from struct/union
793 * members have necessary forward-declarations, where
796 if (top_level_def
|| t
->name_off
== 0) {
797 const struct btf_member
*m
= btf_members(t
);
798 __u16 vlen
= btf_vlen(t
);
801 new_cont_id
= t
->name_off
== 0 ? cont_id
: id
;
802 for (i
= 0; i
< vlen
; i
++, m
++)
803 btf_dump_emit_type(d
, m
->type
, new_cont_id
);
804 } else if (!tstate
->fwd_emitted
&& id
!= cont_id
) {
805 btf_dump_emit_struct_fwd(d
, id
, t
);
806 btf_dump_printf(d
, ";\n\n");
807 tstate
->fwd_emitted
= 1;
811 btf_dump_emit_struct_def(d
, id
, t
, 0);
812 btf_dump_printf(d
, ";\n\n");
813 tstate
->emit_state
= EMITTED
;
815 tstate
->emit_state
= NOT_EMITTED
;
818 case BTF_KIND_FUNC_PROTO
: {
819 const struct btf_param
*p
= btf_params(t
);
820 __u16 n
= btf_vlen(t
);
823 btf_dump_emit_type(d
, t
->type
, cont_id
);
824 for (i
= 0; i
< n
; i
++, p
++)
825 btf_dump_emit_type(d
, p
->type
, cont_id
);
834 static bool btf_is_struct_packed(const struct btf
*btf
, __u32 id
,
835 const struct btf_type
*t
)
837 const struct btf_member
*m
;
838 int max_align
= 1, align
, i
, bit_sz
;
843 /* all non-bitfield fields have to be naturally aligned */
844 for (i
= 0; i
< vlen
; i
++, m
++) {
845 align
= btf__align_of(btf
, m
->type
);
846 bit_sz
= btf_member_bitfield_size(t
, i
);
847 if (align
&& bit_sz
== 0 && m
->offset
% (8 * align
) != 0)
849 max_align
= max(align
, max_align
);
851 /* size of a non-packed struct has to be a multiple of its alignment */
852 if (t
->size
% max_align
!= 0)
855 * if original struct was marked as packed, but its layout is
856 * naturally aligned, we'll detect that it's not packed
861 static void btf_dump_emit_bit_padding(const struct btf_dump
*d
,
862 int cur_off
, int next_off
, int next_align
,
863 bool in_bitfield
, int lvl
)
869 {"long", d
->ptr_sz
* 8}, {"int", 32}, {"short", 16}, {"char", 8}
871 int new_off
= 0, pad_bits
= 0, bits
, i
;
872 const char *pad_type
= NULL
;
874 if (cur_off
>= next_off
)
877 /* For filling out padding we want to take advantage of
878 * natural alignment rules to minimize unnecessary explicit
879 * padding. First, we find the largest type (among long, int,
880 * short, or char) that can be used to force naturally aligned
881 * boundary. Once determined, we'll use such type to fill in
882 * the remaining padding gap. In some cases we can rely on
883 * compiler filling some gaps, but sometimes we need to force
884 * alignment to close natural alignment with markers like
885 * `long: 0` (this is always the case for bitfields). Note
886 * that even if struct itself has, let's say 4-byte alignment
887 * (i.e., it only uses up to int-aligned types), using `long:
888 * X;` explicit padding doesn't actually change struct's
889 * overall alignment requirements, but compiler does take into
890 * account that type's (long, in this example) natural
891 * alignment requirements when adding implicit padding. We use
892 * this fact heavily and don't worry about ruining correct
893 * struct alignment requirement.
895 for (i
= 0; i
< ARRAY_SIZE(pads
); i
++) {
896 pad_bits
= pads
[i
].bits
;
897 pad_type
= pads
[i
].name
;
899 new_off
= roundup(cur_off
, pad_bits
);
900 if (new_off
<= next_off
)
904 if (new_off
> cur_off
&& new_off
<= next_off
) {
905 /* We need explicit `<type>: 0` aligning mark if next
906 * field is right on alignment offset and its
907 * alignment requirement is less strict than <type>'s
908 * alignment (so compiler won't naturally align to the
909 * offset we expect), or if subsequent `<type>: X`,
910 * will actually completely fit in the remaining hole,
911 * making compiler basically ignore `<type>: X`
915 (new_off
== next_off
&& roundup(cur_off
, next_align
* 8) != new_off
) ||
916 (new_off
!= next_off
&& next_off
- new_off
<= new_off
- cur_off
))
917 /* but for bitfields we'll emit explicit bit count */
918 btf_dump_printf(d
, "\n%s%s: %d;", pfx(lvl
), pad_type
,
919 in_bitfield
? new_off
- cur_off
: 0);
923 /* Now we know we start at naturally aligned offset for a chosen
924 * padding type (long, int, short, or char), and so the rest is just
925 * a straightforward filling of remaining padding gap with full
926 * `<type>: sizeof(<type>);` markers, except for the last one, which
927 * might need smaller than sizeof(<type>) padding.
929 while (cur_off
!= next_off
) {
930 bits
= min(next_off
- cur_off
, pad_bits
);
931 if (bits
== pad_bits
) {
932 btf_dump_printf(d
, "\n%s%s: %d;", pfx(lvl
), pad_type
, pad_bits
);
936 /* For the remainder padding that doesn't cover entire
937 * pad_type bit length, we pick the smallest necessary type.
938 * This is pure aesthetics, we could have just used `long`,
939 * but having smallest necessary one communicates better the
940 * scale of the padding gap.
942 for (i
= ARRAY_SIZE(pads
) - 1; i
>= 0; i
--) {
943 pad_type
= pads
[i
].name
;
944 pad_bits
= pads
[i
].bits
;
948 btf_dump_printf(d
, "\n%s%s: %d;", pfx(lvl
), pad_type
, bits
);
955 static void btf_dump_emit_struct_fwd(struct btf_dump
*d
, __u32 id
,
956 const struct btf_type
*t
)
958 btf_dump_printf(d
, "%s%s%s",
959 btf_is_struct(t
) ? "struct" : "union",
960 t
->name_off
? " " : "",
961 btf_dump_type_name(d
, id
));
964 static void btf_dump_emit_struct_def(struct btf_dump
*d
,
966 const struct btf_type
*t
,
969 const struct btf_member
*m
= btf_members(t
);
970 bool is_struct
= btf_is_struct(t
);
971 bool packed
, prev_bitfield
= false;
972 int align
, i
, off
= 0;
973 __u16 vlen
= btf_vlen(t
);
975 align
= btf__align_of(d
->btf
, id
);
976 packed
= is_struct
? btf_is_struct_packed(d
->btf
, id
, t
) : 0;
978 btf_dump_printf(d
, "%s%s%s {",
979 is_struct
? "struct" : "union",
980 t
->name_off
? " " : "",
981 btf_dump_type_name(d
, id
));
983 for (i
= 0; i
< vlen
; i
++, m
++) {
985 int m_off
, m_sz
, m_align
;
988 fname
= btf_name_of(d
, m
->name_off
);
989 m_sz
= btf_member_bitfield_size(t
, i
);
990 m_off
= btf_member_bit_offset(t
, i
);
991 m_align
= packed
? 1 : btf__align_of(d
->btf
, m
->type
);
993 in_bitfield
= prev_bitfield
&& m_sz
!= 0;
995 btf_dump_emit_bit_padding(d
, off
, m_off
, m_align
, in_bitfield
, lvl
+ 1);
996 btf_dump_printf(d
, "\n%s", pfx(lvl
+ 1));
997 btf_dump_emit_type_decl(d
, m
->type
, fname
, lvl
+ 1);
1000 btf_dump_printf(d
, ": %d", m_sz
);
1002 prev_bitfield
= true;
1004 m_sz
= max((__s64
)0, btf__resolve_size(d
->btf
, m
->type
));
1005 off
= m_off
+ m_sz
* 8;
1006 prev_bitfield
= false;
1009 btf_dump_printf(d
, ";");
1012 /* pad at the end, if necessary */
1014 btf_dump_emit_bit_padding(d
, off
, t
->size
* 8, align
, false, lvl
+ 1);
1017 * Keep `struct empty {}` on a single line,
1018 * only print newline when there are regular or padding fields.
1020 if (vlen
|| t
->size
) {
1021 btf_dump_printf(d
, "\n");
1022 btf_dump_printf(d
, "%s}", pfx(lvl
));
1024 btf_dump_printf(d
, "}");
1027 btf_dump_printf(d
, " __attribute__((packed))");
/*
 * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
 * SIMD intrinsics. Alias them to standard base types.
 * Each entry maps { typedef name, replacement base type }.
 */
static const char *missing_base_types[][2] = {
	{ "__Poly8_t",		"unsigned char" },
	{ "__Poly16_t",		"unsigned short" },
	{ "__Poly64_t",		"unsigned long long" },
	{ "__Poly128_t",	"unsigned __int128" },
};
1041 static void btf_dump_emit_missing_aliases(struct btf_dump
*d
, __u32 id
,
1042 const struct btf_type
*t
)
1044 const char *name
= btf_dump_type_name(d
, id
);
1047 for (i
= 0; i
< ARRAY_SIZE(missing_base_types
); i
++) {
1048 if (strcmp(name
, missing_base_types
[i
][0]) == 0) {
1049 btf_dump_printf(d
, "typedef %s %s;\n\n",
1050 missing_base_types
[i
][1], name
);
1056 static void btf_dump_emit_enum_fwd(struct btf_dump
*d
, __u32 id
,
1057 const struct btf_type
*t
)
1059 btf_dump_printf(d
, "enum %s", btf_dump_type_name(d
, id
));
1062 static void btf_dump_emit_enum32_val(struct btf_dump
*d
,
1063 const struct btf_type
*t
,
1064 int lvl
, __u16 vlen
)
1066 const struct btf_enum
*v
= btf_enum(t
);
1067 bool is_signed
= btf_kflag(t
);
1068 const char *fmt_str
;
1073 for (i
= 0; i
< vlen
; i
++, v
++) {
1074 name
= btf_name_of(d
, v
->name_off
);
1075 /* enumerators share namespace with typedef idents */
1076 dup_cnt
= btf_dump_name_dups(d
, d
->ident_names
, name
);
1078 fmt_str
= is_signed
? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,";
1079 btf_dump_printf(d
, fmt_str
, pfx(lvl
+ 1), name
, dup_cnt
, v
->val
);
1081 fmt_str
= is_signed
? "\n%s%s = %d," : "\n%s%s = %u,";
1082 btf_dump_printf(d
, fmt_str
, pfx(lvl
+ 1), name
, v
->val
);
1087 static void btf_dump_emit_enum64_val(struct btf_dump
*d
,
1088 const struct btf_type
*t
,
1089 int lvl
, __u16 vlen
)
1091 const struct btf_enum64
*v
= btf_enum64(t
);
1092 bool is_signed
= btf_kflag(t
);
1093 const char *fmt_str
;
1099 for (i
= 0; i
< vlen
; i
++, v
++) {
1100 name
= btf_name_of(d
, v
->name_off
);
1101 dup_cnt
= btf_dump_name_dups(d
, d
->ident_names
, name
);
1102 val
= btf_enum64_value(v
);
1104 fmt_str
= is_signed
? "\n%s%s___%zd = %lldLL,"
1105 : "\n%s%s___%zd = %lluULL,";
1106 btf_dump_printf(d
, fmt_str
,
1107 pfx(lvl
+ 1), name
, dup_cnt
,
1108 (unsigned long long)val
);
1110 fmt_str
= is_signed
? "\n%s%s = %lldLL,"
1111 : "\n%s%s = %lluULL,";
1112 btf_dump_printf(d
, fmt_str
,
1114 (unsigned long long)val
);
1118 static void btf_dump_emit_enum_def(struct btf_dump
*d
, __u32 id
,
1119 const struct btf_type
*t
,
1122 __u16 vlen
= btf_vlen(t
);
1124 btf_dump_printf(d
, "enum%s%s",
1125 t
->name_off
? " " : "",
1126 btf_dump_type_name(d
, id
));
1131 btf_dump_printf(d
, " {");
1133 btf_dump_emit_enum32_val(d
, t
, lvl
, vlen
);
1135 btf_dump_emit_enum64_val(d
, t
, lvl
, vlen
);
1136 btf_dump_printf(d
, "\n%s}", pfx(lvl
));
1138 /* special case enums with special sizes */
1140 /* one-byte enums can be forced with mode(byte) attribute */
1141 btf_dump_printf(d
, " __attribute__((mode(byte)))");
1142 } else if (t
->size
== 8 && d
->ptr_sz
== 8) {
1143 /* enum can be 8-byte sized if one of the enumerator values
1144 * doesn't fit in 32-bit integer, or by adding mode(word)
1145 * attribute (but probably only on 64-bit architectures); do
1146 * our best here to try to satisfy the contract without adding
1147 * unnecessary attributes
1149 bool needs_word_mode
;
1151 if (btf_is_enum(t
)) {
1152 /* enum can't represent 64-bit values, so we need word mode */
1153 needs_word_mode
= true;
1155 /* enum64 needs mode(word) if none of its values has
1156 * non-zero upper 32-bits (which means that all values
1157 * fit in 32-bit integers and won't cause compiler to
1158 * bump enum to be 64-bit naturally
1162 needs_word_mode
= true;
1163 for (i
= 0; i
< vlen
; i
++) {
1164 if (btf_enum64(t
)[i
].val_hi32
!= 0) {
1165 needs_word_mode
= false;
1170 if (needs_word_mode
)
1171 btf_dump_printf(d
, " __attribute__((mode(word)))");
1176 static void btf_dump_emit_fwd_def(struct btf_dump
*d
, __u32 id
,
1177 const struct btf_type
*t
)
1179 const char *name
= btf_dump_type_name(d
, id
);
1182 btf_dump_printf(d
, "union %s", name
);
1184 btf_dump_printf(d
, "struct %s", name
);
1187 static void btf_dump_emit_typedef_def(struct btf_dump
*d
, __u32 id
,
1188 const struct btf_type
*t
, int lvl
)
1190 const char *name
= btf_dump_ident_name(d
, id
);
1193 * Old GCC versions are emitting invalid typedef for __gnuc_va_list
1194 * pointing to VOID. This generates warnings from btf_dump() and
1195 * results in uncompilable header file, so we are fixing it up here
1196 * with valid typedef into __builtin_va_list.
1198 if (t
->type
== 0 && strcmp(name
, "__gnuc_va_list") == 0) {
1199 btf_dump_printf(d
, "typedef __builtin_va_list __gnuc_va_list");
1203 btf_dump_printf(d
, "typedef ");
1204 btf_dump_emit_type_decl(d
, t
->type
, name
, lvl
);
1207 static int btf_dump_push_decl_stack_id(struct btf_dump
*d
, __u32 id
)
1212 if (d
->decl_stack_cnt
>= d
->decl_stack_cap
) {
1213 new_cap
= max(16, d
->decl_stack_cap
* 3 / 2);
1214 new_stack
= libbpf_reallocarray(d
->decl_stack
, new_cap
, sizeof(new_stack
[0]));
1217 d
->decl_stack
= new_stack
;
1218 d
->decl_stack_cap
= new_cap
;
1221 d
->decl_stack
[d
->decl_stack_cnt
++] = id
;
1227 * Emit type declaration (e.g., field type declaration in a struct or argument
1228 * declaration in function prototype) in correct C syntax.
1230 * For most types it's trivial, but there are few quirky type declaration
1231 * cases worth mentioning:
1232 * - function prototypes (especially nesting of function prototypes);
1234 * - const/volatile/restrict for pointers vs other types.
1236 * For a good discussion of *PARSING* C syntax (as a human), see
1237 * Peter van der Linden's "Expert C Programming: Deep C Secrets",
1238 * Ch.3 "Unscrambling Declarations in C".
1240 * It won't help with BTF to C conversion much, though, as it's an opposite
1241 * problem. So we came up with this algorithm in reverse to van der Linden's
1242 * parsing algorithm. It goes from structured BTF representation of type
1243 * declaration to a valid compilable C syntax.
1245 * For instance, consider this C typedef:
 * typedef const int * const * arr_t[10];
1247 * It will be represented in BTF with this chain of BTF types:
1248 * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
1250 * Notice how [const] modifier always goes before type it modifies in BTF type
1251 * graph, but in C syntax, const/volatile/restrict modifiers are written to
1252 * the right of pointers, but to the left of other types. There are also other
1253 * quirks, like function pointers, arrays of them, functions returning other
1256 * We handle that by pushing all the types to a stack, until we hit "terminal"
1257 * type (int/enum/struct/union/fwd). Then depending on the kind of a type on
1258 * top of a stack, modifiers are handled differently. Array/function pointers
1259 * have also wildly different syntax and how nesting of them are done. See
1260 * code for authoritative definition.
1262 * To avoid allocating new stack for each independent chain of BTF types, we
1263 * share one bigger stack, with each chain working only on its own local view
1264 * of a stack frame. Some care is required to "pop" stack frames after
1265 * processing type declaration chain.
1267 int btf_dump__emit_type_decl(struct btf_dump
*d
, __u32 id
,
1268 const struct btf_dump_emit_type_decl_opts
*opts
)
1273 if (!OPTS_VALID(opts
, btf_dump_emit_type_decl_opts
))
1274 return libbpf_err(-EINVAL
);
1276 err
= btf_dump_resize(d
);
1278 return libbpf_err(err
);
1280 fname
= OPTS_GET(opts
, field_name
, "");
1281 lvl
= OPTS_GET(opts
, indent_level
, 0);
1282 d
->strip_mods
= OPTS_GET(opts
, strip_mods
, false);
1283 btf_dump_emit_type_decl(d
, id
, fname
, lvl
);
1284 d
->strip_mods
= false;
1288 static void btf_dump_emit_type_decl(struct btf_dump
*d
, __u32 id
,
1289 const char *fname
, int lvl
)
1291 struct id_stack decl_stack
;
1292 const struct btf_type
*t
;
1293 int err
, stack_start
;
1295 stack_start
= d
->decl_stack_cnt
;
1297 t
= btf__type_by_id(d
->btf
, id
);
1298 if (d
->strip_mods
&& btf_is_mod(t
))
1301 err
= btf_dump_push_decl_stack_id(d
, id
);
1304 * if we don't have enough memory for entire type decl
1305 * chain, restore stack, emit warning, and try to
1306 * proceed nevertheless
1308 pr_warn("not enough memory for decl stack: %s\n", errstr(err
));
1309 d
->decl_stack_cnt
= stack_start
;
1317 switch (btf_kind(t
)) {
1319 case BTF_KIND_VOLATILE
:
1320 case BTF_KIND_CONST
:
1321 case BTF_KIND_RESTRICT
:
1322 case BTF_KIND_FUNC_PROTO
:
1323 case BTF_KIND_TYPE_TAG
:
1326 case BTF_KIND_ARRAY
:
1327 id
= btf_array(t
)->type
;
1331 case BTF_KIND_ENUM64
:
1333 case BTF_KIND_STRUCT
:
1334 case BTF_KIND_UNION
:
1335 case BTF_KIND_TYPEDEF
:
1336 case BTF_KIND_FLOAT
:
1339 pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
1346 * We might be inside a chain of declarations (e.g., array of function
1347 * pointers returning anonymous (so inlined) structs, having another
1348 * array field). Each of those needs its own "stack frame" to handle
1349 * emitting of declarations. Those stack frames are non-overlapping
1350 * portions of shared btf_dump->decl_stack. To make it a bit nicer to
1351 * handle this set of nested stacks, we create a view corresponding to
1352 * our own "stack frame" and work with it as an independent stack.
1353 * We'll need to clean up after emit_type_chain() returns, though.
1355 decl_stack
.ids
= d
->decl_stack
+ stack_start
;
1356 decl_stack
.cnt
= d
->decl_stack_cnt
- stack_start
;
1357 btf_dump_emit_type_chain(d
, &decl_stack
, fname
, lvl
);
1359 * emit_type_chain() guarantees that it will pop its entire decl_stack
1360 * frame before returning. But it works with a read-only view into
1361 * decl_stack, so it doesn't actually pop anything from the
1362 * perspective of shared btf_dump->decl_stack, per se. We need to
1363 * reset decl_stack state to how it was before us to avoid it growing
1366 d
->decl_stack_cnt
= stack_start
;
1369 static void btf_dump_emit_mods(struct btf_dump
*d
, struct id_stack
*decl_stack
)
1371 const struct btf_type
*t
;
1374 while (decl_stack
->cnt
) {
1375 id
= decl_stack
->ids
[decl_stack
->cnt
- 1];
1376 t
= btf__type_by_id(d
->btf
, id
);
1378 switch (btf_kind(t
)) {
1379 case BTF_KIND_VOLATILE
:
1380 btf_dump_printf(d
, "volatile ");
1382 case BTF_KIND_CONST
:
1383 btf_dump_printf(d
, "const ");
1385 case BTF_KIND_RESTRICT
:
1386 btf_dump_printf(d
, "restrict ");
1395 static void btf_dump_drop_mods(struct btf_dump
*d
, struct id_stack
*decl_stack
)
1397 const struct btf_type
*t
;
1400 while (decl_stack
->cnt
) {
1401 id
= decl_stack
->ids
[decl_stack
->cnt
- 1];
1402 t
= btf__type_by_id(d
->btf
, id
);
/* Print field/variable name, inserting a separating space unless the name is
 * empty or the last emitted token was a pointer asterisk.
 */
static void btf_dump_emit_name(const struct btf_dump *d,
			       const char *name, bool last_was_ptr)
{
	bool separate = name[0] && !last_was_ptr;

	btf_dump_printf(d, "%s%s", separate ? " " : "", name);
}
1417 static void btf_dump_emit_type_chain(struct btf_dump
*d
,
1418 struct id_stack
*decls
,
1419 const char *fname
, int lvl
)
1422 * last_was_ptr is used to determine if we need to separate pointer
1423 * asterisk (*) from previous part of type signature with space, so
1424 * that we get `int ***`, instead of `int * * *`. We default to true
1425 * for cases where we have single pointer in a chain. E.g., in ptr ->
1426 * func_proto case. func_proto will start a new emit_type_chain call
1427 * with just ptr, which should be emitted as (*) or (*<fname>), so we
1428 * don't want to prepend space for that last pointer.
1430 bool last_was_ptr
= true;
1431 const struct btf_type
*t
;
1436 while (decls
->cnt
) {
1437 id
= decls
->ids
[--decls
->cnt
];
1439 /* VOID is a special snowflake */
1440 btf_dump_emit_mods(d
, decls
);
1441 btf_dump_printf(d
, "void");
1442 last_was_ptr
= false;
1446 t
= btf__type_by_id(d
->btf
, id
);
1451 case BTF_KIND_FLOAT
:
1452 btf_dump_emit_mods(d
, decls
);
1453 name
= btf_name_of(d
, t
->name_off
);
1454 btf_dump_printf(d
, "%s", name
);
1456 case BTF_KIND_STRUCT
:
1457 case BTF_KIND_UNION
:
1458 btf_dump_emit_mods(d
, decls
);
1459 /* inline anonymous struct/union */
1460 if (t
->name_off
== 0 && !d
->skip_anon_defs
)
1461 btf_dump_emit_struct_def(d
, id
, t
, lvl
);
1463 btf_dump_emit_struct_fwd(d
, id
, t
);
1466 case BTF_KIND_ENUM64
:
1467 btf_dump_emit_mods(d
, decls
);
1468 /* inline anonymous enum */
1469 if (t
->name_off
== 0 && !d
->skip_anon_defs
)
1470 btf_dump_emit_enum_def(d
, id
, t
, lvl
);
1472 btf_dump_emit_enum_fwd(d
, id
, t
);
1475 btf_dump_emit_mods(d
, decls
);
1476 btf_dump_emit_fwd_def(d
, id
, t
);
1478 case BTF_KIND_TYPEDEF
:
1479 btf_dump_emit_mods(d
, decls
);
1480 btf_dump_printf(d
, "%s", btf_dump_ident_name(d
, id
));
1483 btf_dump_printf(d
, "%s", last_was_ptr
? "*" : " *");
1485 case BTF_KIND_VOLATILE
:
1486 btf_dump_printf(d
, " volatile");
1488 case BTF_KIND_CONST
:
1489 btf_dump_printf(d
, " const");
1491 case BTF_KIND_RESTRICT
:
1492 btf_dump_printf(d
, " restrict");
1494 case BTF_KIND_TYPE_TAG
:
1495 btf_dump_emit_mods(d
, decls
);
1496 name
= btf_name_of(d
, t
->name_off
);
1497 btf_dump_printf(d
, " __attribute__((btf_type_tag(\"%s\")))", name
);
1499 case BTF_KIND_ARRAY
: {
1500 const struct btf_array
*a
= btf_array(t
);
1501 const struct btf_type
*next_t
;
1506 * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
1507 * which causes it to emit extra const/volatile
1508 * modifiers for an array, if array's element type has
1509 * const/volatile modifiers. Clang doesn't do that.
1510 * In general, it doesn't seem very meaningful to have
1511 * a const/volatile modifier for array, so we are
1512 * going to silently skip them here.
1514 btf_dump_drop_mods(d
, decls
);
1516 if (decls
->cnt
== 0) {
1517 btf_dump_emit_name(d
, fname
, last_was_ptr
);
1518 btf_dump_printf(d
, "[%u]", a
->nelems
);
1522 next_id
= decls
->ids
[decls
->cnt
- 1];
1523 next_t
= btf__type_by_id(d
->btf
, next_id
);
1524 multidim
= btf_is_array(next_t
);
1525 /* we need space if we have named non-pointer */
1526 if (fname
[0] && !last_was_ptr
)
1527 btf_dump_printf(d
, " ");
1528 /* no parentheses for multi-dimensional array */
1530 btf_dump_printf(d
, "(");
1531 btf_dump_emit_type_chain(d
, decls
, fname
, lvl
);
1533 btf_dump_printf(d
, ")");
1534 btf_dump_printf(d
, "[%u]", a
->nelems
);
1537 case BTF_KIND_FUNC_PROTO
: {
1538 const struct btf_param
*p
= btf_params(t
);
1539 __u16 vlen
= btf_vlen(t
);
1543 * GCC emits extra volatile qualifier for
1544 * __attribute__((noreturn)) function pointers. Clang
1545 * doesn't do it. It's a GCC quirk for backwards
1546 * compatibility with code written for GCC <2.5. So,
1547 * similarly to extra qualifiers for array, just drop
1548 * them, instead of handling them.
1550 btf_dump_drop_mods(d
, decls
);
1552 btf_dump_printf(d
, " (");
1553 btf_dump_emit_type_chain(d
, decls
, fname
, lvl
);
1554 btf_dump_printf(d
, ")");
1556 btf_dump_emit_name(d
, fname
, last_was_ptr
);
1558 btf_dump_printf(d
, "(");
1560 * Clang for BPF target generates func_proto with no
1561 * args as a func_proto with a single void arg (e.g.,
1562 * `int (*f)(void)` vs just `int (*f)()`). We are
1563 * going to emit valid empty args (void) syntax for
1564 * such case. Similarly and conveniently, valid
1565 * no args case can be special-cased here as well.
1567 if (vlen
== 0 || (vlen
== 1 && p
->type
== 0)) {
1568 btf_dump_printf(d
, "void)");
1572 for (i
= 0; i
< vlen
; i
++, p
++) {
1574 btf_dump_printf(d
, ", ");
1576 /* last arg of type void is vararg */
1577 if (i
== vlen
- 1 && p
->type
== 0) {
1578 btf_dump_printf(d
, "...");
1582 name
= btf_name_of(d
, p
->name_off
);
1583 btf_dump_emit_type_decl(d
, p
->type
, name
, lvl
);
1586 btf_dump_printf(d
, ")");
1590 pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
1595 last_was_ptr
= kind
== BTF_KIND_PTR
;
1598 btf_dump_emit_name(d
, fname
, last_was_ptr
);
1601 /* show type name as (type_name) */
1602 static void btf_dump_emit_type_cast(struct btf_dump
*d
, __u32 id
,
1605 const struct btf_type
*t
;
1607 /* for array members, we don't bother emitting type name for each
1608 * member to avoid the redundancy of
1609 * .name = (char[4])[(char)'f',(char)'o',(char)'o',]
1611 if (d
->typed_dump
->is_array_member
)
1614 /* avoid type name specification for variable/section; it will be done
1615 * for the associated variable value(s).
1617 t
= btf__type_by_id(d
->btf
, id
);
1618 if (btf_is_var(t
) || btf_is_datasec(t
))
1622 btf_dump_printf(d
, "(");
1624 d
->skip_anon_defs
= true;
1625 d
->strip_mods
= true;
1626 btf_dump_emit_type_decl(d
, id
, "", 0);
1627 d
->strip_mods
= false;
1628 d
->skip_anon_defs
= false;
1631 btf_dump_printf(d
, ")");
/* return number of duplicates (occurrences) of a given name */
static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
				 const char *orig_name)
{
	char *old_name, *new_name;
	size_t dup_cnt = 0;
	int err;

	new_name = strdup(orig_name);
	if (!new_name)
		return 1;

	/* if name was never seen, find() leaves dup_cnt at 0 */
	(void)hashmap__find(name_map, orig_name, &dup_cnt);
	dup_cnt++;

	err = hashmap__set(name_map, new_name, dup_cnt, &old_name, NULL);
	if (err)
		free(new_name);

	free(old_name);

	return dup_cnt;
}
1658 static const char *btf_dump_resolve_name(struct btf_dump
*d
, __u32 id
,
1659 struct hashmap
*name_map
)
1661 struct btf_dump_type_aux_state
*s
= &d
->type_states
[id
];
1662 const struct btf_type
*t
= btf__type_by_id(d
->btf
, id
);
1663 const char *orig_name
= btf_name_of(d
, t
->name_off
);
1664 const char **cached_name
= &d
->cached_names
[id
];
1667 if (t
->name_off
== 0)
1670 if (s
->name_resolved
)
1671 return *cached_name
? *cached_name
: orig_name
;
1673 if (btf_is_fwd(t
) || (btf_is_enum(t
) && btf_vlen(t
) == 0)) {
1674 s
->name_resolved
= 1;
1678 dup_cnt
= btf_dump_name_dups(d
, name_map
, orig_name
);
1680 const size_t max_len
= 256;
1681 char new_name
[max_len
];
1683 snprintf(new_name
, max_len
, "%s___%zu", orig_name
, dup_cnt
);
1684 *cached_name
= strdup(new_name
);
1687 s
->name_resolved
= 1;
1688 return *cached_name
? *cached_name
: orig_name
;
1691 static const char *btf_dump_type_name(struct btf_dump
*d
, __u32 id
)
1693 return btf_dump_resolve_name(d
, id
, d
->type_names
);
1696 static const char *btf_dump_ident_name(struct btf_dump
*d
, __u32 id
)
1698 return btf_dump_resolve_name(d
, id
, d
->ident_names
);
1701 static int btf_dump_dump_type_data(struct btf_dump
*d
,
1703 const struct btf_type
*t
,
1709 static const char *btf_dump_data_newline(struct btf_dump
*d
)
1711 return d
->typed_dump
->compact
|| d
->typed_dump
->depth
== 0 ? "" : "\n";
1714 static const char *btf_dump_data_delim(struct btf_dump
*d
)
1716 return d
->typed_dump
->depth
== 0 ? "" : ",";
1719 static void btf_dump_data_pfx(struct btf_dump
*d
)
1721 int i
, lvl
= d
->typed_dump
->indent_lvl
+ d
->typed_dump
->depth
;
1723 if (d
->typed_dump
->compact
)
1726 for (i
= 0; i
< lvl
; i
++)
1727 btf_dump_printf(d
, "%s", d
->typed_dump
->indent_str
);
/* A macro is used here as btf_type_value[s]() appends format specifiers
 * to the format specifier passed in; these do the work of appending
 * delimiters etc while the caller simply has to specify the type values
 * in the format specifier + value(s).
 */
#define btf_dump_type_values(d, fmt, ...)				\
	btf_dump_printf(d, fmt "%s%s",					\
			##__VA_ARGS__,					\
			btf_dump_data_delim(d),				\
			btf_dump_data_newline(d))
1741 static int btf_dump_unsupported_data(struct btf_dump
*d
,
1742 const struct btf_type
*t
,
1745 btf_dump_printf(d
, "<unsupported kind:%u>", btf_kind(t
));
1749 static int btf_dump_get_bitfield_value(struct btf_dump
*d
,
1750 const struct btf_type
*t
,
1756 __u16 left_shift_bits
, right_shift_bits
;
1757 const __u8
*bytes
= data
;
1762 /* Maximum supported bitfield size is 64 bits */
1764 pr_warn("unexpected bitfield size %d\n", t
->size
);
1768 /* Bitfield value retrieval is done in two steps; first relevant bytes are
1769 * stored in num, then we left/right shift num to eliminate irrelevant bits.
1771 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1772 for (i
= t
->size
- 1; i
>= 0; i
--)
1773 num
= num
* 256 + bytes
[i
];
1774 nr_copy_bits
= bit_sz
+ bits_offset
;
1775 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1776 for (i
= 0; i
< t
->size
; i
++)
1777 num
= num
* 256 + bytes
[i
];
1778 nr_copy_bits
= t
->size
* 8 - bits_offset
;
1780 # error "Unrecognized __BYTE_ORDER__"
1782 left_shift_bits
= 64 - nr_copy_bits
;
1783 right_shift_bits
= 64 - bit_sz
;
1785 *value
= (num
<< left_shift_bits
) >> right_shift_bits
;
1790 static int btf_dump_bitfield_check_zero(struct btf_dump
*d
,
1791 const struct btf_type
*t
,
1799 err
= btf_dump_get_bitfield_value(d
, t
, data
, bits_offset
, bit_sz
, &check_num
);
1807 static int btf_dump_bitfield_data(struct btf_dump
*d
,
1808 const struct btf_type
*t
,
1816 err
= btf_dump_get_bitfield_value(d
, t
, data
, bits_offset
, bit_sz
, &print_num
);
1820 btf_dump_type_values(d
, "0x%llx", (unsigned long long)print_num
);
1825 /* ints, floats and ptrs */
1826 static int btf_dump_base_type_check_zero(struct btf_dump
*d
,
1827 const struct btf_type
*t
,
1831 static __u8 bytecmp
[16] = {};
1834 /* For pointer types, pointer size is not defined on a per-type basis.
1835 * On dump creation however, we store the pointer size.
1837 if (btf_kind(t
) == BTF_KIND_PTR
)
1838 nr_bytes
= d
->ptr_sz
;
1842 if (nr_bytes
< 1 || nr_bytes
> 16) {
1843 pr_warn("unexpected size %d for id [%u]\n", nr_bytes
, id
);
1847 if (memcmp(data
, bytecmp
, nr_bytes
) == 0)
1852 static bool ptr_is_aligned(const struct btf
*btf
, __u32 type_id
,
1855 int alignment
= btf__align_of(btf
, type_id
);
1860 return ((uintptr_t)data
) % alignment
== 0;
1863 static int btf_dump_int_data(struct btf_dump
*d
,
1864 const struct btf_type
*t
,
1869 __u8 encoding
= btf_int_encoding(t
);
1870 bool sign
= encoding
& BTF_INT_SIGNED
;
1871 char buf
[16] __attribute__((aligned(16)));
1874 if (sz
== 0 || sz
> sizeof(buf
)) {
1875 pr_warn("unexpected size %d for id [%u]\n", sz
, type_id
);
1879 /* handle packed int data - accesses of integers not aligned on
1880 * int boundaries can cause problems on some platforms.
1882 if (!ptr_is_aligned(d
->btf
, type_id
, data
)) {
1883 memcpy(buf
, data
, sz
);
1889 const __u64
*ints
= data
;
1892 /* avoid use of __int128 as some 32-bit platforms do not
1895 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1898 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1902 # error "Unrecognized __BYTE_ORDER__"
1905 btf_dump_type_values(d
, "0x%llx", (unsigned long long)lsi
);
1907 btf_dump_type_values(d
, "0x%llx%016llx", (unsigned long long)msi
,
1908 (unsigned long long)lsi
);
1913 btf_dump_type_values(d
, "%lld", *(long long *)data
);
1915 btf_dump_type_values(d
, "%llu", *(unsigned long long *)data
);
1919 btf_dump_type_values(d
, "%d", *(__s32
*)data
);
1921 btf_dump_type_values(d
, "%u", *(__u32
*)data
);
1925 btf_dump_type_values(d
, "%d", *(__s16
*)data
);
1927 btf_dump_type_values(d
, "%u", *(__u16
*)data
);
1930 if (d
->typed_dump
->is_array_char
) {
1931 /* check for null terminator */
1932 if (d
->typed_dump
->is_array_terminated
)
1934 if (*(char *)data
== '\0') {
1935 btf_dump_type_values(d
, "'\\0'");
1936 d
->typed_dump
->is_array_terminated
= true;
1939 if (isprint(*(char *)data
)) {
1940 btf_dump_type_values(d
, "'%c'", *(char *)data
);
1945 btf_dump_type_values(d
, "%d", *(__s8
*)data
);
1947 btf_dump_type_values(d
, "%u", *(__u8
*)data
);
1950 pr_warn("unexpected sz %d for id [%u]\n", sz
, type_id
);
1962 static int btf_dump_float_data(struct btf_dump
*d
,
1963 const struct btf_type
*t
,
1967 const union float_data
*flp
= data
;
1968 union float_data fl
;
1971 /* handle unaligned data; copy to local union */
1972 if (!ptr_is_aligned(d
->btf
, type_id
, data
)) {
1973 memcpy(&fl
, data
, sz
);
1979 btf_dump_type_values(d
, "%Lf", flp
->ld
);
1982 btf_dump_type_values(d
, "%lf", flp
->d
);
1985 btf_dump_type_values(d
, "%f", flp
->f
);
1988 pr_warn("unexpected size %d for id [%u]\n", sz
, type_id
);
1994 static int btf_dump_var_data(struct btf_dump
*d
,
1995 const struct btf_type
*v
,
1999 enum btf_func_linkage linkage
= btf_var(v
)->linkage
;
2000 const struct btf_type
*t
;
2005 case BTF_FUNC_STATIC
:
2008 case BTF_FUNC_EXTERN
:
2011 case BTF_FUNC_GLOBAL
:
2017 /* format of output here is [linkage] [type] [varname] = (type)value,
2018 * for example "static int cpu_profile_flip = (int)1"
2020 btf_dump_printf(d
, "%s", l
);
2022 t
= btf__type_by_id(d
->btf
, type_id
);
2023 btf_dump_emit_type_cast(d
, type_id
, false);
2024 btf_dump_printf(d
, " %s = ", btf_name_of(d
, v
->name_off
));
2025 return btf_dump_dump_type_data(d
, NULL
, t
, type_id
, data
, 0, 0);
2028 static int btf_dump_array_data(struct btf_dump
*d
,
2029 const struct btf_type
*t
,
2033 const struct btf_array
*array
= btf_array(t
);
2034 const struct btf_type
*elem_type
;
2035 __u32 i
, elem_type_id
;
2037 bool is_array_member
;
2038 bool is_array_terminated
;
2040 elem_type_id
= array
->type
;
2041 elem_type
= skip_mods_and_typedefs(d
->btf
, elem_type_id
, NULL
);
2042 elem_size
= btf__resolve_size(d
->btf
, elem_type_id
);
2043 if (elem_size
<= 0) {
2044 pr_warn("unexpected elem size %zd for array type [%u]\n",
2045 (ssize_t
)elem_size
, id
);
2049 if (btf_is_int(elem_type
)) {
2051 * BTF_INT_CHAR encoding never seems to be set for
2052 * char arrays, so if size is 1 and element is
2053 * printable as a char, we'll do that.
2056 d
->typed_dump
->is_array_char
= true;
2059 /* note that we increment depth before calling btf_dump_print() below;
2060 * this is intentional. btf_dump_data_newline() will not print a
2061 * newline for depth 0 (since this leaves us with trailing newlines
2062 * at the end of typed display), so depth is incremented first.
2063 * For similar reasons, we decrement depth before showing the closing
2066 d
->typed_dump
->depth
++;
2067 btf_dump_printf(d
, "[%s", btf_dump_data_newline(d
));
2069 /* may be a multidimensional array, so store current "is array member"
2070 * status so we can restore it correctly later.
2072 is_array_member
= d
->typed_dump
->is_array_member
;
2073 d
->typed_dump
->is_array_member
= true;
2074 is_array_terminated
= d
->typed_dump
->is_array_terminated
;
2075 d
->typed_dump
->is_array_terminated
= false;
2076 for (i
= 0; i
< array
->nelems
; i
++, data
+= elem_size
) {
2077 if (d
->typed_dump
->is_array_terminated
)
2079 btf_dump_dump_type_data(d
, NULL
, elem_type
, elem_type_id
, data
, 0, 0);
2081 d
->typed_dump
->is_array_member
= is_array_member
;
2082 d
->typed_dump
->is_array_terminated
= is_array_terminated
;
2083 d
->typed_dump
->depth
--;
2084 btf_dump_data_pfx(d
);
2085 btf_dump_type_values(d
, "]");
2090 static int btf_dump_struct_data(struct btf_dump
*d
,
2091 const struct btf_type
*t
,
2095 const struct btf_member
*m
= btf_members(t
);
2096 __u16 n
= btf_vlen(t
);
2099 /* note that we increment depth before calling btf_dump_print() below;
2100 * this is intentional. btf_dump_data_newline() will not print a
2101 * newline for depth 0 (since this leaves us with trailing newlines
2102 * at the end of typed display), so depth is incremented first.
2103 * For similar reasons, we decrement depth before showing the closing
2106 d
->typed_dump
->depth
++;
2107 btf_dump_printf(d
, "{%s", btf_dump_data_newline(d
));
2109 for (i
= 0; i
< n
; i
++, m
++) {
2110 const struct btf_type
*mtype
;
2115 mtype
= btf__type_by_id(d
->btf
, m
->type
);
2116 mname
= btf_name_of(d
, m
->name_off
);
2117 moffset
= btf_member_bit_offset(t
, i
);
2119 bit_sz
= btf_member_bitfield_size(t
, i
);
2120 err
= btf_dump_dump_type_data(d
, mname
, mtype
, m
->type
, data
+ moffset
/ 8,
2121 moffset
% 8, bit_sz
);
2125 d
->typed_dump
->depth
--;
2126 btf_dump_data_pfx(d
);
2127 btf_dump_type_values(d
, "}");
2133 unsigned long long lp
;
2136 static int btf_dump_ptr_data(struct btf_dump
*d
,
2137 const struct btf_type
*t
,
2141 if (ptr_is_aligned(d
->btf
, id
, data
) && d
->ptr_sz
== sizeof(void *)) {
2142 btf_dump_type_values(d
, "%p", *(void **)data
);
2146 memcpy(&pt
, data
, d
->ptr_sz
);
2148 btf_dump_type_values(d
, "0x%x", pt
.p
);
2150 btf_dump_type_values(d
, "0x%llx", pt
.lp
);
2155 static int btf_dump_get_enum_value(struct btf_dump
*d
,
2156 const struct btf_type
*t
,
2161 bool is_signed
= btf_kflag(t
);
2163 if (!ptr_is_aligned(d
->btf
, id
, data
)) {
2167 err
= btf_dump_get_bitfield_value(d
, t
, data
, 0, 0, &val
);
2170 *value
= (__s64
)val
;
2176 *value
= *(__s64
*)data
;
2179 *value
= is_signed
? (__s64
)*(__s32
*)data
: *(__u32
*)data
;
2182 *value
= is_signed
? *(__s16
*)data
: *(__u16
*)data
;
2185 *value
= is_signed
? *(__s8
*)data
: *(__u8
*)data
;
2188 pr_warn("unexpected size %d for enum, id:[%u]\n", t
->size
, id
);
2193 static int btf_dump_enum_data(struct btf_dump
*d
,
2194 const struct btf_type
*t
,
2202 err
= btf_dump_get_enum_value(d
, t
, data
, id
, &value
);
2206 is_signed
= btf_kflag(t
);
2207 if (btf_is_enum(t
)) {
2208 const struct btf_enum
*e
;
2210 for (i
= 0, e
= btf_enum(t
); i
< btf_vlen(t
); i
++, e
++) {
2211 if (value
!= e
->val
)
2213 btf_dump_type_values(d
, "%s", btf_name_of(d
, e
->name_off
));
2217 btf_dump_type_values(d
, is_signed
? "%d" : "%u", value
);
2219 const struct btf_enum64
*e
;
2221 for (i
= 0, e
= btf_enum64(t
); i
< btf_vlen(t
); i
++, e
++) {
2222 if (value
!= btf_enum64_value(e
))
2224 btf_dump_type_values(d
, "%s", btf_name_of(d
, e
->name_off
));
2228 btf_dump_type_values(d
, is_signed
? "%lldLL" : "%lluULL",
2229 (unsigned long long)value
);
2234 static int btf_dump_datasec_data(struct btf_dump
*d
,
2235 const struct btf_type
*t
,
2239 const struct btf_var_secinfo
*vsi
;
2240 const struct btf_type
*var
;
2244 btf_dump_type_values(d
, "SEC(\"%s\") ", btf_name_of(d
, t
->name_off
));
2246 for (i
= 0, vsi
= btf_var_secinfos(t
); i
< btf_vlen(t
); i
++, vsi
++) {
2247 var
= btf__type_by_id(d
->btf
, vsi
->type
);
2248 err
= btf_dump_dump_type_data(d
, NULL
, var
, vsi
->type
, data
+ vsi
->offset
, 0, 0);
2251 btf_dump_printf(d
, ";");
2256 /* return size of type, or if base type overflows, return -E2BIG. */
2257 static int btf_dump_type_data_check_overflow(struct btf_dump
*d
,
2258 const struct btf_type
*t
,
2267 /* bits_offset is at most 7. bit_sz is at most 128. */
2268 __u8 nr_bytes
= (bits_offset
+ bit_sz
+ 7) / 8;
2270 /* When bit_sz is non zero, it is called from
2271 * btf_dump_struct_data() where it only cares about
2272 * negative error value.
2273 * Return nr_bytes in success case to make it
2274 * consistent as the regular integer case below.
2276 return data
+ nr_bytes
> d
->typed_dump
->data_end
? -E2BIG
: nr_bytes
;
2279 size
= btf__resolve_size(d
->btf
, id
);
2281 if (size
< 0 || size
>= INT_MAX
) {
2282 pr_warn("unexpected size [%zu] for id [%u]\n",
2287 /* Only do overflow checking for base types; we do not want to
2288 * avoid showing part of a struct, union or array, even if we
2289 * do not have enough data to show the full object. By
2290 * restricting overflow checking to base types we can ensure
2291 * that partial display succeeds, while avoiding overflowing
2292 * and using bogus data for display.
2294 t
= skip_mods_and_typedefs(d
->btf
, id
, NULL
);
2296 pr_warn("unexpected error skipping mods/typedefs for id [%u]\n",
2301 switch (btf_kind(t
)) {
2303 case BTF_KIND_FLOAT
:
2306 case BTF_KIND_ENUM64
:
2307 if (data
+ bits_offset
/ 8 + size
> d
->typed_dump
->data_end
)
2316 static int btf_dump_type_data_check_zero(struct btf_dump
*d
,
2317 const struct btf_type
*t
,
2326 /* toplevel exceptions; we show zero values if
2327 * - we ask for them (emit_zeros)
2328 * - if we are at top-level so we see "struct empty { }"
2329 * - or if we are an array member and the array is non-empty and
2330 * not a char array; we don't want to be in a situation where we
2331 * have an integer array 0, 1, 0, 1 and only show non-zero values.
2332 * If the array contains zeroes only, or is a char array starting
2333 * with a '\0', the array-level check_zero() will prevent showing it;
2334 * we are concerned with determining zero value at the array member
2337 if (d
->typed_dump
->emit_zeroes
|| d
->typed_dump
->depth
== 0 ||
2338 (d
->typed_dump
->is_array_member
&&
2339 !d
->typed_dump
->is_array_char
))
2342 t
= skip_mods_and_typedefs(d
->btf
, id
, NULL
);
2344 switch (btf_kind(t
)) {
2347 return btf_dump_bitfield_check_zero(d
, t
, data
, bits_offset
, bit_sz
);
2348 return btf_dump_base_type_check_zero(d
, t
, id
, data
);
2349 case BTF_KIND_FLOAT
:
2351 return btf_dump_base_type_check_zero(d
, t
, id
, data
);
2352 case BTF_KIND_ARRAY
: {
2353 const struct btf_array
*array
= btf_array(t
);
2354 const struct btf_type
*elem_type
;
2355 __u32 elem_type_id
, elem_size
;
2358 elem_type_id
= array
->type
;
2359 elem_size
= btf__resolve_size(d
->btf
, elem_type_id
);
2360 elem_type
= skip_mods_and_typedefs(d
->btf
, elem_type_id
, NULL
);
2362 ischar
= btf_is_int(elem_type
) && elem_size
== 1;
2364 /* check all elements; if _any_ element is nonzero, all
2365 * of array is displayed. We make an exception however
2366 * for char arrays where the first element is 0; these
2367 * are considered zeroed also, even if later elements are
2368 * non-zero because the string is terminated.
2370 for (i
= 0; i
< array
->nelems
; i
++) {
2371 if (i
== 0 && ischar
&& *(char *)data
== 0)
2373 err
= btf_dump_type_data_check_zero(d
, elem_type
,
2378 if (err
!= -ENODATA
)
2383 case BTF_KIND_STRUCT
:
2384 case BTF_KIND_UNION
: {
2385 const struct btf_member
*m
= btf_members(t
);
2386 __u16 n
= btf_vlen(t
);
2388 /* if any struct/union member is non-zero, the struct/union
2389 * is considered non-zero and dumped.
2391 for (i
= 0; i
< n
; i
++, m
++) {
2392 const struct btf_type
*mtype
;
2395 mtype
= btf__type_by_id(d
->btf
, m
->type
);
2396 moffset
= btf_member_bit_offset(t
, i
);
2398 /* btf_int_bits() does not store member bitfield size;
2399 * bitfield size needs to be stored here so int display
2400 * of member can retrieve it.
2402 bit_sz
= btf_member_bitfield_size(t
, i
);
2403 err
= btf_dump_type_data_check_zero(d
, mtype
, m
->type
, data
+ moffset
/ 8,
2404 moffset
% 8, bit_sz
);
2411 case BTF_KIND_ENUM64
:
2412 err
= btf_dump_get_enum_value(d
, t
, data
, id
, &value
);
2423 /* returns size of data dumped, or error. */
2424 static int btf_dump_dump_type_data(struct btf_dump
*d
,
2426 const struct btf_type
*t
,
2434 size
= btf_dump_type_data_check_overflow(d
, t
, id
, data
, bits_offset
, bit_sz
);
2437 err
= btf_dump_type_data_check_zero(d
, t
, id
, data
, bits_offset
, bit_sz
);
2439 /* zeroed data is expected and not an error, so simply skip
2440 * dumping such data. Record other errors however.
2442 if (err
== -ENODATA
)
2446 btf_dump_data_pfx(d
);
2448 if (!d
->typed_dump
->skip_names
) {
2449 if (fname
&& strlen(fname
) > 0)
2450 btf_dump_printf(d
, ".%s = ", fname
);
2451 btf_dump_emit_type_cast(d
, id
, true);
2454 t
= skip_mods_and_typedefs(d
->btf
, id
, NULL
);
2456 switch (btf_kind(t
)) {
2460 case BTF_KIND_FUNC_PROTO
:
2461 case BTF_KIND_DECL_TAG
:
2462 err
= btf_dump_unsupported_data(d
, t
, id
);
2466 err
= btf_dump_bitfield_data(d
, t
, data
, bits_offset
, bit_sz
);
2468 err
= btf_dump_int_data(d
, t
, id
, data
, bits_offset
);
2470 case BTF_KIND_FLOAT
:
2471 err
= btf_dump_float_data(d
, t
, id
, data
);
2474 err
= btf_dump_ptr_data(d
, t
, id
, data
);
2476 case BTF_KIND_ARRAY
:
2477 err
= btf_dump_array_data(d
, t
, id
, data
);
2479 case BTF_KIND_STRUCT
:
2480 case BTF_KIND_UNION
:
2481 err
= btf_dump_struct_data(d
, t
, id
, data
);
2484 case BTF_KIND_ENUM64
:
2485 /* handle bitfield and int enum values */
2490 err
= btf_dump_get_bitfield_value(d
, t
, data
, bits_offset
, bit_sz
,
2494 enum_val
= (__s64
)print_num
;
2495 err
= btf_dump_enum_data(d
, t
, id
, &enum_val
);
2497 err
= btf_dump_enum_data(d
, t
, id
, data
);
2500 err
= btf_dump_var_data(d
, t
, id
, data
);
2502 case BTF_KIND_DATASEC
:
2503 err
= btf_dump_datasec_data(d
, t
, id
, data
);
2506 pr_warn("unexpected kind [%u] for id [%u]\n",
2507 BTF_INFO_KIND(t
->info
), id
);
2515 int btf_dump__dump_type_data(struct btf_dump
*d
, __u32 id
,
2516 const void *data
, size_t data_sz
,
2517 const struct btf_dump_type_data_opts
*opts
)
2519 struct btf_dump_data typed_dump
= {};
2520 const struct btf_type
*t
;
2523 if (!OPTS_VALID(opts
, btf_dump_type_data_opts
))
2524 return libbpf_err(-EINVAL
);
2526 t
= btf__type_by_id(d
->btf
, id
);
2528 return libbpf_err(-ENOENT
);
2530 d
->typed_dump
= &typed_dump
;
2531 d
->typed_dump
->data_end
= data
+ data_sz
;
2532 d
->typed_dump
->indent_lvl
= OPTS_GET(opts
, indent_level
, 0);
2534 /* default indent string is a tab */
2535 if (!OPTS_GET(opts
, indent_str
, NULL
))
2536 d
->typed_dump
->indent_str
[0] = '\t';
2538 libbpf_strlcpy(d
->typed_dump
->indent_str
, opts
->indent_str
,
2539 sizeof(d
->typed_dump
->indent_str
));
2541 d
->typed_dump
->compact
= OPTS_GET(opts
, compact
, false);
2542 d
->typed_dump
->skip_names
= OPTS_GET(opts
, skip_names
, false);
2543 d
->typed_dump
->emit_zeroes
= OPTS_GET(opts
, emit_zeroes
, false);
2545 ret
= btf_dump_dump_type_data(d
, NULL
, t
, id
, data
, 0, 0);
2547 d
->typed_dump
= NULL
;
2549 return libbpf_err(ret
);