// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/list.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"

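/* The CFG is kept as three nested levels: a cfg holds a sorted list of
 * functions, each function holds a sorted list of basic blocks, and each
 * basic block holds lists of its incoming and outgoing edges.
 */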
struct cfg {
	struct list_head funcs;
	int func_num;
};

struct func_node {
	struct list_head l;
	struct list_head bbs;
	struct bpf_insn *start;
	struct bpf_insn *end;
	int idx;
	int bb_num;
};

struct bb_node {
	struct list_head l;
	struct list_head e_prevs;
	struct list_head e_succs;
	struct bpf_insn *head;
	struct bpf_insn *tail;
	int idx;
};

#define EDGE_FLAG_EMPTY		0x0
#define EDGE_FLAG_FALLTHROUGH	0x1
#define EDGE_FLAG_JUMP		0x2

struct edge_node {
	struct list_head l;
	struct bb_node *src;
	struct bb_node *dst;
	int flags;
};

#define ENTRY_BLOCK_INDEX	0
#define EXIT_BLOCK_INDEX	1
#define NUM_FIXED_BLOCKS	2

#define func_prev(func)		list_prev_entry(func, l)
#define func_next(func)		list_next_entry(func, l)
#define bb_prev(bb)		list_prev_entry(bb, l)
#define bb_next(bb)		list_next_entry(bb, l)
#define entry_bb(func)		func_first_bb(func)
#define exit_bb(func)		func_last_bb(func)
#define cfg_first_func(cfg)	\
	list_first_entry(&cfg->funcs, struct func_node, l)
#define cfg_last_func(cfg)	\
	list_last_entry(&cfg->funcs, struct func_node, l)
#define func_first_bb(func)	\
	list_first_entry(&func->bbs, struct bb_node, l)
#define func_last_bb(func)	\
	list_last_entry(&func->bbs, struct bb_node, l)

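/* Insert a function starting at @insn into the sorted function list,
 * reusing an existing node when one already starts there. Returns NULL
 * on allocation failure.
 */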
static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
{
	struct func_node *new_func, *func;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func->start == insn)
			return func;
		else if (func->start > insn)
			break;
	}

	func = func_prev(func);
	new_func = calloc(1, sizeof(*new_func));
	if (!new_func) {
		p_err("OOM when allocating FUNC node");
		return NULL;
	}
	new_func->start = insn;
	new_func->idx = cfg->func_num;
	list_add(&new_func->l, &func->l);
	cfg->func_num++;

	return new_func;
}

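/* Insert a basic block starting at @insn into @func's sorted block list,
 * reusing an existing node when one already starts there.
 */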
static struct bb_node *func_append_bb(struct func_node *func,
				      struct bpf_insn *insn)
{
	struct bb_node *new_bb, *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
		else if (bb->head > insn)
			break;
	}

	bb = bb_prev(bb);
	new_bb = calloc(1, sizeof(*new_bb));
	if (!new_bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}
	new_bb->head = insn;
	INIT_LIST_HEAD(&new_bb->e_prevs);
	INIT_LIST_HEAD(&new_bb->e_succs);
	list_add(&new_bb->l, &bb->l);

	return new_bb;
}

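/* Allocate an empty block (no head/tail instructions) and link it in
 * right after @after; used for the ENTRY and EXIT pseudo-blocks.
 */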
static struct bb_node *func_insert_dummy_bb(struct list_head *after)
{
	struct bb_node *bb;

	bb = calloc(1, sizeof(*bb));
	if (!bb) {
		p_err("OOM when allocating BB node");
		return NULL;
	}

	INIT_LIST_HEAD(&bb->e_prevs);
	INIT_LIST_HEAD(&bb->e_succs);
	list_add(&bb->l, after);

	return bb;
}

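/* Split the instruction stream [@cur, @end) into functions: one function
 * starts at the first instruction, and one at each BPF-to-BPF pseudo call
 * target. Each function ends right before the next one's start. Returns
 * true on failure.
 */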
static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
				struct bpf_insn *end)
{
	struct func_node *func, *last_func;

	func = cfg_append_func(cfg, cur);
	if (!func)
		return true;

	for (; cur < end; cur++) {
		if (cur->code != (BPF_JMP | BPF_CALL))
			continue;
		if (cur->src_reg != BPF_PSEUDO_CALL)
			continue;
		func = cfg_append_func(cfg, cur + cur->off + 1);
		if (!func)
			return true;
	}

	last_func = cfg_last_func(cfg);
	last_func->end = end - 1;
	func = cfg_first_func(cfg);
	list_for_each_entry_from(func, &last_func->l, l) {
		func->end = func_next(func)->start - 1;
	}

	return false;
}

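/* Both the BPF_JMP and BPF_JMP32 instruction classes can alter control flow. */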
static bool is_jmp_insn(__u8 code)
{
	return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
}

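/* First pass of block partitioning: record the head instruction of each
 * block. A new block starts at every jump target and, for conditional
 * jumps, at the instruction following the jump. BPF_EXIT and BPF_CALL do
 * not split blocks here.
 */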
static bool func_partition_bb_head(struct func_node *func)
{
	struct bpf_insn *cur, *end;
	struct bb_node *bb;

	cur = func->start;
	end = func->end;
	INIT_LIST_HEAD(&func->bbs);
	bb = func_append_bb(func, cur);
	if (!bb)
		return true;

	for (; cur <= end; cur++) {
		if (is_jmp_insn(cur->code)) {
			__u8 opcode = BPF_OP(cur->code);

			if (opcode == BPF_EXIT || opcode == BPF_CALL)
				continue;

			bb = func_append_bb(func, cur + cur->off + 1);
			if (!bb)
				return true;

			if (opcode != BPF_JA) {
				bb = func_append_bb(func, cur + 1);
				if (!bb)
					return true;
			}
		}
	}

	return false;
}

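/* Second pass: with all heads known, a block's tail is the instruction
 * just before the next block's head. Also assign block indices, starting
 * after the reserved ENTRY/EXIT indices.
 */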
static void func_partition_bb_tail(struct func_node *func)
{
	unsigned int bb_idx = NUM_FIXED_BLOCKS;
	struct bb_node *bb, *last;

	last = func_last_bb(func);
	last->tail = func->end;
	bb = func_first_bb(func);
	list_for_each_entry_from(bb, &last->l, l) {
		bb->tail = bb_next(bb)->head - 1;
		bb->idx = bb_idx++;
	}

	last->idx = bb_idx++;
	func->bb_num = bb_idx;
}

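/* Add the dummy ENTRY and EXIT blocks at the two ends of @func's block
 * list. Returns true on allocation failure.
 */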
static bool func_add_special_bb(struct func_node *func)
{
	struct bb_node *bb;

	bb = func_insert_dummy_bb(&func->bbs);
	if (!bb)
		return true;
	bb->idx = ENTRY_BLOCK_INDEX;

	bb = func_insert_dummy_bb(&func_last_bb(func)->l);
	if (!bb)
		return true;
	bb->idx = EXIT_BLOCK_INDEX;

	return false;
}

static bool func_partition_bb(struct func_node *func)
{
	if (func_partition_bb_head(func))
		return true;

	func_partition_bb_tail(func);

	return false;
}

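/* Find the block whose first instruction is @insn, or NULL if the jump
 * target does not start a known block.
 */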
static struct bb_node *func_search_bb_with_head(struct func_node *func,
						struct bpf_insn *insn)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		if (bb->head == insn)
			return bb;
	}

	return NULL;
}

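/* Allocate an edge carrying @flags; @src or @dst may be NULL and filled
 * in by the caller afterwards. Returns NULL on allocation failure.
 */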
static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
				  int flags)
{
	struct edge_node *e;

	e = calloc(1, sizeof(*e));
	if (!e) {
		p_err("OOM when allocating edge node");
		return NULL;
	}

	if (src)
		e->src = src;
	if (dst)
		e->dst = dst;

	e->flags |= flags;

	return e;
}

static bool func_add_bb_edges(struct func_node *func)
{
	struct bpf_insn *insn;
	struct edge_node *e;
	struct bb_node *bb;

	bb = entry_bb(func);
	e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_succs);

	bb = exit_bb(func);
	e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
	if (!e)
		return true;
	list_add_tail(&e->l, &bb->e_prevs);

	bb = entry_bb(func);
	bb = bb_next(bb);
	list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
		e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
		if (!e)
			return true;

		insn = bb->tail;
		if (!is_jmp_insn(insn->code) ||
		    BPF_OP(insn->code) == BPF_EXIT) {
			e->dst = bb_next(bb);
			e->flags |= EDGE_FLAG_FALLTHROUGH;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		} else if (BPF_OP(insn->code) == BPF_JA) {
			e->dst = func_search_bb_with_head(func,
							  insn + insn->off + 1);
			e->flags |= EDGE_FLAG_JUMP;
			list_add_tail(&e->l, &bb->e_succs);
			continue;
		}

		/* Conditional jump: both a fall-through and a jump edge. */
		e->dst = bb_next(bb);
		e->flags |= EDGE_FLAG_FALLTHROUGH;
		list_add_tail(&e->l, &bb->e_succs);

		e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
		if (!e)
			return true;
		e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
		list_add_tail(&e->l, &bb->e_succs);
	}

	return false;
}

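/* Build the full CFG from @len bytes of instructions at @insn: partition
 * into functions, then into blocks, then wire up the edges. Returns true
 * on failure.
 */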
static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
{
	int cnt = len / sizeof(*insn);
	struct func_node *func;

	INIT_LIST_HEAD(&cfg->funcs);

	if (cfg_partition_funcs(cfg, insn, insn + cnt))
		return true;

	list_for_each_entry(func, &cfg->funcs, l) {
		if (func_partition_bb(func) || func_add_special_bb(func))
			return true;

		if (func_add_bb_edges(func))
			return true;
	}

	return false;
}

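/* Free every edge, block and function node in @cfg. */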
static void cfg_destroy(struct cfg *cfg)
{
	struct func_node *func, *func2;

	list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
		struct bb_node *bb, *bb2;

		list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
			struct edge_node *e, *e2;

			list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
				list_del(&e->l);
				free(e);
			}

			list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
				list_del(&e->l);
				free(e);
			}

			list_del(&bb->l);
			free(bb);
		}

		list_del(&func->l);
		free(func);
	}
}

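/* Print one DOT node for @bb. ENTRY and EXIT get a plain label; real
 * blocks are record-shaped and labelled with their disassembled
 * instructions.
 */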
static void draw_bb_node(struct func_node *func, struct bb_node *bb)
{
	const char *shape;

	if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
		shape = "Mdiamond";
	else
		shape = "record";

	printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
	       func->idx, bb->idx, shape);

	if (bb->idx == ENTRY_BLOCK_INDEX) {
		printf("ENTRY");
	} else if (bb->idx == EXIT_BLOCK_INDEX) {
		printf("EXIT");
	} else {
		unsigned int start_idx;
		struct dump_data dd = {};

		printf("{");
		kernel_syms_load(&dd);
		start_idx = bb->head - func->start;
		dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
		kernel_syms_destroy(&dd);
		printf("}");
	}

	printf("\"];\n\n");
}

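/* Print one DOT edge for each successor of @bb. */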
static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
{
	const char *style = "\"solid,bold\"";
	const char *color = "black";
	int func_idx = func->idx;
	struct edge_node *e;
	int weight = 10;

	if (list_empty(&bb->e_succs))
		return;

	list_for_each_entry(e, &bb->e_succs, l) {
		printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
		       func_idx, e->src->idx, func_idx, e->dst->idx,
		       style, color, weight);
		printf("];\n");
	}
}

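/* Emit the node definitions for every block of @func. */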
static void func_output_bb_def(struct func_node *func)
{
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_node(func, bb);
	}
}

static void func_output_edges(struct func_node *func)
{
	int func_idx = func->idx;
	struct bb_node *bb;

	list_for_each_entry(bb, &func->bbs, l) {
		draw_bb_succ_edges(func, bb);
	}

	/* Add an invisible edge from ENTRY to EXIT, this is to
	 * improve the graph layout.
	 */
	printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
	       func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}

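/* Emit the whole CFG as a DOT digraph: one "cluster_%d" subgraph per
 * function, with the function's block nodes and edges inside it.
 * Skeleton of the output (sketch; labels and attributes elided):
 *
 *	digraph "DOT graph for eBPF program" {
 *	subgraph "cluster_0" {
 *		fn_0_bb_0 [...]
 *		fn_0_bb_0:s -> fn_0_bb_2:n [...]
 *	}
 *	}
 */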
static void cfg_dump(struct cfg *cfg)
{
	struct func_node *func;

	printf("digraph \"DOT graph for eBPF program\" {\n");
	list_for_each_entry(func, &cfg->funcs, l) {
		printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
		       func->idx, func->idx);
		func_output_bb_def(func);
		func_output_edges(func);
		printf("}\n");
	}
	printf("}\n");
}

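/* Entry point for bpftool's "visual" dump mode. A typical invocation
 * (sketch; the exact command line depends on the bpftool version):
 *
 *	bpftool prog dump xlated id 3 visual > cfg.dot
 *	dot -Tpng cfg.dot > cfg.png
 */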
void dump_xlated_cfg(void *buf, unsigned int len)
{
	struct bpf_insn *insn = buf;
	struct cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	if (cfg_build(&cfg, insn, len))
		return;

	cfg_dump(&cfg);

	cfg_destroy(&cfg);
}