// SPDX-License-Identifier: GPL-2.0
/*
 * Common functionality for RV32 and RV64 BPF JIT compilers
 *
 * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
 *
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/memory.h>
#include <asm/patch.h>
#include <asm/cfi.h>
#include "bpf_jit.h"

/* Number of iterations to try until offsets converge. */
#define NR_JIT_ITERATIONS	32

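/*
 * Walk the whole BPF program and hand each instruction to the architecture
 * backend via bpf_jit_emit_insn(). When @offset is non-NULL, the running
 * count of emitted native instructions is recorded per BPF instruction so
 * that later passes can resolve branch targets.
 */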
static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
		/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
		if (ret > 0)
			i++;
		if (offset)
			offset[i] = ctx->ninsns;
		if (ret < 0)
			return ret;
	}
	return 0;
}

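/*
 * 32-bit ALU results are sign-extended on RV64, so the JIT cannot rely on
 * implicit zero-extension; returning true asks the verifier to insert
 * explicit zext where the upper 32 bits must be clear.
 */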
bool bpf_jit_needs_zext(void)
{
	return true;
}

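/*
 * JIT-compile @prog. Instruction selection depends on branch offsets, which
 * in turn depend on the size of the emitted code, so the body is rebuilt for
 * up to NR_JIT_ITERATIONS passes until the image size stops changing (see
 * the auipc/jalr -> jal relaxation note below).
 */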
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	unsigned int prog_size = 0, extable_size = 0;
	bool tmp_blinded = false, extra_pass = false;
	struct bpf_prog *tmp, *orig_prog = prog;
	int pass = 0, prev_ninsns = 0, i;
	struct rv_jit_data *jit_data;
	struct rv_jit_context *ctx;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}

	ctx = &jit_data->ctx;

	if (ctx->offset) {
		/* ctx was initialized by an earlier pass: this is the extra pass. */
		extra_pass = true;
		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
		goto skip_init_ctx;
	}

	ctx->arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
	ctx->user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
	ctx->prog = prog;
	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (!ctx->offset) {
		prog = orig_prog;
		goto out_offset;
	}

	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_offset;
	}

	/*
	 * Seed the offset table assuming a worst case of 32 emitted
	 * instructions per BPF instruction, so later passes can only
	 * shrink from here.
	 */
	for (i = 0; i < prog->len; i++) {
		prev_ninsns += 32;
		ctx->offset[i] = prev_ninsns;
	}

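	/*
	 * Iterate until the number of emitted instructions is stable from one
	 * pass to the next; each pass may shrink branches now that the offsets
	 * computed by the previous pass are known.
	 */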
	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
		pass++;
		ctx->ninsns = 0;

		bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
		ctx->prologue_len = ctx->ninsns;

		if (build_body(ctx, extra_pass, ctx->offset)) {
			prog = orig_prog;
			goto out_offset;
		}

		ctx->epilogue_offset = ctx->ninsns;
		bpf_jit_build_epilogue(ctx);

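		/*
		 * First convergence: allocate the packed image and keep
		 * iterating, since code can still shrink once the final load
		 * address is known. Second convergence (header already
		 * allocated): the image is final, stop.
		 */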
		if (ctx->ninsns == prev_ninsns) {
			if (jit_data->header)
				break;
			/* obtain the actual image size */
			extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);
			prog_size = sizeof(*ctx->insns) * ctx->ninsns;

			jit_data->ro_header =
				bpf_jit_binary_pack_alloc(prog_size + extable_size,
							  &jit_data->ro_image, sizeof(u32),
							  &jit_data->header, &jit_data->image,
							  bpf_fill_ill_insns);
			if (!jit_data->ro_header) {
				prog = orig_prog;
				goto out_offset;
			}

			/*
			 * Use the image(RW) for writing the JITed instructions. But also save
			 * the ro_image(RX) for calculating the offsets in the image. The RW
			 * image will be later copied to the RX image from where the program
			 * will run. The bpf_jit_binary_pack_finalize() will do this copy in the
			 * final step.
			 */
			ctx->ro_insns = (u16 *)jit_data->ro_image;
			ctx->insns = (u16 *)jit_data->image;
			/*
			 * Now, when the image is allocated, the image can
			 * potentially shrink more (auipc/jalr -> jal).
			 */
		}
		prev_ninsns = ctx->ninsns;
	}

	if (i == NR_JIT_ITERATIONS) {
		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
		prog = orig_prog;
		goto out_free_hdr;
	}

	if (extable_size)
		prog->aux->extable = (void *)ctx->ro_insns + prog_size;

skip_init_ctx:
	pass++;
	ctx->ninsns = 0;
	ctx->nexentries = 0;

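	/*
	 * Final code-generation pass over the program, emitting into the
	 * writable image at the now-final size before it is sealed below.
	 */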
	bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
	if (build_body(ctx, extra_pass, NULL)) {
		prog = orig_prog;
		goto out_free_hdr;
	}
	bpf_jit_build_epilogue(ctx);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);

	prog->bpf_func = (void *)ctx->ro_insns + cfi_get_offset();
	prog->jited = 1;
	prog->jited_len = prog_size - cfi_get_offset();

	if (!prog->is_func || extra_pass) {
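		/*
		 * Not a subprogram (or this is the extra pass), so the image is
		 * complete: bpf_jit_binary_pack_finalize() copies the RW image
		 * into the read-only executable region and frees the RW copy.
		 */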
		if (WARN_ON(bpf_jit_binary_pack_finalize(jit_data->ro_header,
							 jit_data->header))) {
			/* ro_header has been freed */
			jit_data->ro_header = NULL;
			prog = orig_prog;
			goto out_offset;
		}
		/*
		 * The instructions have now been copied to the ROX region from
		 * where they will execute.
		 * Write any modified data cache blocks out to memory and
		 * invalidate the corresponding blocks in the instruction cache.
		 */
		bpf_flush_icache(jit_data->ro_header, ctx->ro_insns + ctx->ninsns);
		/* Convert instruction counts to byte offsets for line info. */
		for (i = 0; i < prog->len; i++)
			ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
		bpf_prog_fill_jited_linfo(prog, ctx->offset);
out_offset:
		kfree(ctx->offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;

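	/*
	 * Error path taken once the packed image exists: copy the image size
	 * into the RO header (which is only written via text patching) so
	 * bpf_jit_binary_pack_free() can release both headers.
	 */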
out_free_hdr:
	if (jit_data->header) {
		bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
				   sizeof(jit_data->header->size));
		bpf_jit_binary_pack_free(jit_data->ro_header, jit_data->header);
	}
	goto out_offset;
}

u64 bpf_jit_alloc_exec_limit(void)
{
	return BPF_JIT_REGION_SIZE;
}

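/*
 * The read-only image can only be written through the text-patching
 * machinery; both helpers below take text_mutex to serialize with other
 * code modifications.
 */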
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_nosync(dst, src, len);
	mutex_unlock(&text_mutex);

	if (ret)
		return ERR_PTR(-EINVAL);

	return dst;
}

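/*
 * Overwrite a region of the packed image with zero bytes; the all-zero
 * pattern decodes as an illegal instruction on RISC-V, so stale code in a
 * freed slot cannot be executed.
 */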
int bpf_arch_text_invalidate(void *dst, size_t len)
{
	int ret;

	mutex_lock(&text_mutex);
	ret = patch_text_set_nosync(dst, 0, len);
	mutex_unlock(&text_mutex);

	return ret;
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct rv_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header);
			kfree(jit_data);
		}
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}