// SPDX-License-Identifier: GPL-2.0
/*
 * The back-end-agnostic part of Just-In-Time compiler for eBPF bytecode.
 *
 * Copyright (c) 2024 Synopsys Inc.
 * Author: Shahab Vahedi <shahab@synopsys.com>
 */
#include <linux/bug.h>
#include "bpf_jit.h"

/*
 * Check for the return value. A pattern used often in this file.
 * There must be a "ret" variable of type "int" in the scope.
 */
#define CHECK_RET(cmd)			\
	do {				\
		ret = (cmd);		\
		if (ret < 0)		\
			return ret;	\
	} while (0)
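
/*
 * Illustrative usage of CHECK_RET() (a sketch mirroring the callers later
 * in this file; "some_step" is a hypothetical name):
 *
 *	static int some_step(struct jit_context *ctx)
 *	{
 *		int ret;
 *
 *		CHECK_RET(jit_buffer_check(ctx));	// bails out if negative
 *		CHECK_RET(handle_prologue(ctx));
 *		return 0;
 *	}
 */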
#ifdef ARC_BPF_JIT_DEBUG
/* Dumps bytes in /var/log/messages at KERN_INFO level (4). */
static void dump_bytes(const u8 *buf, u32 len, const char *header)
{
	char line[64];
	u32 i, j;

	pr_info("-----------------[ %s ]-----------------\n", header);

	for (i = 0, j = 0; i < len; i++) {
		/* Last input byte? */
		if (i == len - 1) {
			j += scnprintf(line + j, 64 - j, "0x%02x", buf[i]);
			pr_info("%s\n", line);
			break;
		}
		/* End of line? */
		else if (i % 8 == 7) {
			j += scnprintf(line + j, 64 - j, "0x%02x", buf[i]);
			pr_info("%s\n", line);
			j = 0;
		} else {
			j += scnprintf(line + j, 64 - j, "0x%02x, ", buf[i]);
		}
	}
}
#endif /* ARC_BPF_JIT_DEBUG */
/********************* JIT context ***********************/

/*
 * buf:		Translated instructions end up here.
 * len:		The length of the whole block in bytes.
 * index:	The offset at which the _next_ instruction may be put.
 */
struct jit_buffer {
	u8	*buf;
	u32	len;
	u32	index;
};

/*
 * This is a subset of "struct jit_context", whose information is deemed
 * necessary for the next extra pass to come.
 *
 * bpf_header:	Needed to finally lock the region.
 * bpf2insn:	Used to find the translation for instructions of interest.
 *
 * Things like "jit.buf" and "jit.len" can be retrieved respectively from
 * "prog->bpf_func" and "prog->jited_len".
 */
struct arc_jit_data {
	struct bpf_binary_header	*bpf_header;
	u32				*bpf2insn;
};
/*
 * The JIT pertinent context that is used by different functions.
 *
 * prog:		The current eBPF program being handled.
 * orig_prog:		The original eBPF program before any possible change.
 * jit:			The JIT buffer and its length.
 * bpf_header:		The JITed program header. "jit.buf" points inside it.
 * emit:		If set, opcodes are written to memory; else, a dry-run.
 * do_zext:		If true, 32-bit sub-registers must be zero-extended.
 * bpf2insn:		Maps BPF insn indices to their counterparts in jit.buf.
 * bpf2insn_valid:	Indicates if "bpf2insn" is populated with the mappings.
 * jit_data:		A piece of memory to transfer data to the next pass.
 * arc_regs_clobbered:	Each bit status determines if that arc reg is clobbered.
 * save_blink:		Whether ARC's "blink" register needs to be saved.
 * frame_size:		Derived from "prog->aux->stack_depth".
 * epilogue_offset:	Used by early "return"s in the code to jump here.
 * need_extra_pass:	A forecast if an "extra_pass" will occur.
 * is_extra_pass:	Indicates if the current pass is an extra pass.
 * user_bpf_prog:	True, if VM opcodes come from a real program.
 * blinded:		True if the "constant blinding" step returned a new "prog".
 * success:		Indicates if the whole JIT went OK.
 */
struct jit_context {
	struct bpf_prog			*prog;
	struct bpf_prog			*orig_prog;
	struct jit_buffer		jit;
	struct bpf_binary_header	*bpf_header;
	bool				emit;
	bool				do_zext;
	u32				*bpf2insn;
	bool				bpf2insn_valid;
	struct arc_jit_data		*jit_data;
	u32				arc_regs_clobbered;
	bool				save_blink;
	u16				frame_size;
	u32				epilogue_offset;
	bool				need_extra_pass;
	bool				is_extra_pass;
	bool				user_bpf_prog;
	bool				blinded;
	bool				success;
};
/*
 * If we're in ARC_BPF_JIT_DEBUG mode and the debug level is right, dump the
 * input BPF stream. "bpf_jit_dump()" is not fully suited for this purpose.
 */
static void vm_dump(const struct bpf_prog *prog)
{
#ifdef ARC_BPF_JIT_DEBUG
	if (bpf_jit_enable > 1)
		dump_bytes((u8 *)prog->insns, 8 * prog->len, " VM  ");
#endif
}
/*
 * If the right level of debug is set, dump the bytes. There are 2 variants:
 *
 * 1. Use the standard bpf_jit_dump() which is meant only for JITed code.
 * 2. Use the dump_bytes() to match its "vm_dump()" instance.
 */
static void jit_dump(const struct jit_context *ctx)
{
#ifdef ARC_BPF_JIT_DEBUG
	char header[8];
#endif
	const int pass = ctx->is_extra_pass ? 2 : 1;

	if (bpf_jit_enable <= 1 || !ctx->prog->jited)
		return;

#ifdef ARC_BPF_JIT_DEBUG
	scnprintf(header, sizeof(header), "JIT:%d", pass);
	dump_bytes(ctx->jit.buf, ctx->jit.len, header);
#else
	bpf_jit_dump(ctx->prog->len, ctx->jit.len, pass, ctx->jit.buf);
#endif
}
/* Initialise the context so there's no garbage. */
static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
{
	memset(ctx, 0, sizeof(*ctx));

	ctx->orig_prog = prog;

	/* If constant blinding was requested but failed, scram. */
	ctx->prog = bpf_jit_blind_constants(prog);
	if (IS_ERR(ctx->prog))
		return PTR_ERR(ctx->prog);
	ctx->blinded = (ctx->prog != ctx->orig_prog);

	/* If the verifier doesn't zero-extend, then we have to do it. */
	ctx->do_zext = !ctx->prog->aux->verifier_zext;

	ctx->is_extra_pass = ctx->prog->jited;
	ctx->user_bpf_prog = ctx->prog->is_func;

	return 0;
}
/*
 * Only after the first iteration of the normal pass (the dry-run) are
 * there valid offsets in the ctx->bpf2insn array.
 */
static inline bool offsets_available(const struct jit_context *ctx)
{
	return ctx->bpf2insn_valid;
}
191 * "*mem" should be freed when there is no "extra pass" to come,
192 * or the compilation terminated abruptly. A few of such memory
193 * allocations are: ctx->jit_data and ctx->bpf2insn.
195 static inline void maybe_free(struct jit_context
*ctx
, void **mem
)
198 if (!ctx
->success
|| !ctx
->need_extra_pass
) {
/*
 * Free memories based on the status of the context.
 *
 * A note about "bpf_header": On successful runs, "bpf_header" is
 * not freed, because "jit.buf", a sub-array of it, is returned as
 * the "bpf_func". However, "bpf_header" is lost and nothing points
 * to it. This should not cause a leakage, because apparently
 * "bpf_header" can be revived by "bpf_jit_binary_hdr()". This is
 * how "bpf_jit_free()" in "kernel/bpf/core.c" releases the memory.
 */
static void jit_ctx_cleanup(struct jit_context *ctx)
{
	if (ctx->blinded) {
		/* If all went well, release the orig_prog. */
		if (ctx->success)
			bpf_jit_prog_release_other(ctx->prog, ctx->orig_prog);
		else
			bpf_jit_prog_release_other(ctx->orig_prog, ctx->prog);
	}

	maybe_free(ctx, (void **)&ctx->bpf2insn);
	maybe_free(ctx, (void **)&ctx->jit_data);

	if (!ctx->bpf2insn)
		ctx->bpf2insn_valid = false;

	/* Freeing "bpf_header" is enough. "jit.buf" is a sub-array of it. */
	if (!ctx->success && ctx->bpf_header) {
		bpf_jit_binary_free(ctx->bpf_header);
		ctx->bpf_header = NULL;
		ctx->jit.buf = NULL;
		ctx->jit.len = 0;
	}

	ctx->emit = false;
	ctx->do_zext = false;
}
/*
 * Analyse the register usage and record the frame size.
 * The register usage is determined by consulting the back-end.
 */
static void analyze_reg_usage(struct jit_context *ctx)
{
	u32 i;
	u32 usage = 0;
	const struct bpf_insn *insn = ctx->prog->insnsi;

	for (i = 0; i < ctx->prog->len; i++) {
		u8 bpf_reg;
		bool call;

		bpf_reg = insn[i].dst_reg;
		call = (insn[i].code == (BPF_JMP | BPF_CALL));
		usage |= mask_for_used_regs(bpf_reg, call);
	}

	ctx->arc_regs_clobbered = usage;
	ctx->frame_size = ctx->prog->aux->stack_depth;
}
/* Verify that no instruction will be emitted when there is no buffer. */
static inline int jit_buffer_check(const struct jit_context *ctx)
{
	if (ctx->emit) {
		if (!ctx->jit.buf) {
			pr_err("bpf-jit: inconsistent state; no buffer to emit instructions.\n");
			return -EINVAL;
		} else if (ctx->jit.index > ctx->jit.len) {
			pr_err("bpf-jit: estimated JIT length is less than the emitted instructions.\n");
			return -EFAULT;
		}
	}

	return 0;
}
/* On a dry-run (emit=false), "jit.len" is growing gradually. */
static inline void jit_buffer_update(struct jit_context *ctx, u32 n)
{
	if (!ctx->emit)
		ctx->jit.len += n;
	else
		ctx->jit.index += n;
}
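
/*
 * In other words (an illustrative sketch): during the dry-run, emitting a
 * 4-byte instruction only bumps "jit.len" (e.g. from 12 to 16) while
 * "jit.index" stays put; with "emit == true" the same call advances
 * "jit.index" instead. jit_compile() later verifies that the two totals
 * end up equal.
 */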
/* Based on "emit", determine the address where instructions are emitted. */
static inline u8 *effective_jit_buf(const struct jit_context *ctx)
{
	return ctx->emit ? (ctx->jit.buf + ctx->jit.index) : NULL;
}
/* Prologue based on context variables set by "analyze_reg_usage()". */
static int handle_prologue(struct jit_context *ctx)
{
	int ret;
	u8 *buf = effective_jit_buf(ctx);
	u32 len = 0;

	CHECK_RET(jit_buffer_check(ctx));

	len = arc_prologue(buf, ctx->arc_regs_clobbered, ctx->frame_size);
	jit_buffer_update(ctx, len);

	return 0;
}
/* The counterpart of "handle_prologue()". */
static int handle_epilogue(struct jit_context *ctx)
{
	int ret;
	u8 *buf = effective_jit_buf(ctx);
	u32 len = 0;

	CHECK_RET(jit_buffer_check(ctx));

	len = arc_epilogue(buf, ctx->arc_regs_clobbered, ctx->frame_size);
	jit_buffer_update(ctx, len);

	return 0;
}
/* Tell which number of the BPF instruction we are dealing with. */
static inline s32 get_index_for_insn(const struct jit_context *ctx,
				     const struct bpf_insn *insn)
{
	return (insn - ctx->prog->insnsi);
}
/*
 * In most cases, the "offset" is read from "insn->off". However,
 * if it is an unconditional BPF_JMP32, then it comes from "insn->imm".
 *
 * (Courtesy of "cpu=v4" support)
 */
static inline s32 get_offset(const struct bpf_insn *insn)
{
	if ((BPF_CLASS(insn->code) == BPF_JMP32) &&
	    (BPF_OP(insn->code) == BPF_JA))
		return insn->imm;
	else
		return insn->off;
}
/*
 * Determine the index of the BPF instruction we're jumping to.
 *
 * The "offset" is interpreted as the "number" of BPF instructions
 * from the _next_ BPF instruction, e.g.:
 *
 *  4 means 4 instructions after the next insn
 *  0 means 0 instructions after the next insn -> fallthrough.
 * -1 means 1 instruction before the next insn -> jmp to current insn.
 *
 * Another way to look at this: "offset" is the number of instructions
 * that exist between the current instruction and the target instruction.
 *
 * It is worth noting that a "mov r,i64", which is 16 bytes long, is
 * treated as two instructions long, therefore "offset" needn't be
 * treated specially for those. Everything is uniform.
 */
static inline s32 get_target_index_for_insn(const struct jit_context *ctx,
					    const struct bpf_insn *insn)
{
	return (get_index_for_insn(ctx, insn) + 1) + get_offset(insn);
}
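
/*
 * Worked example (illustrative indices): a jump at BPF index 7 with
 * offset 4 targets (7 + 1) + 4 = 12, offset 0 targets index 8 (plain
 * fallthrough), and offset -1 targets (7 + 1) - 1 = 7, i.e. the jump
 * instruction itself.
 */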
/* Is there an immediate operand encoded in the "insn"? */
static inline bool has_imm(const struct bpf_insn *insn)
{
	return BPF_SRC(insn->code) == BPF_K;
}
/* Is this the last BPF instruction? */
static inline bool is_last_insn(const struct bpf_prog *prog, u32 idx)
{
	return idx == (prog->len - 1);
}
/*
 * Invocation of this function conditionally signals the need for
 * an extra pass. The conditions that must be met are:
 *
 * 1. The current pass itself shouldn't be an extra pass.
 * 2. The stream of bytes being JITed must come from a user program.
 */
static inline void set_need_for_extra_pass(struct jit_context *ctx)
{
	if (!ctx->is_extra_pass)
		ctx->need_extra_pass = ctx->user_bpf_prog;
}
/*
 * Check if the "size" is valid and then transfer the control to
 * the back-end for the swap.
 */
static int handle_swap(u8 *buf, u8 rd, u8 size, u8 endian,
		       bool force, bool do_zext, u8 *len)
{
	/* Sanity check on the size. */
	switch (size) {
	case 16:
	case 32:
	case 64:
		break;
	default:
		pr_err("bpf-jit: invalid size for swap.\n");
		return -EINVAL;
	}

	*len = gen_swap(buf, rd, size, endian, force, do_zext);

	return 0;
}
/* Checks if the (instruction) index is in valid range. */
static inline bool check_insn_idx_valid(const struct jit_context *ctx,
					const s32 idx)
{
	return (idx >= 0 && idx < ctx->prog->len);
}
/*
 * Decouple the back-end from BPF by converting BPF conditions
 * to internal enum. ARC_CC_* start from 0 and are used as index
 * to an array. BPF_J* usage must end after this conversion.
 */
static int bpf_cond_to_arc(const u8 op, u8 *arc_cc)
{
	switch (op) {
	case BPF_JA:
		*arc_cc = ARC_CC_AL;
		break;
	case BPF_JEQ:
		*arc_cc = ARC_CC_EQ;
		break;
	case BPF_JGT:
		*arc_cc = ARC_CC_UGT;
		break;
	case BPF_JGE:
		*arc_cc = ARC_CC_UGE;
		break;
	case BPF_JSET:
		*arc_cc = ARC_CC_SET;
		break;
	case BPF_JNE:
		*arc_cc = ARC_CC_NE;
		break;
	case BPF_JSGT:
		*arc_cc = ARC_CC_SGT;
		break;
	case BPF_JSGE:
		*arc_cc = ARC_CC_SGE;
		break;
	case BPF_JLT:
		*arc_cc = ARC_CC_ULT;
		break;
	case BPF_JLE:
		*arc_cc = ARC_CC_ULE;
		break;
	case BPF_JSLT:
		*arc_cc = ARC_CC_SLT;
		break;
	case BPF_JSLE:
		*arc_cc = ARC_CC_SLE;
		break;
	default:
		pr_err("bpf-jit: can't handle condition 0x%02X\n", op);
		return -EINVAL;
	}

	return 0;
}
/*
 * Check a few things for a supposedly "jump" instruction:
 *
 * 0. "insn" is a "jump" instruction, but not the "call/exit" variant.
 * 1. The current "insn" index is in valid range.
 * 2. The index of the target instruction is in valid range.
 */
static int check_bpf_jump(const struct jit_context *ctx,
			  const struct bpf_insn *insn)
{
	const u8 class = BPF_CLASS(insn->code);
	const u8 op = BPF_OP(insn->code);

	/* Must be a jmp(32) instruction that is not a "call/exit". */
	if ((class != BPF_JMP && class != BPF_JMP32) ||
	    (op == BPF_CALL || op == BPF_EXIT)) {
		pr_err("bpf-jit: not a jump instruction.\n");
		return -EINVAL;
	}

	if (!check_insn_idx_valid(ctx, get_index_for_insn(ctx, insn))) {
		pr_err("bpf-jit: the bpf jump insn is not in prog.\n");
		return -EINVAL;
	}

	if (!check_insn_idx_valid(ctx, get_target_index_for_insn(ctx, insn))) {
		pr_err("bpf-jit: bpf jump label is out of range.\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Based on input "insn", consult "ctx->bpf2insn" to get the
 * related index (offset) of the translation in the JIT stream.
 */
static u32 get_curr_jit_off(const struct jit_context *ctx,
			    const struct bpf_insn *insn)
{
	const s32 idx = get_index_for_insn(ctx, insn);
#ifdef ARC_BPF_JIT_DEBUG
	BUG_ON(!offsets_available(ctx) || !check_insn_idx_valid(ctx, idx));
#endif
	return ctx->bpf2insn[idx];
}
/*
 * The input "insn" must be a jump instruction.
 *
 * Based on input "insn", consult "ctx->bpf2insn" to get the
 * related JIT index (offset) of the "target instruction" that
 * "insn" would jump to.
 */
static u32 get_targ_jit_off(const struct jit_context *ctx,
			    const struct bpf_insn *insn)
{
	const s32 tidx = get_target_index_for_insn(ctx, insn);
#ifdef ARC_BPF_JIT_DEBUG
	BUG_ON(!offsets_available(ctx) || !check_insn_idx_valid(ctx, tidx));
#endif
	return ctx->bpf2insn[tidx];
}
/*
 * This function will return 0 for a feasible jump.
 *
 * Consult the back-end to check if it finds it feasible to emit
 * the necessary instructions based on "cond" and the displacement
 * between the "from_off" and the "to_off".
 */
static int feasible_jit_jump(u32 from_off, u32 to_off, u8 cond, bool j32)
{
	int ret = 0;

	if (j32) {
		if (!check_jmp_32(from_off, to_off, cond))
			ret = -EFAULT;
	} else {
		if (!check_jmp_64(from_off, to_off, cond))
			ret = -EFAULT;
	}

	if (ret != 0)
		pr_err("bpf-jit: the JIT displacement is not OK.\n");

	return ret;
}
/*
 * This jump handler performs the following steps:
 *
 * 1. Compute ARC's internal condition code from BPF's
 * 2. Determine the bitness of the operation (32 vs. 64)
 * 3. Sanity check on the BPF stream
 * 4. Sanity check on what is supposed to be JIT's displacement
 * 5. And finally, emit the necessary instructions
 *
 * The last two steps are performed through the back-end.
 * The values of steps 1 and 2 are necessary inputs for the back-end.
 */
static int handle_jumps(const struct jit_context *ctx,
			const struct bpf_insn *insn,
			u8 *len)
{
	u8 cond;
	int ret = 0;
	u8 *buf = effective_jit_buf(ctx);
	const bool j32 = (BPF_CLASS(insn->code) == BPF_JMP32);
	const u8 rd = insn->dst_reg;
	u8 rs = insn->src_reg;
	u32 curr_off = 0, targ_off = 0;

	*len = 0;

	/* Map the BPF condition to internal enum. */
	CHECK_RET(bpf_cond_to_arc(BPF_OP(insn->code), &cond));

	/* Sanity check on the BPF byte stream. */
	CHECK_RET(check_bpf_jump(ctx, insn));

	/*
	 * Move the immediate into a temporary register _now_ for 2 reasons:
	 *
	 * 1. "gen_jmp_{32,64}()" deal with operands in registers.
	 *
	 * 2. The "len" parameter will grow so that the current jit offset
	 *    (curr_off) will have increased to a point where the necessary
	 *    instructions can be inserted by "gen_jmp_{32,64}()".
	 */
	if (has_imm(insn) && cond != ARC_CC_AL) {
		if (j32) {
			*len += mov_r32_i32(BUF(buf, *len), JIT_REG_TMP,
					    insn->imm);
		} else {
			*len += mov_r64_i32(BUF(buf, *len), JIT_REG_TMP,
					    insn->imm);
		}
		rs = JIT_REG_TMP;
	}

	/* If the offsets are known, check if the branch can occur. */
	if (offsets_available(ctx)) {
		curr_off = get_curr_jit_off(ctx, insn) + *len;
		targ_off = get_targ_jit_off(ctx, insn);

		/* Sanity check on the back-end side. */
		CHECK_RET(feasible_jit_jump(curr_off, targ_off, cond, j32));
	}

	if (j32) {
		*len += gen_jmp_32(BUF(buf, *len), rd, rs, cond,
				   curr_off, targ_off);
	} else {
		*len += gen_jmp_64(BUF(buf, *len), rd, rs, cond,
				   curr_off, targ_off);
	}

	return ret;
}
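
/*
 * Illustrative trace (a sketch; register and offset values are made up):
 * for a conditional 64-bit jump with an immediate operand such as
 * "if r1 > 42 goto +5", the code above first emits
 * "mov_r64_i32(..., JIT_REG_TMP, 42)" and switches "rs" to JIT_REG_TMP,
 * so that the later gen_jmp_64() only ever compares two registers and is
 * emitted at "curr_off + *len", past the freshly emitted move.
 */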
/* Jump to translated epilogue address. */
static int handle_jmp_epilogue(struct jit_context *ctx,
			       const struct bpf_insn *insn, u8 *len)
{
	u8 *buf = effective_jit_buf(ctx);
	u32 curr_off = 0, epi_off = 0;

	/* Check the offset only if the data is available. */
	if (offsets_available(ctx)) {
		curr_off = get_curr_jit_off(ctx, insn);
		epi_off = ctx->epilogue_offset;

		if (!check_jmp_64(curr_off, epi_off, ARC_CC_AL)) {
			pr_err("bpf-jit: epilogue offset is not valid.\n");
			return -EINVAL;
		}
	}

	/* Jump to "epilogue offset" (rd and rs don't matter). */
	*len = gen_jmp_64(buf, 0, 0, ARC_CC_AL, curr_off, epi_off);

	return 0;
}
/* Try to get the resolved address and generate the instructions. */
static int handle_call(struct jit_context *ctx,
		       const struct bpf_insn *insn,
		       u8 *len)
{
	int ret;
	bool in_kernel_func, fixed = false;
	u64 addr = 0;
	u8 *buf = effective_jit_buf(ctx);

	ret = bpf_jit_get_func_addr(ctx->prog, insn, ctx->is_extra_pass,
				    &addr, &fixed);
	if (ret < 0) {
		pr_err("bpf-jit: can't get the address for call.\n");
		return ret;
	}
	in_kernel_func = fixed;

	/* No valuable address retrieved (yet). */
	if (!fixed && !addr)
		set_need_for_extra_pass(ctx);

	*len = gen_func_call(buf, (ARC_ADDR)addr, in_kernel_func);

	if (insn->src_reg != BPF_PSEUDO_CALL) {
		/* Assigning ABI's return reg to JIT's return reg. */
		*len += arc_to_bpf_return(BUF(buf, *len));
	}

	return 0;
}
/*
 * Try to generate instructions for loading a 64-bit immediate.
 * This sort of instruction is usually associated with the 64-bit
 * relocations: R_BPF_64_64. Therefore, signal the need for an extra
 * pass if the circumstances are right.
 */
static int handle_ld_imm64(struct jit_context *ctx,
			   const struct bpf_insn *insn,
			   u8 *len)
{
	const s32 idx = get_index_for_insn(ctx, insn);
	u8 *buf = effective_jit_buf(ctx);

	/* We're about to consume 2 VM instructions. */
	if (is_last_insn(ctx->prog, idx)) {
		pr_err("bpf-jit: need more data for 64-bit immediate.\n");
		return -EINVAL;
	}

	*len = mov_r64_i64(buf, insn->dst_reg, insn->imm, (insn + 1)->imm);

	if (bpf_pseudo_func(insn))
		set_need_for_extra_pass(ctx);

	return 0;
}
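
/*
 * For reference: a "BPF_LD | BPF_DW | BPF_IMM" occupies two 8-byte BPF
 * slots, with the lower 32 bits of the value in "insn->imm" and the upper
 * 32 bits in "(insn + 1)->imm", which is exactly how the mov_r64_i64()
 * call above consumes them.
 */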
/*
 * Handles one eBPF instruction at a time. To make this function faster,
 * it does not call "jit_buffer_check()". Else, it would call it for every
 * instruction. As a result, it should not be invoked directly. Only
 * "handle_body()", which has already executed the "check", may call this
 * function.
 *
 * If the "ret" value is negative, something has gone wrong. Else,
 * it mostly holds the value 0 and rarely 1. Number 1 signals
 * the loop in "handle_body()" to skip the next instruction, because
 * it has been consumed as part of a 64-bit immediate value.
 */
static int handle_insn(struct jit_context *ctx, u32 idx)
{
	const struct bpf_insn *insn = &ctx->prog->insnsi[idx];
	const u8  code = insn->code;
	const u8  dst  = insn->dst_reg;
	const u8  src  = insn->src_reg;
	const s16 off  = insn->off;
	const s32 imm  = insn->imm;
	u8 *buf = effective_jit_buf(ctx);
	u8  len = 0;
	int ret = 0;

	switch (code) {
	/* dst += src (32-bit) */
	case BPF_ALU | BPF_ADD | BPF_X:
		len = add_r32(buf, dst, src);
		break;
	/* dst += imm (32-bit) */
	case BPF_ALU | BPF_ADD | BPF_K:
		len = add_r32_i32(buf, dst, imm);
		break;
	/* dst -= src (32-bit) */
	case BPF_ALU | BPF_SUB | BPF_X:
		len = sub_r32(buf, dst, src);
		break;
	/* dst -= imm (32-bit) */
	case BPF_ALU | BPF_SUB | BPF_K:
		len = sub_r32_i32(buf, dst, imm);
		break;
	/* dst = -dst (32-bit) */
	case BPF_ALU | BPF_NEG:
		len = neg_r32(buf, dst);
		break;
	/* dst *= src (32-bit) */
	case BPF_ALU | BPF_MUL | BPF_X:
		len = mul_r32(buf, dst, src);
		break;
	/* dst *= imm (32-bit) */
	case BPF_ALU | BPF_MUL | BPF_K:
		len = mul_r32_i32(buf, dst, imm);
		break;
	/* dst /= src (32-bit) */
	case BPF_ALU | BPF_DIV | BPF_X:
		len = div_r32(buf, dst, src, off == 1);
		break;
	/* dst /= imm (32-bit) */
	case BPF_ALU | BPF_DIV | BPF_K:
		len = div_r32_i32(buf, dst, imm, off == 1);
		break;
	/* dst %= src (32-bit) */
	case BPF_ALU | BPF_MOD | BPF_X:
		len = mod_r32(buf, dst, src, off == 1);
		break;
	/* dst %= imm (32-bit) */
	case BPF_ALU | BPF_MOD | BPF_K:
		len = mod_r32_i32(buf, dst, imm, off == 1);
		break;
	/* dst &= src (32-bit) */
	case BPF_ALU | BPF_AND | BPF_X:
		len = and_r32(buf, dst, src);
		break;
	/* dst &= imm (32-bit) */
	case BPF_ALU | BPF_AND | BPF_K:
		len = and_r32_i32(buf, dst, imm);
		break;
	/* dst |= src (32-bit) */
	case BPF_ALU | BPF_OR | BPF_X:
		len = or_r32(buf, dst, src);
		break;
	/* dst |= imm (32-bit) */
	case BPF_ALU | BPF_OR | BPF_K:
		len = or_r32_i32(buf, dst, imm);
		break;
	/* dst ^= src (32-bit) */
	case BPF_ALU | BPF_XOR | BPF_X:
		len = xor_r32(buf, dst, src);
		break;
	/* dst ^= imm (32-bit) */
	case BPF_ALU | BPF_XOR | BPF_K:
		len = xor_r32_i32(buf, dst, imm);
		break;
	/* dst <<= src (32-bit) */
	case BPF_ALU | BPF_LSH | BPF_X:
		len = lsh_r32(buf, dst, src);
		break;
	/* dst <<= imm (32-bit) */
	case BPF_ALU | BPF_LSH | BPF_K:
		len = lsh_r32_i32(buf, dst, imm);
		break;
	/* dst >>= src (32-bit) [unsigned] */
	case BPF_ALU | BPF_RSH | BPF_X:
		len = rsh_r32(buf, dst, src);
		break;
	/* dst >>= imm (32-bit) [unsigned] */
	case BPF_ALU | BPF_RSH | BPF_K:
		len = rsh_r32_i32(buf, dst, imm);
		break;
	/* dst >>= src (32-bit) [signed] */
	case BPF_ALU | BPF_ARSH | BPF_X:
		len = arsh_r32(buf, dst, src);
		break;
	/* dst >>= imm (32-bit) [signed] */
	case BPF_ALU | BPF_ARSH | BPF_K:
		len = arsh_r32_i32(buf, dst, imm);
		break;
	/* dst = src (32-bit) */
	case BPF_ALU | BPF_MOV | BPF_X:
		len = mov_r32(buf, dst, src, (u8)off);
		break;
	/* dst = imm32 (32-bit) */
	case BPF_ALU | BPF_MOV | BPF_K:
		len = mov_r32_i32(buf, dst, imm);
		break;
	/* dst = swap(dst) */
	case BPF_ALU   | BPF_END | BPF_FROM_LE:
	case BPF_ALU   | BPF_END | BPF_FROM_BE:
	case BPF_ALU64 | BPF_END | BPF_FROM_LE: {
		CHECK_RET(handle_swap(buf, dst, imm, BPF_SRC(code),
				      BPF_CLASS(code) == BPF_ALU64,
				      ctx->do_zext, &len));
		break;
	}
	/* dst += src (64-bit) */
	case BPF_ALU64 | BPF_ADD | BPF_X:
		len = add_r64(buf, dst, src);
		break;
	/* dst += imm32 (64-bit) */
	case BPF_ALU64 | BPF_ADD | BPF_K:
		len = add_r64_i32(buf, dst, imm);
		break;
	/* dst -= src (64-bit) */
	case BPF_ALU64 | BPF_SUB | BPF_X:
		len = sub_r64(buf, dst, src);
		break;
	/* dst -= imm32 (64-bit) */
	case BPF_ALU64 | BPF_SUB | BPF_K:
		len = sub_r64_i32(buf, dst, imm);
		break;
	/* dst = -dst (64-bit) */
	case BPF_ALU64 | BPF_NEG:
		len = neg_r64(buf, dst);
		break;
	/* dst *= src (64-bit) */
	case BPF_ALU64 | BPF_MUL | BPF_X:
		len = mul_r64(buf, dst, src);
		break;
	/* dst *= imm32 (64-bit) */
	case BPF_ALU64 | BPF_MUL | BPF_K:
		len = mul_r64_i32(buf, dst, imm);
		break;
	/* dst &= src (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_X:
		len = and_r64(buf, dst, src);
		break;
	/* dst &= imm32 (64-bit) */
	case BPF_ALU64 | BPF_AND | BPF_K:
		len = and_r64_i32(buf, dst, imm);
		break;
	/* dst |= src (64-bit) */
	case BPF_ALU64 | BPF_OR | BPF_X:
		len = or_r64(buf, dst, src);
		break;
	/* dst |= imm32 (64-bit) */
	case BPF_ALU64 | BPF_OR | BPF_K:
		len = or_r64_i32(buf, dst, imm);
		break;
	/* dst ^= src (64-bit) */
	case BPF_ALU64 | BPF_XOR | BPF_X:
		len = xor_r64(buf, dst, src);
		break;
	/* dst ^= imm32 (64-bit) */
	case BPF_ALU64 | BPF_XOR | BPF_K:
		len = xor_r64_i32(buf, dst, imm);
		break;
	/* dst <<= src (64-bit) */
	case BPF_ALU64 | BPF_LSH | BPF_X:
		len = lsh_r64(buf, dst, src);
		break;
	/* dst <<= imm32 (64-bit) */
	case BPF_ALU64 | BPF_LSH | BPF_K:
		len = lsh_r64_i32(buf, dst, imm);
		break;
	/* dst >>= src (64-bit) [unsigned] */
	case BPF_ALU64 | BPF_RSH | BPF_X:
		len = rsh_r64(buf, dst, src);
		break;
	/* dst >>= imm32 (64-bit) [unsigned] */
	case BPF_ALU64 | BPF_RSH | BPF_K:
		len = rsh_r64_i32(buf, dst, imm);
		break;
	/* dst >>= src (64-bit) [signed] */
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		len = arsh_r64(buf, dst, src);
		break;
	/* dst >>= imm32 (64-bit) [signed] */
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		len = arsh_r64_i32(buf, dst, imm);
		break;
	/* dst = src (64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_X:
		len = mov_r64(buf, dst, src, (u8)off);
		break;
	/* dst = imm32 (sign extend to 64-bit) */
	case BPF_ALU64 | BPF_MOV | BPF_K:
		len = mov_r64_i32(buf, dst, imm);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_DW | BPF_IMM:
		CHECK_RET(handle_ld_imm64(ctx, insn, &len));
		/* Tell the loop to skip the next instruction. */
		ret = 1;
		break;
	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		len = load_r(buf, dst, src, off, BPF_SIZE(code), false);
		break;
	/* dst = *(signed size *)(src + off) */
	case BPF_LDX | BPF_MEMSX | BPF_W:
	case BPF_LDX | BPF_MEMSX | BPF_H:
	case BPF_LDX | BPF_MEMSX | BPF_B:
		len = load_r(buf, dst, src, off, BPF_SIZE(code), true);
		break;
	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		len = store_r(buf, src, dst, off, BPF_SIZE(code));
		break;
	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		len = store_i(buf, imm, dst, off, BPF_SIZE(code));
		break;
	case BPF_JMP   | BPF_JA:
	case BPF_JMP   | BPF_JEQ  | BPF_X:
	case BPF_JMP   | BPF_JEQ  | BPF_K:
	case BPF_JMP   | BPF_JNE  | BPF_X:
	case BPF_JMP   | BPF_JNE  | BPF_K:
	case BPF_JMP   | BPF_JSET | BPF_X:
	case BPF_JMP   | BPF_JSET | BPF_K:
	case BPF_JMP   | BPF_JGT  | BPF_X:
	case BPF_JMP   | BPF_JGT  | BPF_K:
	case BPF_JMP   | BPF_JGE  | BPF_X:
	case BPF_JMP   | BPF_JGE  | BPF_K:
	case BPF_JMP   | BPF_JSGT | BPF_X:
	case BPF_JMP   | BPF_JSGT | BPF_K:
	case BPF_JMP   | BPF_JSGE | BPF_X:
	case BPF_JMP   | BPF_JSGE | BPF_K:
	case BPF_JMP   | BPF_JLT  | BPF_X:
	case BPF_JMP   | BPF_JLT  | BPF_K:
	case BPF_JMP   | BPF_JLE  | BPF_X:
	case BPF_JMP   | BPF_JLE  | BPF_K:
	case BPF_JMP   | BPF_JSLT | BPF_X:
	case BPF_JMP   | BPF_JSLT | BPF_K:
	case BPF_JMP   | BPF_JSLE | BPF_X:
	case BPF_JMP   | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JA:
	case BPF_JMP32 | BPF_JEQ  | BPF_X:
	case BPF_JMP32 | BPF_JEQ  | BPF_K:
	case BPF_JMP32 | BPF_JNE  | BPF_X:
	case BPF_JMP32 | BPF_JNE  | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JGT  | BPF_X:
	case BPF_JMP32 | BPF_JGT  | BPF_K:
	case BPF_JMP32 | BPF_JGE  | BPF_X:
	case BPF_JMP32 | BPF_JGE  | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JLT  | BPF_X:
	case BPF_JMP32 | BPF_JLT  | BPF_K:
	case BPF_JMP32 | BPF_JLE  | BPF_X:
	case BPF_JMP32 | BPF_JLE  | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		CHECK_RET(handle_jumps(ctx, insn, &len));
		break;
	case BPF_JMP | BPF_CALL:
		CHECK_RET(handle_call(ctx, insn, &len));
		break;
	case BPF_JMP | BPF_EXIT:
		/* If this is the last instruction, epilogue will follow. */
		if (is_last_insn(ctx->prog, idx))
			break;
		CHECK_RET(handle_jmp_epilogue(ctx, insn, &len));
		break;
	default:
		pr_err("bpf-jit: can't handle instruction code 0x%02X\n", code);
		return -EOPNOTSUPP;
	}
	if (BPF_CLASS(code) == BPF_ALU) {
		/*
		 * Skip the "swap" instructions. Even 64-bit swaps are of type
		 * BPF_ALU (and not BPF_ALU64). Therefore, for the swaps, one
		 * has to look at the "size" of the operations rather than the
		 * ALU type. "gen_swap()" specifically takes care of that.
		 */
		if (BPF_OP(code) != BPF_END && ctx->do_zext)
			len += zext(BUF(buf, len), dst);
	}

	jit_buffer_update(ctx, len);

	return ret;
}
static int handle_body(struct jit_context *ctx)
{
	int ret;
	bool populate_bpf2insn = false;
	const struct bpf_prog *prog = ctx->prog;

	CHECK_RET(jit_buffer_check(ctx));

	/*
	 * Record the mapping for the instructions during the dry-run.
	 * Doing it this way allows us to have the mapping ready for
	 * the jump instructions during the real compilation phase.
	 */
	if (!ctx->emit)
		populate_bpf2insn = true;

	for (u32 i = 0; i < prog->len; i++) {
		/* During the dry-run, jit.len grows gradually per BPF insn. */
		if (populate_bpf2insn)
			ctx->bpf2insn[i] = ctx->jit.len;

		CHECK_RET(handle_insn(ctx, i));
		if (ret > 0) {
			/* "ret" is 1 if two (64-bit) chunks were consumed. */
			ctx->bpf2insn[i + 1] = ctx->bpf2insn[i];
			i++;
		}
	}

	/* If bpf2insn had to be populated, then it is done at this point. */
	if (populate_bpf2insn)
		ctx->bpf2insn_valid = true;

	return 0;
}
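
/*
 * Illustrative mapping (hypothetical lengths): with a prologue of P bytes
 * and per-instruction translations of 4, 8 and 4 bytes, the dry-run leaves
 * bpf2insn[] = {P, P + 4, P + 12}. A "ld r,imm64" occupying BPF slots i
 * and i + 1 gets bpf2insn[i + 1] == bpf2insn[i], as done in the loop above.
 */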
/*
 * Initialize the memory with "unimp_s", which is the mnemonic for an
 * "unimplemented" instruction and always raises an exception.
 *
 * The instruction is 2 bytes. If "size" is odd, there is not much
 * that can be done about the last byte in "area", because the CPU
 * always fetches instructions in two-byte units. Therefore, the byte
 * beyond the last one is going to accompany it during a possible
 * fetch. In the most likely case of a little-endian system, that
 * beyond-byte will become the major opcode and we have no control
 * over its initialisation.
 */
static void fill_ill_insn(void *area, unsigned int size)
{
	const u16 unimp_s = 0x79e0;

	if (size & 1) {
		*((u8 *)area + (size - 1)) = 0xff;
		size -= 1;
	}

	memset16(area, unimp_s, size >> 1);
}
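
/*
 * Byte-level sketch (assuming a little-endian layout): filling a 5-byte
 * area yields "e0 79 e0 79 ff" -- two "unimp_s" (0x79e0) half-words from
 * memset16() plus the trailing odd byte forced to 0xff.
 */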
/* Piece of memory that can be allocated at the beginning of jit_prepare(). */
static int jit_prepare_early_mem_alloc(struct jit_context *ctx)
{
	ctx->bpf2insn = kcalloc(ctx->prog->len, sizeof(ctx->jit.len),
				GFP_KERNEL);

	if (!ctx->bpf2insn) {
		pr_err("bpf-jit: could not allocate memory for mapping of the instructions.\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Memory allocations that rely on parameters known at the end of
 * jit_prepare().
 */
static int jit_prepare_final_mem_alloc(struct jit_context *ctx)
{
	const size_t alignment = sizeof(u32);

	ctx->bpf_header = bpf_jit_binary_alloc(ctx->jit.len, &ctx->jit.buf,
					       alignment, fill_ill_insn);
	if (!ctx->bpf_header) {
		pr_err("bpf-jit: could not allocate memory for translation.\n");
		return -ENOMEM;
	}

	if (ctx->need_extra_pass) {
		ctx->jit_data = kzalloc(sizeof(*ctx->jit_data), GFP_KERNEL);
		if (!ctx->jit_data)
			return -ENOMEM;
	}

	return 0;
}
/*
 * The first phase of the translation without actually emitting any
 * instruction. It helps in getting a forecast on some aspects, such
 * as the length of the whole program or where the epilogue starts.
 *
 * Whenever the necessary parameters are known, memories are allocated.
 */
static int jit_prepare(struct jit_context *ctx)
{
	int ret;

	/* Dry run. */
	ctx->emit = false;

	CHECK_RET(jit_prepare_early_mem_alloc(ctx));

	/* Get the length of prologue section after some register analysis. */
	analyze_reg_usage(ctx);
	CHECK_RET(handle_prologue(ctx));

	CHECK_RET(handle_body(ctx));

	/* Record at which offset epilogue begins. */
	ctx->epilogue_offset = ctx->jit.len;

	/* Process the epilogue section now. */
	CHECK_RET(handle_epilogue(ctx));

	CHECK_RET(jit_prepare_final_mem_alloc(ctx));

	return 0;
}
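
/*
 * A sketch of what a successful jit_prepare() leaves behind (numbers are
 * hypothetical): with a 16-byte prologue, a 120-byte body and a 20-byte
 * epilogue, "jit.len" ends up as 156 and "epilogue_offset" as 136, every
 * BPF index is mapped into that range via bpf2insn[], and the JIT buffer
 * (plus "jit_data", if an extra pass is expected) is allocated for
 * jit_compile() to fill in.
 */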
/*
 * jit_compile() is the real compilation phase. jit_prepare() is
 * invoked before jit_compile() as a dry-run to make sure everything
 * will go OK and to allocate the necessary memory.
 *
 * In the end, jit_compile() checks if it has produced the same number
 * of instructions as jit_prepare() would.
 */
static int jit_compile(struct jit_context *ctx)
{
	int ret;

	/* Let there be code. */
	ctx->emit = true;

	CHECK_RET(handle_prologue(ctx));

	CHECK_RET(handle_body(ctx));

	CHECK_RET(handle_epilogue(ctx));

	if (ctx->jit.index != ctx->jit.len) {
		pr_err("bpf-jit: divergence between the phases; %u vs. %u (bytes).\n",
		       ctx->jit.len, ctx->jit.index);
		return -EFAULT;
	}

	return 0;
}
/*
 * Calling this function implies a successful JIT. A successful
 * translation is signaled by setting the right parameters:
 *
 * prog->jited=1, prog->jited_len=..., prog->bpf_func=...
 */
static int jit_finalize(struct jit_context *ctx)
{
	struct bpf_prog *prog = ctx->prog;

	/* We're going to need this information for the "do_extra_pass()". */
	if (ctx->need_extra_pass) {
		ctx->jit_data->bpf_header = ctx->bpf_header;
		ctx->jit_data->bpf2insn = ctx->bpf2insn;
		prog->aux->jit_data = (void *)ctx->jit_data;
	} else {
		/*
		 * If things seem finalised, then mark the JITed memory
		 * as R-X and flush it.
		 */
		if (bpf_jit_binary_lock_ro(ctx->bpf_header)) {
			pr_err("bpf-jit: Could not lock the JIT memory.\n");
			return -EFAULT;
		}
		flush_icache_range((unsigned long)ctx->bpf_header,
				   (unsigned long)
				   BUF(ctx->jit.buf, ctx->jit.len));
		prog->aux->jit_data = NULL;
		bpf_prog_fill_jited_linfo(prog, ctx->bpf2insn);
	}

	ctx->success = true;
	prog->bpf_func = (void *)ctx->jit.buf;
	prog->jited_len = ctx->jit.len;
	prog->jited = 1;

	jit_ctx_cleanup(ctx);
	jit_dump(ctx);

	return 0;
}
/*
 * A lenient verification for the existence of JIT context in "prog".
 * Apparently the JIT internals, namely jit_subprogs() in bpf/verifier.c,
 * may request a second compilation although nothing needs to be done.
 */
static inline int check_jit_context(const struct bpf_prog *prog)
{
	if (!prog->aux->jit_data) {
		pr_notice("bpf-jit: no jit data for the extra pass.\n");
		return 1;
	}

	return 0;
}
/* Reuse the previous pass's data. */
static int jit_resume_context(struct jit_context *ctx)
{
	struct arc_jit_data *jdata =
		(struct arc_jit_data *)ctx->prog->aux->jit_data;

	if (!jdata) {
		pr_err("bpf-jit: no jit data for the extra pass.\n");
		return -EINVAL;
	}

	ctx->jit.buf = (u8 *)ctx->prog->bpf_func;
	ctx->jit.len = ctx->prog->jited_len;
	ctx->bpf_header = jdata->bpf_header;
	ctx->bpf2insn = (u32 *)jdata->bpf2insn;
	ctx->bpf2insn_valid = ctx->bpf2insn ? true : false;
	ctx->jit_data = jdata;

	return 0;
}
/*
 * Patch in the new addresses. The instructions of interest are:
 *
 * - call
 * - ld r64, imm64
 *
 * For "call"s, it resolves the addresses one more time through
 * bpf_jit_get_func_addr().
 *
 * For 64-bit immediate loads, it just retranslates them, because the BPF
 * core in the kernel might have changed the value since the normal pass.
 */
static int jit_patch_relocations(struct jit_context *ctx)
{
	const u8 bpf_opc_call = BPF_JMP | BPF_CALL;
	const u8 bpf_opc_ldi64 = BPF_LD | BPF_DW | BPF_IMM;
	const struct bpf_prog *prog = ctx->prog;
	int ret = 0;
	u8 dummy;

	ctx->emit = true;
	for (u32 i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		/*
		 * Adjust "ctx.jit.index", so "gen_*()" functions below
		 * can use it for their output addresses.
		 */
		ctx->jit.index = ctx->bpf2insn[i];

		if (insn->code == bpf_opc_call) {
			CHECK_RET(handle_call(ctx, insn, &dummy));
		} else if (insn->code == bpf_opc_ldi64) {
			CHECK_RET(handle_ld_imm64(ctx, insn, &dummy));
			/* Skip the next instruction. */
			i++;
		}
	}

	return 0;
}
/*
 * A normal pass that involves a "dry-run" phase, jit_prepare(),
 * to get the necessary data for the real compilation phase,
 * jit_compile().
 */
static struct bpf_prog *do_normal_pass(struct bpf_prog *prog)
{
	struct jit_context ctx;

	/* Bail out if JIT is disabled. */
	if (!prog->jit_requested)
		return prog;

	if (jit_ctx_init(&ctx, prog)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	/* Get the lengths and allocate buffer. */
	if (jit_prepare(&ctx)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	if (jit_compile(&ctx)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	if (jit_finalize(&ctx)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	return ctx.prog;
}
/*
 * If there are multi-function BPF programs that call each other,
 * their translated addresses are not known all at once. Therefore,
 * an extra pass is needed to consult bpf_jit_get_func_addr() again
 * to get the newly translated addresses in order to resolve the
 * "call"s.
 */
static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
{
	struct jit_context ctx;

	/* Skip if there's no context to resume from. */
	if (check_jit_context(prog))
		return prog;

	if (jit_ctx_init(&ctx, prog)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	if (jit_resume_context(&ctx)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	if (jit_patch_relocations(&ctx)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	if (jit_finalize(&ctx)) {
		jit_ctx_cleanup(&ctx);
		return prog;
	}

	return ctx.prog;
}
/*
 * This function may be invoked twice for the same stream of BPF
 * instructions. The "extra pass" happens when there are
 * (re)locations involved whose addresses are not known
 * during the first run.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	vm_dump(prog);

	/* Was this program already translated? */
	if (!prog->jited)
		return do_normal_pass(prog);
	else
		return do_extra_pass(prog);
}