// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/isa-rev.h>
#include <asm/uasm.h>
/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)
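
/*
 * ctx->flags bits: the EBPF_SAVE_* flags mark which callee-saved
 * registers (and $ra) the prologue must spill and the epilogue must
 * restore; EBPF_SEEN_FP and EBPF_SEEN_TC record that the program uses
 * the frame pointer or a tail call; EBPF_TCC_IN_V1 records that the
 * tail call count is still live in $v1 rather than in $s4.
 */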
/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register. The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * program requires.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};
/*
 * high bit of offsets indicates if long branch conversion done at
 * this insn
 */
#define OFFSETS_B_CONV	BIT(31)
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @stack_size:		eBPF stack size
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register.
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};
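
/*
 * Each eBPF register gets a 3-bit reg_val_type slot packed into a
 * single u64 (one word per instruction in ctx->reg_val_types), so the
 * full register-state snapshot at any insn fits in one value.
 */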
static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}
static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr_long(ctx, func64, func32, ...)		\
do {								\
	if ((ctx)->target != NULL) {				\
		u32 *p = &(ctx)->target[ctx->idx];		\
		if (IS_ENABLED(CONFIG_64BIT))			\
			uasm_i_##func64(&p, ##__VA_ARGS__);	\
		else						\
			uasm_i_##func32(&p, ##__VA_ARGS__);	\
	}							\
	(ctx)->idx++;						\
} while (0)

#define emit_instr(ctx, func, ...) \
	emit_instr_long(ctx, func, func, ##__VA_ARGS__)
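
/*
 * Note that emit_instr() bumps ctx->idx even when ctx->target is NULL;
 * the early passes run without an output buffer purely to count
 * instructions and record per-insn offsets.
 */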
static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return 0;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;

	return r;
}
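
/*
 * The MIPS j instruction can only reach targets in the same 256 MB
 * (28-bit) region as the jump's delay slot, which is what the
 * ~0x0ffffffful comparison above checks for.
 */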
/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};
/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
static int ebpf_to_mips_reg(struct jit_ctx *ctx,
			    const struct bpf_insn *insn,
			    enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}
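
/*
 * BPF_REG_10 (the read-only eBPF frame pointer) deliberately gets no
 * MIPS register of its own: every use of it is rewritten below to
 * address relative to $sp + MAX_BPF_STACK instead.
 */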
/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |      .                         |
 *                      |      .                         |
 *     $sp -------->    +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If RA we are doing a function call and may need
		 * extra 8-byte tmp area.
		 */
		stack_adjust += 2 * sizeof(long);
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += sizeof(long);
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += sizeof(long);

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr_long(ctx, daddiu, addiu,
					MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - sizeof(long);

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr_long(ctx, sd, sw,
					MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr_long(ctx, daddu, addu,
					MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
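
/*
 * The epilogue below restores the saved registers in the same order the
 * prologue spilled them and, when returning to the caller through $ra,
 * re-sign-extends BPF_R0 ($v0) if it is not already known to be a
 * proper 32-bit value, as the n64 ABI expects.
 */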
static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - sizeof(long);
	enum reg_val_type td;
	int r0 = MIPS_R_V0;	/* BPF_R0 */

	if (dest_reg == MIPS_R_RA) {
		/* Don't let zero extended value escape. */
		td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
		if (td == REG_64BIT)
			emit_instr(ctx, sll, r0, r0, 0);
	}

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr_long(ctx, ld, lw,
					MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= sizeof(long);
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr_long(ctx, daddiu, addiu,
					MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}
static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
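
/*
 * Example: imm = 0x00018000 does not fit a single addiu, so the else
 * branch above emits lui reg, 0x2 (reg = 0x20000) followed by
 * addiu reg, reg, -0x8000, leaving 0x18000 in reg; splitting at the
 * sign of the low halfword is what makes the lui/addiu pair compose.
 */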
static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}
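
/*
 * emit_const_to_reg() below is the 64-bit counterpart of
 * gen_imm_to_reg(): one daddiu for values that fit a signed 16-bit
 * immediate, lui/ori for values that fit 32 bits, and otherwise a
 * lui/ori/dsll sequence built 16 bits at a time.
 */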
static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
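
/*
 * emit_bpf_tail_call() open-codes the usual eBPF tail-call contract:
 * fall through to the next instruction if the index is out of bounds,
 * if the tail-call count is exhausted, or if the target slot is NULL;
 * otherwise jump into the target program just past its TCC-initializing
 * first instruction, propagating the decremented count in $v1.
 */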
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;
	int tcc_reg;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (TCC-- < 0)
	 *     goto out;
	 */
	/* Delay slot */
	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, tcc_reg, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
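
/*
 * Conditional branches on MIPS take a signed 18-bit byte offset (a
 * 16-bit instruction offset shifted left by 2), so anything outside
 * roughly +/-128 KB needs the jump-around conversion via j_target().
 */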
/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64)
						|| (bpf_op == BPF_DW)))
		return -EINVAL;

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT);
		} else {
			emit_instr(ctx, dmultu, MIPS_R_AT, dst);
			emit_instr(ctx, mflo, dst);
		}
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			emit_instr(ctx, mulu, dst, dst, MIPS_R_AT);
		} else {
			emit_instr(ctx, multu, dst, MIPS_R_AT);
			emit_instr(ctx, mflo, dst);
		}
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT);
			else
				emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
			break;
		}
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		if (MIPS_ISA_REV >= 6) {
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT);
			else
				emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
			break;
		}
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			if (MIPS_ISA_REV >= 6) {
				emit_instr(ctx, dmulu, dst, dst, src);
			} else {
				emit_instr(ctx, dmultu, dst, src);
				emit_instr(ctx, mflo, dst);
			}
			break;
		case BPF_DIV:
		case BPF_MOD:
			if (MIPS_ISA_REV >= 6) {
				if (bpf_op == BPF_DIV)
					emit_instr(ctx, ddivu_r6,
							dst, dst, src);
				else
					emit_instr(ctx, modu, dst, dst, src);
				break;
			}
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ARSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			if (MIPS_ISA_REV >= 6) {
				if (bpf_op == BPF_DIV)
					emit_instr(ctx, divu_r6, dst, dst, src);
				else
					emit_instr(ctx, modu, dst, dst, src);
				break;
			}
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, srav, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			if (MIPS_ISA_REV >= 6) {
				emit_instr(ctx, seleqz, MIPS_R_T9,
						MIPS_R_SP, MIPS_R_T8);
			} else {
				emit_instr(ctx, movz, MIPS_R_T9,
						MIPS_R_SP, MIPS_R_T8);
				emit_instr(ctx, movn, MIPS_R_T9,
						MIPS_R_ZERO, MIPS_R_T8);
			}
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit. Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				switch (bpf_op) {
				case BPF_JSGT:
					emit_instr(ctx, blez, dst, b_off);
					break;
				case BPF_JSGE:
					emit_instr(ctx, bltz, dst, b_off);
					break;
				case BPF_JSLT:
					emit_instr(ctx, bgez, dst, b_off);
					break;
				case BPF_JSLE:
					emit_instr(ctx, bgtz, dst, b_off);
					break;
				}
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			switch (bpf_op) {
			case BPF_JSGT:
				emit_instr(ctx, bgtz, dst, b_off);
				break;
			case BPF_JSGE:
				emit_instr(ctx, bgez, dst, b_off);
				break;
			case BPF_JSLT:
				emit_instr(ctx, bltz, dst, b_off);
				break;
			case BPF_JSLE:
				emit_instr(ctx, blez, dst, b_off);
				break;
			}
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT" and imm -1 to generate LE
		 */
		if (bpf_op == BPF_JSGT)
			t64s = insn->imm + 1;
		else if (bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;

		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;
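
	/*
	 * Example: "jsgt dst, 5" takes the slti path above as
	 * slti $at, dst, 6 with cmp_eq set, so the branch is taken
	 * exactly when dst < 6 is false, i.e. when dst > 5.
	 */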
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT" and imm -1 to generate LE
		 */
		if (bpf_op == BPF_JGT)
			t64s = (u64)(u32)(insn->imm) + 1;
		else if (bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;
	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;
	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */
	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (long)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 && td == REG_64BIT) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;
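
	/*
	 * wsbh swaps the bytes within each halfword; following it with a
	 * 16-bit rotate completes a full 32-bit byte swap, and the
	 * dsbh/dshd pair does the equivalent for 64 bits.
	 */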
	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;
	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;
	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			/*
			 * If mem_off does not fit within the 9 bit ll/sc
			 * instruction immediate field, use a temp reg.
			 */
			if (MIPS_ISA_REV >= 6 &&
			    (mem_off >= BIT(8) || mem_off < -BIT(8))) {
				emit_instr(ctx, daddiu, MIPS_R_T6,
						dst, mem_off);
				mem_off = 0;
				dst = MIPS_R_T6;
			}
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;
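
	/*
	 * The XADD sequences above rely on ll/sc (lld/scd for 64 bit):
	 * if the store-conditional fails because the line was disturbed,
	 * the beq on the sc result loops back four instructions to retry
	 * the whole load-add-store atomically.
	 */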
	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}
#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
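
/*
 * The two top bits of each reg_val_types word record which control-flow
 * edges out of that insn have already been walked: fall-through, branch
 * taken, or both (RVT_DONE). An insn that still shows neither bit after
 * propagation was never reached and is treated as dead code.
 */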
static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue, some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}
/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);

				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}
/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn. This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * Doesn't handle yet confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves top bits free for other
	 * uses. Bit-62..63 used to see if we have visited an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow..
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge. We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done. Any insn that has not been
	 * visited at this point is dead.
	 */

	return 0;
}
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}
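
/*
 * Filling the freshly allocated image with break instructions means a
 * stray jump into an unused hole traps (BRK_BUG) instead of executing
 * whatever bytes happened to be there.
 */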
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		ctx.use_bbit_insns = 1;
		break;
	default:
		ctx.use_bbit_insns = 0;
	}

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (EBPF_SAVE_RA), then the tail call count
	 * can stay in $v1; otherwise we must save it in $s4.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets, if any branches are out of
	 * range a jump-around long sequence is generated, and we have
	 * to try again from the beginning to generate the new
	 * offsets. This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);
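
	/*
	 * At this point ctx.idx holds the final instruction count, so
	 * the image can be sized and allocated; the third pass below
	 * then emits the same code for real into ctx.target.
	 */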
	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;
out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);

	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}