/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Based on code from:
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>
/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31
/* eBPF flags */
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)
/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register.  The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * program progresses.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};
/*
 * high bit of offsets indicates if long branch conversion done at
 * this insn.
 */
#define OFFSETS_B_CONV	BIT(31)
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @stack_size:		eBPF stack size
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};
static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}
static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
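
/*
 * Illustrative note (not in the original source): each BPF register
 * gets a 3-bit type code in the packed word, so e.g.
 * set_reg_val_type(&rvt, BPF_REG_2, REG_32BIT) stores the code for
 * REG_32BIT at bits 6..8 (2 * 3 = 6).  Eleven registers use only the
 * low 33 bits of the u64, leaving the top bits free for the RVT_*
 * visit flags defined further down.
 */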
/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
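
/*
 * Usage sketch (illustrative only): emit_instr(ctx, daddiu, MIPS_R_SP,
 * MIPS_R_SP, -16) calls uasm_i_daddiu() only when ctx->target is
 * non-NULL, but advances ctx->idx by one word in every pass; this is
 * how the sizing passes measure the image without writing it.
 */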
static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return (unsigned int)-1;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1; /* jump target too far */
	r = target_va & 0x0ffffffful;
	return r;
}
/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch.  tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
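
/*
 * Worked example (illustrative, not in the original): if the branch
 * sits at byte offset ctx->idx * 4 == 0x40 and offsets[tgt] == 0x60,
 * b_imm() returns 0x60 - 0x40 - 4 = 0x1c, i.e. the displacement is
 * measured from the delay slot that follows the branch.
 */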
enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};
/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
		     enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}
/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |                                |
 *  $sp -------->       +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If RA we are doing a function call and may need
		 * extra 8-byte tmp area.
		 */
		stack_adjust += 16;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
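
/*
 * Worked sizing example (illustrative): a program that makes a helper
 * call (EBPF_SAVE_RA), uses BPF_REG_6 (EBPF_SAVE_S0) and references
 * BPF_REG_10 (EBPF_SEEN_FP) gets stack_adjust = 16 + 8 + MAX_BPF_STACK,
 * with $ra stored highest and the BPF stack area at the bottom.
 */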
static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	enum reg_val_type td;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA) {
		/* Don't let zero extended value escape. */
		td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			emit_instr(ctx, sll, r0, r0, 0);
	}

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}
static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
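
/*
 * Worked example (illustrative, not in the original): for
 * insn->imm == 0x12348765, lower = (s16)0x8765 = -0x789b and
 * upper = 0x12348765 - (-0x789b) = 0x12350000, so we emit
 *	lui	reg, 0x1235
 *	addiu	reg, reg, -0x789b
 * and the addiu's sign extension cancels out exactly.
 */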
static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* these immediates are unsigned */
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}
static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
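
/*
 * Worked example (illustrative, not in the original): for
 * value == 0x500000001ull none of the short forms apply, and the
 * 16-bit-part loop emits
 *	lui	dst, 5		# dst = 0x0005_0000
 *	dsll	dst, dst, 16	# dst = 0x5_0000_0000
 *	ori	dst, dst, 1	# dst = 0x5_0000_0001
 * skipping the all-zero parts entirely.
 */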
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (TCC-- < 0)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, daddiu, MIPS_R_T5,
		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
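
/*
 * Rough shape of the emitted sequence (illustrative sketch only):
 *	lwu	$t5, max_entries($a1)	# array bound
 *	sltu	$at, $t5, $a2
 *	bne	$at, $zero, <out>
 *	 daddiu	$t5, <tcc reg>, -1	# TCC decrement in the delay slot
 *	bltz	$t5, <out>
 *	 dsll	$t8, $a2, 3		# index * sizeof(void *)
 *	...
 *	daddiu	$t9, $t9, 4		# skip the callee's TCC init insn
 *	jr	$t9			# via build_int_epilogue()
 * where <out> is simply the fall-through to the next BPF insn.
 */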
static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
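
/*
 * Note (added): MIPS conditional branches encode a signed 16-bit word
 * offset, i.e. roughly +/-128 KiB of byte reach, hence the
 * [-0x20000, 0x1ffff] byte-offset window tested above.
 */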
/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, dmultu, MIPS_R_AT, dst);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, multu, dst, MIPS_R_AT);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, dmultu, dst, src);
			emit_instr(ctx, mflo, dst);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				switch (bpf_op) {
				case BPF_JSGT:
					emit_instr(ctx, blez, dst, b_off);
					break;
				case BPF_JSGE:
					emit_instr(ctx, bltz, dst, b_off);
					break;
				case BPF_JSLT:
					emit_instr(ctx, bgez, dst, b_off);
					break;
				case BPF_JSLE:
					emit_instr(ctx, bgtz, dst, b_off);
					break;
				}
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			switch (bpf_op) {
			case BPF_JSGT:
				emit_instr(ctx, bgtz, dst, b_off);
				break;
			case BPF_JSGE:
				emit_instr(ctx, bgez, dst, b_off);
				break;
			case BPF_JSLT:
				emit_instr(ctx, bltz, dst, b_off);
				break;
			case BPF_JSLE:
				emit_instr(ctx, blez, dst, b_off);
				break;
			}
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT" (dst > imm  <=>  !(dst < imm + 1))
		 * and "LE" (dst <= imm  <=>  dst < imm + 1).
		 */
		if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;

		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT" and "LE" (same transformation as in
		 * the signed case above).
		 */
		if (bpf_op == BPF_JGT || bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;
	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;
	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (s64)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 &&
		    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;
	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;
	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;
	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each)
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;

	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}
#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
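
/*
 * Note (added): the two top bits of each reg_val_types entry record
 * which control-flow edges out of the insn reg_val_propagate_range()
 * has followed.  RVT_DONE (both bits) means fully processed; a value
 * of 0 means the insn was never reached and is therefore dead code.
 */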
static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue, some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}
/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);

				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}
/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn.  This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * It does not yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves top bits free for other
	 * uses.  Bits 62..63 are used to see if we have visited an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow.
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge.  We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done.  Any insn that has not been
	 * visited at this point is dead.
	 */
	return 0;
}
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}
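
/*
 * Note (added): padding the unused tail of the JIT image with break
 * instructions means any stray jump into the padding traps immediately
 * instead of executing leftover bytes as code.
 */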
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;

	if (!prog->jit_requested || !cpu_has_mips64r2)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		ctx.use_bbit_insns = 1;
		break;
	default:
		ctx.use_bbit_insns = 0;
	}

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (EBPF_SAVE_RA), then the tail call count
	 * can stay in $v1, else we must save it in $s4.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets.  If any branches are out of
	 * range, a jump-around long sequence is generated, and we have
	 * to try again from the beginning to generate the new
	 * offsets.  This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);

	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;
out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);

	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}