/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>
/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)
/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register.  The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * program progresses.
 */
enum reg_val_type {
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS,
};
/*
 * high bit of offsets indicates if long branch conversion done at
 * this insn.
 */
#define OFFSETS_B_CONV	BIT(31)
/**
 * struct jit_ctx - JIT context
 * @skf:		The program being compiled
 * @stack_size:		eBPF stack size
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};
static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}

static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
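/*
 * Layout note: each of the eleven eBPF registers gets a 3-bit slot in
 * the packed u64, so register r lives at bits [3*r + 2 : 3*r].  For
 * example, set_reg_val_type(&rvt, 2, REG_32BIT) rewrites only bits
 * 8..6.  11 registers * 3 bits uses 33 bits, leaving the top bits
 * free for the RVT_* visitation flags defined further below.
 */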
/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return (unsigned int)-1;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;
	return r >> 2;
}
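/*
 * The 256MB check above reflects the MIPS j instruction, which can
 * only encode a 26-bit instruction index within the current 256MB
 * segment; r >> 2 converts the target byte address to that index.
 */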
/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch.  tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
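/*
 * Worked example: a branch at insn index 10 (byte offset 40) whose
 * target insn sits at byte offset 60 gets the immediate
 * 60 - (10 * 4) - 4 = 16, i.e. the displacement is measured from the
 * delay slot, not from the branch itself.
 */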
enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};
/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
		     enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
bad_reg:
		return -EINVAL;
	}
}
/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |      .                         |
 *                      |      .                         |
 *     $sp -------->    +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If RA we are doing a function call and may need
		 * extra 8-byte tmp area.
		 */
		stack_adjust += 16;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
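/*
 * Example (sketch, assuming the 16/8-byte save sizes above): a
 * program that calls a helper (EBPF_SAVE_RA) and uses BPF_REG_6
 * (EBPF_SAVE_S0), but never BPF_REG_10, gets stack_adjust = 24 and a
 * prologue roughly like:
 *
 *	daddiu	$v1, $zero, MAX_TAIL_CALL_CNT
 *	daddiu	$sp, $sp, -24
 *	sd	$ra, 16($sp)
 *	sd	$s0, 8($sp)
 */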
static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	int r0 = MIPS_R_V0;
	enum reg_val_type td;

	if (dest_reg == MIPS_R_RA) {
		/* Don't let zero extended value escape. */
		td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			emit_instr(ctx, sll, r0, r0, 0);
	}

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}
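/*
 * Note: on MIPS64, "sll reg, reg, 0" sign-extends the low 32 bits of
 * a register, so it is the canonical single-instruction way to turn
 * a zero-extended 32-bit value back into the sign-extended form the
 * n64 ABI expects in $v0.
 */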
static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
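/*
 * Worked example: imm = 0x12348765 gives lower = (s16)0x8765 = -30875
 * and upper = imm - lower = 0x12350000, so the pair
 * "lui reg, 0x1235; addiu reg, reg, -30875" reconstructs the
 * constant while compensating for addiu sign-extending its 16-bit
 * immediate.
 */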
static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}
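/*
 * Example (sketch): BPF_ALU64 | BPF_ADD | BPF_K with imm = 0x12345
 * overflows the signed 16-bit daddiu field, so it takes the
 * multi-insn path: gen_imm_to_reg() materializes 0x12345 in $at
 * (lui + addiu) and a single "daddu dst, dst, $at" finishes the add.
 */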
static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
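/*
 * Example (sketch): value = 0x0102000000000003 emits
 *	lui	dst, 0x0102
 *	dsll	dst, dst, 32	(via dsll_safe)
 *	ori	dst, dst, 0x3
 * because the two zero middle 16-bit parts only accumulate shift
 * amount (needed_shift) instead of costing instructions.
 */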
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;
	int tcc_reg;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (TCC-- < 0)
	 *	goto out;
	 */
	/* Delay slot */
	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, tcc_reg, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *	goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
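/*
 * The indirect jump produced above lands 4 bytes past bpf_func,
 * skipping the callee's first instruction (the daddiu that would
 * re-initialize TCC in $v1), so the decremented tail-call count
 * propagates into the called program.
 */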
static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
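/*
 * MIPS conditional branches encode a signed 16-bit word offset,
 * giving a byte displacement range of -0x20000 .. 0x1fffc from the
 * delay slot; anything outside it must use the jump-around
 * conversion.
 */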
/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, dmultu, MIPS_R_AT, dst);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, multu, dst, MIPS_R_AT);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, dmultu, dst, src);
			emit_instr(ctx, mflo, dst);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, dst, b_off);
				else if (bpf_op == BPF_JSGE)
					emit_instr(ctx, bltz, dst, b_off);
				else if (bpf_op == BPF_JSLT)
					emit_instr(ctx, bgez, dst, b_off);
				else /* JSLE */
					emit_instr(ctx, bgtz, dst, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, dst, b_off);
			else if (bpf_op == BPF_JSGE)
				emit_instr(ctx, bgez, dst, b_off);
			else if (bpf_op == BPF_JSLT)
				emit_instr(ctx, bltz, dst, b_off);
			else /* JSLE */
				emit_instr(ctx, blez, dst, b_off);
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT" (as not-LT) and "LE" (as LT of imm + 1)
		 */
		if (bpf_op == BPF_JSGT)
			t64s = insn->imm + 1;
		else if (bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;

		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate "GT" (as not-LT) and "LE" (as LT of imm + 1)
		 */
		if (bpf_op == BPF_JGT)
			t64s = (u64)(u32)(insn->imm) + 1;
		else if (bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;
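	/*
	 * bbit0/bbit1 used above are Cavium Octeon-specific
	 * branch-on-bit instructions: they test a single bit and
	 * branch in one insn, which is why the JSET case with a
	 * power-of-2 immediate prefers them when ctx->use_bbit_insns
	 * is set.
	 */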
	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (s64)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;

	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 &&
		    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;
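	/*
	 * Example (sketch): for the 32-bit swap, "wsbh dst, dst"
	 * reverses the bytes within each halfword and
	 * "rotr dst, dst, 16" swaps the halfwords, together reversing
	 * all four bytes; the 64-bit dsbh/dshd pair above works the
	 * same way.
	 */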
	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;

	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;

	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each).
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;

	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}
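/*
 * Return-value convention: 1 means one insn slot was consumed, 2 is
 * returned for the double-slot BPF_LD | BPF_DW | BPF_IMM and for the
 * "We consumed the exit" branch-threading cases, and negative values
 * are errors.
 */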
#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue, some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}
/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);

				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}
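/*
 * Visitation example: a conditional branch is first marked
 * RVT_FALL_THROUGH when scanned with follow_taken == false; a later
 * restart revisits it with follow_taken == true and adds
 * RVT_BRANCH_TAKEN, so both bits together (RVT_DONE) mean both edges
 * have been explored.
 */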
/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn.  This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * It doesn't yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves top bits free for other
	 * uses.  Bits 62..63 are used to see if we have visited an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow.
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge.  We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done.  Any insn that has not been
	 * visited at this point is dead.
	 */

	return 0;
}
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;

	if (!prog->jit_requested || !cpu_has_mips64r2)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		ctx.use_bbit_insns = 1;
		break;
	default:
		ctx.use_bbit_insns = 0;
	}

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (EBPF_SAVE_RA), then the tail call
	 * count is kept in $v1, else we must save it in $s4.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets, if any branches are out of
	 * range a jump-around long sequence is generated, and we have
	 * to try again from the beginning to generate the new
	 * offsets.  This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);
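	/*
	 * Convergence note: OFFSETS_B_CONV is sticky (a converted
	 * branch is never converted back), so each iteration can only
	 * add conversions and the loop above reaches a fixed point.
	 */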
	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;

out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);

	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}