/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>
/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_T7	15
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31

/* eBPF flags */
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)
/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register.  The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * program requires.
 */
enum reg_val_type {
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};

/*
 * high bit of offsets indicates if long branch conversion done at
 * this insn.
 */
#define OFFSETS_B_CONV	BIT(31)
/**
 * struct jit_ctx - JIT context
 * @skf:		The compiled eBPF program
 * @stack_size:		eBPF stack size
 * @tmp_offset:		eBPF $sp offset to 8-byte temporary memory
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	int tmp_offset;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};
static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}
static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
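
/*
 * Illustrative example (not part of the original source): with three
 * bits per register, BPF_REG_2 occupies bits 6..8 of the packed word.
 * After
 *
 *	u64 rvt = 0;
 *	set_reg_val_type(&rvt, BPF_REG_2, REG_32BIT);
 *
 * the packed value is ((u64)REG_32BIT << 6), and get_reg_val_type()
 * recovers it by shifting right by 6 and masking with 7.
 */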
/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)
static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return UINT_MAX;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;

	return r;
}
/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch.  tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
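
/*
 * Worked example (illustrative, not from the original source): if the
 * branch is being emitted at ctx->idx == 10 (byte offset 40) and
 * ctx->offsets[tgt] says the target starts at byte offset 64, b_imm()
 * returns 64 - 40 - 4 = 20.  The hardware adds that to the PC of the
 * delay slot (byte 44), landing exactly on byte 64.
 */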
enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};
/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
static int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
			    enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}
/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |      .                         |
 *                      |      .                         |
 *                      |      .                         |
 * $sp -------->        +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
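
/*
 * Illustrative sizing example (not from the original source): for a
 * program that makes a helper call (EBPF_SAVE_RA), saves $s0, and
 * references BPF_REG_10, gen_int_prologue() below computes
 * stack_adjust = 16 + 8 + MAX_BPF_STACK and tmp_offset =
 * MAX_BPF_STACK.  With the usual MAX_BPF_STACK of 512 the frame is
 * 536 bytes and BPF_REG_10 accesses become $sp + 512 relative.
 */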
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If RA we are doing a function call and may need
		 * extra 8-byte tmp area.
		 */
		stack_adjust += 2 * 8;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;
	ctx->tmp_offset = locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA &&
	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
		/* Don't let zero extended value escape. */
		emit_instr(ctx, sll, r0, r0, 0);

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}
static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
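
/*
 * Worked example (illustrative, not from the original source): for
 * imm = 0x12348765 the low half 0x8765 sign-extends to -0x789b, so
 * upper = 0x12348765 + 0x789b = 0x12350000.  The emitted pair is
 * "lui reg, 0x1235; addiu reg, reg, -0x789b", where the addiu undoes
 * the borrow introduced by sign extension.
 */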
static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* these immediates are unsigned */
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}
static void * __must_check
ool_skb_header_pointer(const struct sk_buff *skb, int offset,
		       int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}
static int size_to_len(const struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_B:
		return 1;
	case BPF_H:
		return 2;
	case BPF_W:
		return 4;
	case BPF_DW:
		return 8;
	}
	return 0;
}
static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
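
/*
 * Illustrative example (not from the original source): loading
 * 0x0123456789abcdef walks the four 16-bit parts from most to least
 * significant and emits roughly
 *
 *	lui  dst, 0x0123
 *	ori  dst, dst, 0x4567
 *	dsll dst, dst, 16
 *	ori  dst, dst, 0x89ab
 *	dsll dst, dst, 16
 *	ori  dst, dst, 0xcdef
 *
 * skipping the shift/or steps for any 16-bit part that is zero.
 */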
static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (--TCC < 0)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, daddiu, MIPS_R_T5,
		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
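
/*
 * Context note (illustrative, not from the original source): MIPS
 * conditional branches encode a signed 16-bit word offset relative to
 * the delay slot, so is_bad_offset() rejects anything outside the
 * byte range [-0x20000, 0x1ffff].  Branches that fail this test are
 * rewritten via j_target()/OFFSETS_B_CONV as an inverted branch
 * around an absolute jump.
 */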
/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, dmultu, MIPS_R_AT, dst);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, multu, dst, MIPS_R_AT);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, dmultu, dst, src);
			emit_instr(ctx, mflo, dst);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				switch (bpf_op) {
				case BPF_JSGT:
					emit_instr(ctx, blez, dst, b_off);
					break;
				case BPF_JSGE:
					emit_instr(ctx, bltz, dst, b_off);
					break;
				case BPF_JSLT:
					emit_instr(ctx, bgez, dst, b_off);
					break;
				case BPF_JSLE:
					emit_instr(ctx, bgtz, dst, b_off);
					break;
				}
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			switch (bpf_op) {
			case BPF_JSGT:
				emit_instr(ctx, bgtz, dst, b_off);
				break;
			case BPF_JSGE:
				emit_instr(ctx, bgez, dst, b_off);
				break;
			case BPF_JSLT:
				emit_instr(ctx, bltz, dst, b_off);
				break;
			case BPF_JSLE:
				emit_instr(ctx, blez, dst, b_off);
				break;
			}
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate the "GT" and "LE" comparisons.
		 */
		if (bpf_op == BPF_JSGT)
			t64s = insn->imm + 1;
		else if (bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;

		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * only "LT" compare available, so we must use imm + 1
		 * to generate the "GT" and "LE" comparisons.
		 */
		if (bpf_op == BPF_JGT)
			t64s = (u64)(u32)(insn->imm) + 1;
		else if (bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;

	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (s64)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;

	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;

	case BPF_LD | BPF_B | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_DW | BPF_ABS:
		ctx->flags |= EBPF_SAVE_RA;

		gen_imm_to_reg(insn, MIPS_R_A1, ctx);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));

		if (insn->imm < 0) {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
		} else {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
			emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		}
		goto ld_skb_common;

	case BPF_LD | BPF_B | BPF_IND:
	case BPF_LD | BPF_H | BPF_IND:
	case BPF_LD | BPF_W | BPF_IND:
	case BPF_LD | BPF_DW | BPF_IND:
		ctx->flags |= EBPF_SAVE_RA;
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, MIPS_R_A1, src, 0);
			src = MIPS_R_A1;
		}
		if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
			emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
		}
		/* truncate to 32-bit int */
		emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
		emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);

		emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
		emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);

ld_skb_common:
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot move */
		emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);

		/* Check the error value */
		b_off = b_imm(exit_idx, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, exit_idx);
			if (target == (unsigned int)-1)
				return -E2BIG;

			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
			emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
			emit_instr(ctx, nop);
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		} else {
			emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}

#ifdef __BIG_ENDIAN
		need_swap = false;
#else
		need_swap = true;
#endif
		dst = MIPS_R_V0;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
			break;
		}
		break;
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 &&
		    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;

	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;

	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;

	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each)
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;

	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}
#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
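
/*
 * Illustrative note (not from the original source): the two top bits
 * of each reg_val_types entry record how the insn was reached.  After
 * the first reg_val_propagate_range() pass, a conditional branch that
 * has only been fallen through reads RVT_FALL_THROUGH under
 * RVT_VISITED_MASK; once its taken edge is also followed it reads
 * RVT_DONE, and an entry still at zero was never reached and is
 * treated as dead by build_int_body().
 */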
static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue, some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}
/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);

				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}
/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn.  This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * Doesn't yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves top bits free for other
	 * uses.  Bits 62..63 are used to see if we have visited an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow..
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge.  We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done.  Any insn that has not been
	 * visited at this point is dead.
	 */
	return 0;
}
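
/*
 * Illustrative trace (not from the original source): for a program
 *
 *	0: r1 = *(u32 *)(r1 + 0)
 *	1: if r1 > 7 goto +1
 *	2: r0 = 1
 *	3: exit
 *
 * the first pass marks insn 1 RVT_FALL_THROUGH and stops at the exit;
 * the restart loop then finds insn 1, follows the taken edge to insn 3
 * with the same register state, and upgrades insn 1 to RVT_DONE.
 */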
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}
1839 struct bpf_prog
*bpf_int_jit_compile(struct bpf_prog
*prog
)
1841 struct bpf_prog
*orig_prog
= prog
;
1842 bool tmp_blinded
= false;
1843 struct bpf_prog
*tmp
;
1844 struct bpf_binary_header
*header
= NULL
;
1846 unsigned int image_size
;
1849 if (!prog
->jit_requested
|| !cpu_has_mips64r2
)
1852 tmp
= bpf_jit_blind_constants(prog
);
1853 /* If blinding was requested and we failed during blinding,
1854 * we must fall back to the interpreter.
1863 memset(&ctx
, 0, sizeof(ctx
));
1866 switch (current_cpu_type()) {
1867 case CPU_CAVIUM_OCTEON
:
1868 case CPU_CAVIUM_OCTEON_PLUS
:
1869 case CPU_CAVIUM_OCTEON2
:
1870 case CPU_CAVIUM_OCTEON3
:
1871 ctx
.use_bbit_insns
= 1;
1874 ctx
.use_bbit_insns
= 0;
1878 ctx
.offsets
= kcalloc(prog
->len
+ 1, sizeof(*ctx
.offsets
), GFP_KERNEL
);
1879 if (ctx
.offsets
== NULL
)
1882 ctx
.reg_val_types
= kcalloc(prog
->len
+ 1, sizeof(*ctx
.reg_val_types
), GFP_KERNEL
);
1883 if (ctx
.reg_val_types
== NULL
)
1888 if (reg_val_propagate(&ctx
))
1892 * First pass discovers used resources and instruction offsets
1893 * assuming short branches are used.
1895 if (build_int_body(&ctx
))
1899 * If no calls are made (EBPF_SAVE_RA), then tail call count
1900 * in $v1, else we must save in n$s4.
1902 if (ctx
.flags
& EBPF_SEEN_TC
) {
1903 if (ctx
.flags
& EBPF_SAVE_RA
)
1904 ctx
.flags
|= EBPF_SAVE_S4
;
1906 ctx
.flags
|= EBPF_TCC_IN_V1
;
1910 * Second pass generates offsets, if any branches are out of
1911 * range a jump-around long sequence is generated, and we have
1912 * to try again from the beginning to generate the new
1913 * offsets. This is done until no additional conversions are
1918 ctx
.gen_b_offsets
= 1;
1919 ctx
.long_b_conversion
= 0;
1920 if (gen_int_prologue(&ctx
))
1922 if (build_int_body(&ctx
))
1924 if (build_int_epilogue(&ctx
, MIPS_R_RA
))
1926 } while (ctx
.long_b_conversion
);
1928 image_size
= 4 * ctx
.idx
;
1930 header
= bpf_jit_binary_alloc(image_size
, &image_ptr
,
1931 sizeof(u32
), jit_fill_hole
);
1935 ctx
.target
= (u32
*)image_ptr
;
1937 /* Third pass generates the code */
1939 if (gen_int_prologue(&ctx
))
1941 if (build_int_body(&ctx
))
1943 if (build_int_epilogue(&ctx
, MIPS_R_RA
))
1946 /* Update the icache */
1947 flush_icache_range((unsigned long)ctx
.target
,
1948 (unsigned long)(ctx
.target
+ ctx
.idx
* sizeof(u32
)));
1950 if (bpf_jit_enable
> 1)
1952 bpf_jit_dump(prog
->len
, image_size
, 2, ctx
.target
);
1954 bpf_jit_binary_lock_ro(header
);
1955 prog
->bpf_func
= (void *)ctx
.target
;
1957 prog
->jited_len
= image_size
;
1960 bpf_jit_prog_release_other(prog
, prog
== orig_prog
?
1963 kfree(ctx
.reg_val_types
);
1970 bpf_jit_binary_free(header
);