/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Based on code from:
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_T7	15
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_S5	21
#define MIPS_R_S6	22
#define MIPS_R_S7	23
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31

/* eBPF flags */
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)

/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register.  The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * semantics require.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};
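
/*
 * Illustration (added for clarity, not part of the original source):
 * 32-bit ALU results on MIPS64 are kept sign extended, so after a
 * 32-bit addu that produces 0x80000000 the register holds
 * 0xffffffff80000000, while BPF semantics require
 * 0x0000000080000000.  A REG_32BIT value therefore needs
 * "dinsu dst, $zero, 32, 32" before a 64-bit use, and a zero
 * extended value needs "sll dst, dst, 0" (the canonical MIPS64 sign
 * extension) before a 32-bit op.
 */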

/*
 * high bit of offsets indicates if long branch conversion done at
 * this insn.
 */
#define OFFSETS_B_CONV	BIT(31)

/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @stack_size:		eBPF stack size
 * @tmp_offset:		eBPF $sp offset to 8-byte temporary memory
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register.
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	int tmp_offset;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};

static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}

static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
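
/*
 * Worked example (illustrative, not in the original source): each
 * register's type occupies 3 bits of the packed u64, so setting
 * BPF_REG_2 (reg == 2) to REG_32BIT (3) clears bits 6..8 and ORs in
 * 3ull << 6, leaving the other tracked registers' types untouched.
 */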

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return 0;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);

	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;
	return r;
}

/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch.  tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}
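
/*
 * Worked example (illustrative): if the branch is being emitted at
 * ctx->idx == 4 (byte offset 16) and the target insn was placed at
 * byte offset 64, b_imm() returns 64 - 16 - 4 = 44, the displacement
 * applied relative to the delay slot at byte offset 20.
 */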

enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};

/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
		     enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}
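
/*
 * Mapping summary (informational): BPF_REG_0 -> $v0 (the n64 return
 * register), BPF_REG_1..BPF_REG_5 -> $a0..$a4 (the n64 argument
 * registers, so helper calls need no argument shuffling), and
 * BPF_REG_6..BPF_REG_9 -> $s0..$s3 (callee-saved, preserved across
 * helper calls).
 */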

/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |      .                         |
 *                      |      .                         |
 *                      |      .                         |
 *     $sp -------->    +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */

static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If $ra is saved, we are doing a function call and
		 * may need an extra 8-byte tmp area.
		 */
		stack_adjust += 16;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;
	ctx->tmp_offset = locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
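
/*
 * Sizing example (illustrative): a program that calls a helper
 * (EBPF_SAVE_RA), uses BPF_REG_6 (EBPF_SAVE_S0) and dereferences the
 * frame pointer ends up with stack_adjust == 16 + 8 + MAX_BPF_STACK
 * and tmp_offset == MAX_BPF_STACK, i.e. the tmp area and the saved
 * registers sit above the BPF_REG_10 relative storage.
 */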

static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA &&
	    get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
		/* Don't let zero extended value escape. */
		emit_instr(ctx, sll, r0, r0, 0);

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);

	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}
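
/*
 * Note (added for clarity): the stack-pointer restore (or the nop)
 * above is emitted after the jr on purpose; it executes in the
 * jump's delay slot, so the frame is torn down exactly once on the
 * way out.
 */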

static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
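
/*
 * Worked example (illustrative): for imm == 0x1234ffff the low half
 * sign extends to lower == -1, so upper becomes 0x12350000 and the
 * pair "lui reg, 0x1235; addiu reg, reg, -1" rebuilds the value.
 * Pre-biasing upper this way cancels the sign extension that addiu
 * applies to the low 16 bits.
 */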

static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		/*
		 * Subtraction is emitted as an add of the negated
		 * immediate, so the single-insn bounds are negated too.
		 */
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void * __must_check
ool_skb_header_pointer(const struct sk_buff *skb, int offset,
		       int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}

static int size_to_len(const struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_B:
		return 1;
	case BPF_H:
		return 2;
	case BPF_W:
		return 4;
	case BPF_DW:
		return 8;
	}
	return 0;
}

static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
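
/*
 * Worked example (illustrative): value == 0x123456789 takes the
 * general case and is synthesized as
 *
 *	lui	dst, 0x0001
 *	ori	dst, dst, 0x2345
 *	dsll	dst, dst, 16
 *	ori	dst, dst, 0x6789
 *
 * one instruction per non-zero 16-bit part, plus the shifts needed
 * to slide already-emitted parts into place.
 */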

static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (--TCC < 0)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, daddiu, MIPS_R_T5,
		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
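
/*
 * Note (added for clarity): this pairs with gen_int_prologue(); the
 * indirect jump lands 4 bytes past bpf_func, skipping the target
 * program's "daddiu $v1, $zero, MAX_TAIL_CALL_CNT", so the
 * decremented count propagated in $v1 survives into the next
 * program.
 */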

static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
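
/*
 * Informational: 0x1ffff / -0x20000 is the byte range reachable by
 * the 16-bit (word-scaled, so effectively 18-bit signed) displacement
 * of a MIPS conditional branch, roughly +/-128 KiB around the delay
 * slot.
 */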

/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, dmultu, MIPS_R_AT, dst);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, multu, dst, MIPS_R_AT);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, dmultu, dst, src);
			emit_instr(ctx, mflo, dst);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				switch (bpf_op) {
				case BPF_JSGT:
					emit_instr(ctx, blez, dst, b_off);
					break;
				case BPF_JSGE:
					emit_instr(ctx, bltz, dst, b_off);
					break;
				case BPF_JSLT:
					emit_instr(ctx, bgez, dst, b_off);
					break;
				case BPF_JSLE:
					emit_instr(ctx, bgtz, dst, b_off);
					break;
				}
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			switch (bpf_op) {
			case BPF_JSGT:
				emit_instr(ctx, bgtz, dst, b_off);
				break;
			case BPF_JSGE:
				emit_instr(ctx, bgez, dst, b_off);
				break;
			case BPF_JSLT:
				emit_instr(ctx, bltz, dst, b_off);
				break;
			case BPF_JSLE:
				emit_instr(ctx, blez, dst, b_off);
				break;
			}
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * Only a "less than" compare is available, so "GT" and
		 * "LE" both compare against imm + 1, and the branch
		 * sense chosen at jeq_common makes up the difference.
		 */
		if (bpf_op == BPF_JSGT)
			t64s = insn->imm + 1;
		else if (bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;

		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * Only an unsigned "less than" compare is available, so
		 * "GT" and "LE" both compare against imm + 1, and the
		 * branch sense chosen at jeq_common makes up the
		 * difference.
		 */
		if (bpf_op == BPF_JGT)
			t64s = (u64)(u32)(insn->imm) + 1;
		else if (bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;

	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (s64)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;

	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;

	case BPF_LD | BPF_B | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_DW | BPF_ABS:
		ctx->flags |= EBPF_SAVE_RA;

		gen_imm_to_reg(insn, MIPS_R_A1, ctx);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));

		if (insn->imm < 0) {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
		} else {
			emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
			emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		}
		goto ld_skb_common;

	case BPF_LD | BPF_B | BPF_IND:
	case BPF_LD | BPF_H | BPF_IND:
	case BPF_LD | BPF_W | BPF_IND:
	case BPF_LD | BPF_DW | BPF_IND:
		ctx->flags |= EBPF_SAVE_RA;
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, MIPS_R_A1, src, 0);
			src = MIPS_R_A1;
		}
		if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
			emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
		}
		/* truncate to 32-bit int */
		emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
		emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
		emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);

		emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
		emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
		emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);

ld_skb_common:
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot move */
		emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);

		/* Check the error value */
		b_off = b_imm(exit_idx, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, exit_idx);
			if (target == (unsigned int)-1)
				return -E2BIG;

			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
			emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
			emit_instr(ctx, nop);
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		} else {
			emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}

#ifdef __BIG_ENDIAN
		need_swap = false;
#else
		need_swap = true;
#endif
		dst = MIPS_R_V0;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
			break;
		}

		break;
	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 &&
		    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;

	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;

	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;

	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure back up to LL (-4
				 * instructions of 4 bytes each).
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;

	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}

#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
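
/*
 * Informational note: the top two bits of each per-insn rvt word
 * record how the insn has been reached (fall-through edge, taken
 * edge, or both, i.e. RVT_DONE); an insn whose visited bits are
 * still zero after propagation is unreachable, and build_int_body()
 * skips it.
 */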

static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue, some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}

/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);

				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}
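
/*
 * Traversal note (added for clarity): a first call with follow_taken
 * == false walks only fall-through edges; reg_val_propagate() below
 * then re-walks from each half-visited branch with follow_taken ==
 * true, so every conditional branch is eventually explored on both
 * edges.
 */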

/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn.  This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * Doesn't yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves top bits free for other
	 * uses.  Bit-62..63 used to see if we have visited an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow.
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge.  We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done.  Any insn that has not been
	 * visited at this point is dead.
	 */
	return 0;
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;

	if (!prog->jit_requested || !cpu_has_mips64r2)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	preempt_disable();
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		ctx.use_bbit_insns = 1;
		break;
	default:
		ctx.use_bbit_insns = 0;
	}
	preempt_enable();

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (EBPF_SAVE_RA is not set), the tail
	 * call count can stay in $v1, otherwise we must save it in
	 * $s4 across calls.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets.  If any branches are out of
	 * range, a jump-around long sequence is generated, and we
	 * have to try again from the beginning to generate the new
	 * offsets.  This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);

	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;
out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);
	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}