MIPS: eBPF: Fix icache flush end address
[linux/fpc-iii.git] / arch/mips/net/ebpf_jit.c
blob 9bda82ed75eb77ab704c5c22627ab64c5677b2b8
1 /*
2 * Just-In-Time compiler for eBPF filters on MIPS
4 * Copyright (c) 2017 Cavium, Inc.
6 * Based on code from:
8 * Copyright (c) 2014 Imagination Technologies Ltd.
9 * Author: Markos Chandras <markos.chandras@imgtec.com>
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; version 2 of the License.
16 #include <linux/bitops.h>
17 #include <linux/errno.h>
18 #include <linux/filter.h>
19 #include <linux/bpf.h>
20 #include <linux/slab.h>
21 #include <asm/bitops.h>
22 #include <asm/byteorder.h>
23 #include <asm/cacheflush.h>
24 #include <asm/cpu-features.h>
25 #include <asm/uasm.h>
27 /* Registers used by JIT */
28 #define MIPS_R_ZERO 0
29 #define MIPS_R_AT 1
30 #define MIPS_R_V0 2 /* BPF_R0 */
31 #define MIPS_R_V1 3
32 #define MIPS_R_A0 4 /* BPF_R1 */
33 #define MIPS_R_A1 5 /* BPF_R2 */
34 #define MIPS_R_A2 6 /* BPF_R3 */
35 #define MIPS_R_A3 7 /* BPF_R4 */
36 #define MIPS_R_A4 8 /* BPF_R5 */
37 #define MIPS_R_T4 12 /* BPF_AX */
38 #define MIPS_R_T5 13
39 #define MIPS_R_T6 14
40 #define MIPS_R_T7 15
41 #define MIPS_R_S0 16 /* BPF_R6 */
42 #define MIPS_R_S1 17 /* BPF_R7 */
43 #define MIPS_R_S2 18 /* BPF_R8 */
44 #define MIPS_R_S3 19 /* BPF_R9 */
45 #define MIPS_R_S4 20 /* BPF_TCC */
46 #define MIPS_R_S5 21
47 #define MIPS_R_S6 22
48 #define MIPS_R_S7 23
49 #define MIPS_R_T8 24
50 #define MIPS_R_T9 25
51 #define MIPS_R_SP 29
52 #define MIPS_R_RA 31
54 /* eBPF flags */
55 #define EBPF_SAVE_S0 BIT(0)
56 #define EBPF_SAVE_S1 BIT(1)
57 #define EBPF_SAVE_S2 BIT(2)
58 #define EBPF_SAVE_S3 BIT(3)
59 #define EBPF_SAVE_S4 BIT(4)
60 #define EBPF_SAVE_RA BIT(5)
61 #define EBPF_SEEN_FP BIT(6)
62 #define EBPF_SEEN_TC BIT(7)
63 #define EBPF_TCC_IN_V1 BIT(8)
66 * For the mips64 ISA, we need to track the value range or type for
67 * each JIT register. The BPF machine requires zero extended 32-bit
68 * values, but the mips64 ISA requires sign extended 32-bit values.
69 * At each point in the BPF program we track the state of every
70 * register so that we can zero extend or sign extend as the BPF
71 * semantics require.
73 enum reg_val_type {
74 /* uninitialized */
75 REG_UNKNOWN,
76 /* not known to be 32-bit compatible. */
77 REG_64BIT,
78 /* 32-bit compatible, no truncation needed for 64-bit ops. */
79 REG_64BIT_32BIT,
80 /* 32-bit compatible, need truncation for 64-bit ops. */
81 REG_32BIT,
82 /* 32-bit zero extended. */
83 REG_32BIT_ZERO_EX,
84 /* 32-bit no sign/zero extension needed. */
85 REG_32BIT_POS
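/*
 * Illustration of why the distinction matters: after a 32-bit MIPS op
 * such as addu, a register holds (u32)0x80000000 as the sign-extended
 * 0xffffffff80000000, while eBPF 32-bit semantics want the zero-extended
 * 0x0000000080000000.  So a value tracked as REG_32BIT gets
 * "dinsu dst, $zero, 32, 32" (clear the upper word) before it may feed a
 * 64-bit op, and a REG_64BIT or REG_32BIT_ZERO_EX value gets the
 * canonical "sll dst, dst, 0" sign extension before a 32-bit op.
 */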
89 * The high bit of an offsets[] entry indicates whether long branch
90 * conversion was done at this insn.
92 #define OFFSETS_B_CONV BIT(31)
94 /**
95 * struct jit_ctx - JIT context
96 * @skf: The sk_filter
97 * @stack_size: eBPF stack size
98 * @idx: Instruction index
99 * @flags: JIT flags
100 * @offsets: Instruction offsets
101 * @target: Memory location for the compiled filter
102 * @reg_val_types: Packed enum reg_val_type for each register.
104 struct jit_ctx {
105 const struct bpf_prog *skf;
106 int stack_size;
107 u32 idx;
108 u32 flags;
109 u32 *offsets;
110 u32 *target;
111 u64 *reg_val_types;
112 unsigned int long_b_conversion:1;
113 unsigned int gen_b_offsets:1;
114 unsigned int use_bbit_insns:1;
117 static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
119 *rvt &= ~(7ull << (reg * 3));
120 *rvt |= ((u64)type << (reg * 3));
123 static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
124 int index, int reg)
126 return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
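/*
 * Minimal usage sketch of the packed layout (illustrative only): each
 * register's type occupies 3 bits at position (reg * 3), so the eleven
 * eBPF registers fit in the low bits of the u64 and the top bits stay
 * free for the RVT_* visit flags defined further down.  For example:
 *
 *	u64 rvt = 0;
 *	set_reg_val_type(&rvt, BPF_REG_2, REG_64BIT);
 *	// rvt now holds (u64)REG_64BIT << 6; get_reg_val_type() reads the
 *	// same 3-bit field back out of ctx->reg_val_types[idx].
 */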
129 /* Simply emit the instruction if the JIT memory space has been allocated */
130 #define emit_instr(ctx, func, ...) \
131 do { \
132 if ((ctx)->target != NULL) { \
133 u32 *p = &(ctx)->target[ctx->idx]; \
134 uasm_i_##func(&p, ##__VA_ARGS__); \
136 (ctx)->idx++; \
137 } while (0)
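/*
 * The same macro serves both the sizing and the emission passes.  With
 * ctx->target == NULL only ctx->idx advances, so a pass over the program
 * merely counts instruction slots; once ctx->target points at the
 * allocated image, the identical calls also encode each instruction via
 * uasm.  For example, emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -16)
 * consumes one slot in the first case and additionally writes the
 * "daddiu $sp, $sp, -16" encoding in the second.
 */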
139 static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
141 unsigned long target_va, base_va;
142 unsigned int r;
144 if (!ctx->target)
145 return 0;
147 base_va = (unsigned long)ctx->target;
148 target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
150 if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
151 return (unsigned int)-1;
152 r = target_va & 0x0ffffffful;
153 return r;
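/*
 * A MIPS "j" can only reach targets that share bits 28 and above with
 * the jump itself (the same naturally aligned 256 MB segment), hence the
 * comparison of everything above bit 27.  For instance, with the image
 * at 0xffffffff81230000, a target at 0xffffffff81234567 yields
 * r == 0x1234567; a (hypothetical) target in a different 256 MB segment
 * makes j_target() return (unsigned int)-1 and the caller has to give
 * up with -E2BIG.
 */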
156 /* Compute the immediate value for PC-relative branches. */
157 static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
159 if (!ctx->gen_b_offsets)
160 return 0;
163 * We want a pc-relative branch. tgt is the instruction offset
164 * we want to jump to.
166 * Branch on MIPS:
167 * I: target_offset <- sign_extend(offset)
168 * I+1: PC += target_offset (delay slot)
170 * ctx->idx currently points to the branch instruction
171 * but the offset is added to the delay slot so we need
172 * to subtract 4.
174 return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
175 (ctx->idx * 4) - 4;
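/*
 * Worked example: if the branch is emitted at ctx->idx == 10 (byte
 * offset 40) and ctx->offsets[tgt] == 60, the returned immediate is
 * 60 - 40 - 4 = 16 bytes, i.e. the target lies four instructions past
 * the delay slot.  is_bad_offset() later rejects anything outside the
 * +/-128 KB reach of the 16-bit, word-scaled branch offset field.
 */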
178 enum which_ebpf_reg {
179 src_reg,
180 src_reg_no_fp,
181 dst_reg,
182 dst_reg_fp_ok
186 * For eBPF, the register mapping naturally falls out of the
187 * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
188 * separate frame pointer, so BPF_REG_10 relative accesses are
189 * adjusted to be $sp relative.
191 int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
192 enum which_ebpf_reg w)
194 int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
195 insn->src_reg : insn->dst_reg;
197 switch (ebpf_reg) {
198 case BPF_REG_0:
199 return MIPS_R_V0;
200 case BPF_REG_1:
201 return MIPS_R_A0;
202 case BPF_REG_2:
203 return MIPS_R_A1;
204 case BPF_REG_3:
205 return MIPS_R_A2;
206 case BPF_REG_4:
207 return MIPS_R_A3;
208 case BPF_REG_5:
209 return MIPS_R_A4;
210 case BPF_REG_6:
211 ctx->flags |= EBPF_SAVE_S0;
212 return MIPS_R_S0;
213 case BPF_REG_7:
214 ctx->flags |= EBPF_SAVE_S1;
215 return MIPS_R_S1;
216 case BPF_REG_8:
217 ctx->flags |= EBPF_SAVE_S2;
218 return MIPS_R_S2;
219 case BPF_REG_9:
220 ctx->flags |= EBPF_SAVE_S3;
221 return MIPS_R_S3;
222 case BPF_REG_10:
223 if (w == dst_reg || w == src_reg_no_fp)
224 goto bad_reg;
225 ctx->flags |= EBPF_SEEN_FP;
227 * Needs special handling, return something that
228 * cannot be clobbered just in case.
230 return MIPS_R_ZERO;
231 case BPF_REG_AX:
232 return MIPS_R_T4;
233 default:
234 bad_reg:
235 WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
236 return -EINVAL;
240 * eBPF stack frame will be something like:
242 * Entry $sp ------> +--------------------------------+
243 * | $ra (optional) |
244 * +--------------------------------+
245 * | $s0 (optional) |
246 * +--------------------------------+
247 * | $s1 (optional) |
248 * +--------------------------------+
249 * | $s2 (optional) |
250 * +--------------------------------+
251 * | $s3 (optional) |
252 * +--------------------------------+
253 * | $s4 (optional) |
254 * +--------------------------------+
255 * | tmp-storage (if $ra saved) |
256 * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
257 * | BPF_REG_10 relative storage |
258 * | MAX_BPF_STACK (optional) |
259 * | . |
260 * | . |
261 * | . |
262 * $sp --------> +--------------------------------+
264 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
265 * area is not allocated.
267 static int gen_int_prologue(struct jit_ctx *ctx)
269 int stack_adjust = 0;
270 int store_offset;
271 int locals_size;
273 if (ctx->flags & EBPF_SAVE_RA)
275 * If saving $ra, we are doing a function call and may need
276 * an extra 8-byte tmp area.
278 stack_adjust += 16;
279 if (ctx->flags & EBPF_SAVE_S0)
280 stack_adjust += 8;
281 if (ctx->flags & EBPF_SAVE_S1)
282 stack_adjust += 8;
283 if (ctx->flags & EBPF_SAVE_S2)
284 stack_adjust += 8;
285 if (ctx->flags & EBPF_SAVE_S3)
286 stack_adjust += 8;
287 if (ctx->flags & EBPF_SAVE_S4)
288 stack_adjust += 8;
290 BUILD_BUG_ON(MAX_BPF_STACK & 7);
291 locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
293 stack_adjust += locals_size;
295 ctx->stack_size = stack_adjust;
298 * First instruction initializes the tail call count (TCC).
299 * On tail call we skip this instruction, and the TCC is
300 * passed in $v1 from the caller.
302 emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
303 if (stack_adjust)
304 emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
305 else
306 return 0;
308 store_offset = stack_adjust - 8;
310 if (ctx->flags & EBPF_SAVE_RA) {
311 emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
312 store_offset -= 8;
314 if (ctx->flags & EBPF_SAVE_S0) {
315 emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
316 store_offset -= 8;
318 if (ctx->flags & EBPF_SAVE_S1) {
319 emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
320 store_offset -= 8;
322 if (ctx->flags & EBPF_SAVE_S2) {
323 emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
324 store_offset -= 8;
326 if (ctx->flags & EBPF_SAVE_S3) {
327 emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
328 store_offset -= 8;
330 if (ctx->flags & EBPF_SAVE_S4) {
331 emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
332 store_offset -= 8;
335 if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
336 emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
338 return 0;
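/*
 * Example frame: a program that calls a helper (EBPF_SAVE_RA, +16 for
 * $ra plus the tmp slot), uses BPF_REG_6 (EBPF_SAVE_S0, +8) and touches
 * its stack (EBPF_SEEN_FP, +MAX_BPF_STACK) gets
 * stack_adjust == 16 + 8 + 512 == 536, with $ra stored at 528($sp) and
 * $s0 at 520($sp), matching the layout pictured above.
 */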
341 static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
343 const struct bpf_prog *prog = ctx->skf;
344 int stack_adjust = ctx->stack_size;
345 int store_offset = stack_adjust - 8;
346 enum reg_val_type td;
347 int r0 = MIPS_R_V0;
349 if (dest_reg == MIPS_R_RA) {
350 /* Don't let zero extended value escape. */
351 td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
352 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
353 emit_instr(ctx, sll, r0, r0, 0);
356 if (ctx->flags & EBPF_SAVE_RA) {
357 emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
358 store_offset -= 8;
360 if (ctx->flags & EBPF_SAVE_S0) {
361 emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
362 store_offset -= 8;
364 if (ctx->flags & EBPF_SAVE_S1) {
365 emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
366 store_offset -= 8;
368 if (ctx->flags & EBPF_SAVE_S2) {
369 emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
370 store_offset -= 8;
372 if (ctx->flags & EBPF_SAVE_S3) {
373 emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
374 store_offset -= 8;
376 if (ctx->flags & EBPF_SAVE_S4) {
377 emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
378 store_offset -= 8;
380 emit_instr(ctx, jr, dest_reg);
382 if (stack_adjust)
383 emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
384 else
385 emit_instr(ctx, nop);
387 return 0;
390 static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
391 struct jit_ctx *ctx)
393 if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
394 emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
395 } else {
396 int lower = (s16)(insn->imm & 0xffff);
397 int upper = insn->imm - lower;
399 emit_instr(ctx, lui, reg, upper >> 16);
400 emit_instr(ctx, addiu, reg, reg, lower);
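/*
 * Worked example of the two-instruction case: for insn->imm == 0x12348765
 * the low half sign-extends to lower == -0x789b, so upper becomes
 * 0x12350000 to compensate; "lui reg, 0x1235" followed by
 * "addiu reg, reg, -0x789b" reconstructs 0x12348765 exactly, because
 * addiu sign-extends its 16-bit immediate.
 */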
404 static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
405 int idx)
407 int upper_bound, lower_bound;
408 int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
410 if (dst < 0)
411 return dst;
413 switch (BPF_OP(insn->code)) {
414 case BPF_MOV:
415 case BPF_ADD:
416 upper_bound = S16_MAX;
417 lower_bound = S16_MIN;
418 break;
419 case BPF_SUB:
420 upper_bound = -(int)S16_MIN;
421 lower_bound = -(int)S16_MAX;
422 break;
423 case BPF_AND:
424 case BPF_OR:
425 case BPF_XOR:
426 upper_bound = 0xffff;
427 lower_bound = 0;
428 break;
429 case BPF_RSH:
430 case BPF_LSH:
431 case BPF_ARSH:
432 /* Shift amounts are truncated, no need for bounds */
433 upper_bound = S32_MAX;
434 lower_bound = S32_MIN;
435 break;
436 default:
437 return -EINVAL;
441 * Immediate move clobbers the register, so no sign/zero
442 * extension needed.
444 if (BPF_CLASS(insn->code) == BPF_ALU64 &&
445 BPF_OP(insn->code) != BPF_MOV &&
446 get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
447 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
448 /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
449 if (BPF_CLASS(insn->code) == BPF_ALU &&
450 BPF_OP(insn->code) != BPF_LSH &&
451 BPF_OP(insn->code) != BPF_MOV &&
452 get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
453 emit_instr(ctx, sll, dst, dst, 0);
455 if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
456 /* single insn immediate case */
457 switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
458 case BPF_ALU64 | BPF_MOV:
459 emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
460 break;
461 case BPF_ALU64 | BPF_AND:
462 case BPF_ALU | BPF_AND:
463 emit_instr(ctx, andi, dst, dst, insn->imm);
464 break;
465 case BPF_ALU64 | BPF_OR:
466 case BPF_ALU | BPF_OR:
467 emit_instr(ctx, ori, dst, dst, insn->imm);
468 break;
469 case BPF_ALU64 | BPF_XOR:
470 case BPF_ALU | BPF_XOR:
471 emit_instr(ctx, xori, dst, dst, insn->imm);
472 break;
473 case BPF_ALU64 | BPF_ADD:
474 emit_instr(ctx, daddiu, dst, dst, insn->imm);
475 break;
476 case BPF_ALU64 | BPF_SUB:
477 emit_instr(ctx, daddiu, dst, dst, -insn->imm);
478 break;
479 case BPF_ALU64 | BPF_RSH:
480 emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
481 break;
482 case BPF_ALU | BPF_RSH:
483 emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
484 break;
485 case BPF_ALU64 | BPF_LSH:
486 emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
487 break;
488 case BPF_ALU | BPF_LSH:
489 emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
490 break;
491 case BPF_ALU64 | BPF_ARSH:
492 emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
493 break;
494 case BPF_ALU | BPF_ARSH:
495 emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
496 break;
497 case BPF_ALU | BPF_MOV:
498 emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
499 break;
500 case BPF_ALU | BPF_ADD:
501 emit_instr(ctx, addiu, dst, dst, insn->imm);
502 break;
503 case BPF_ALU | BPF_SUB:
504 emit_instr(ctx, addiu, dst, dst, -insn->imm);
505 break;
506 default:
507 return -EINVAL;
509 } else {
510 /* multi insn immediate case */
511 if (BPF_OP(insn->code) == BPF_MOV) {
512 gen_imm_to_reg(insn, dst, ctx);
513 } else {
514 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
515 switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
516 case BPF_ALU64 | BPF_AND:
517 case BPF_ALU | BPF_AND:
518 emit_instr(ctx, and, dst, dst, MIPS_R_AT);
519 break;
520 case BPF_ALU64 | BPF_OR:
521 case BPF_ALU | BPF_OR:
522 emit_instr(ctx, or, dst, dst, MIPS_R_AT);
523 break;
524 case BPF_ALU64 | BPF_XOR:
525 case BPF_ALU | BPF_XOR:
526 emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
527 break;
528 case BPF_ALU64 | BPF_ADD:
529 emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
530 break;
531 case BPF_ALU64 | BPF_SUB:
532 emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
533 break;
534 case BPF_ALU | BPF_ADD:
535 emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
536 break;
537 case BPF_ALU | BPF_SUB:
538 emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
539 break;
540 default:
541 return -EINVAL;
546 return 0;
549 static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
551 if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
552 emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
553 } else if (value >= 0xffffffff80000000ull ||
554 (value < 0x80000000 && value > 0xffff)) {
555 emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
556 emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
557 } else {
558 int i;
559 bool seen_part = false;
560 int needed_shift = 0;
562 for (i = 0; i < 4; i++) {
563 u64 part = (value >> (16 * (3 - i))) & 0xffff;
565 if (seen_part && needed_shift > 0 && (part || i == 3)) {
566 emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
567 needed_shift = 0;
569 if (part) {
570 if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
571 emit_instr(ctx, lui, dst, (s32)(s16)part);
572 needed_shift = -16;
573 } else {
574 emit_instr(ctx, ori, dst,
575 seen_part ? dst : MIPS_R_ZERO,
576 (unsigned int)part);
578 seen_part = true;
580 if (seen_part)
581 needed_shift += 16;
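/*
 * Worked example of the general case: for value == 0x123456789abcULL the
 * loop emits "lui dst, 0x1234", "ori dst, dst, 0x5678",
 * "dsll dst, dst, 16", "ori dst, dst, 0x9abc" -- four instructions --
 * while values that fit a sign-extended 16- or 32-bit immediate take the
 * one- or two-instruction shortcuts above.
 */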
586 static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
588 int off, b_off;
590 ctx->flags |= EBPF_SEEN_TC;
592 * if (index >= array->map.max_entries)
593 * goto out;
595 off = offsetof(struct bpf_array, map.max_entries);
596 emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
597 emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
598 b_off = b_imm(this_idx + 1, ctx);
599 emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
601 * if (--TCC < 0)
602 * goto out;
604 /* Delay slot */
605 emit_instr(ctx, daddiu, MIPS_R_T5,
606 (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
607 b_off = b_imm(this_idx + 1, ctx);
608 emit_instr(ctx, bltz, MIPS_R_T5, b_off);
610 * prog = array->ptrs[index];
611 * if (prog == NULL)
612 * goto out;
614 /* Delay slot */
615 emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
616 emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
617 off = offsetof(struct bpf_array, ptrs);
618 emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
619 b_off = b_imm(this_idx + 1, ctx);
620 emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
621 /* Delay slot */
622 emit_instr(ctx, nop);
624 /* goto *(prog->bpf_func + 4); */
625 off = offsetof(struct bpf_prog, bpf_func);
626 emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
627 /* All systems are go... propagate TCC */
628 emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
629 /* Skip first instruction (TCC initialization) */
630 emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
631 return build_int_epilogue(ctx, MIPS_R_T9);
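/*
 * Taken together, the sequence above is roughly:
 *
 *	if (index >= array->map.max_entries || --TCC < 0 ||
 *	    (prog = array->ptrs[index]) == NULL)
 *		goto out;			// fall through to the next insn
 *	goto *(prog->bpf_func + 4);		// skip the callee's TCC init
 *
 * with the surviving tail call count handed to the callee in $v1.
 */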
634 static bool is_bad_offset(int b_off)
636 return b_off > 0x1ffff || b_off < -0x20000;
639 /* Returns the number of insn slots consumed. */
640 static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
641 int this_idx, int exit_idx)
643 int src, dst, r, td, ts, mem_off, b_off;
644 bool need_swap, did_move, cmp_eq;
645 unsigned int target = 0;
646 u64 t64;
647 s64 t64s;
648 int bpf_op = BPF_OP(insn->code);
650 switch (insn->code) {
651 case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
652 case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
653 case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
654 case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
655 case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
656 case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
657 case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
658 case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
659 case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
660 case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
661 case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
662 case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
663 case BPF_ALU | BPF_OR | BPF_K: /* ALU64_IMM */
664 case BPF_ALU | BPF_AND | BPF_K: /* ALU64_IMM */
665 case BPF_ALU | BPF_LSH | BPF_K: /* ALU64_IMM */
666 case BPF_ALU | BPF_RSH | BPF_K: /* ALU64_IMM */
667 case BPF_ALU | BPF_XOR | BPF_K: /* ALU64_IMM */
668 case BPF_ALU | BPF_ARSH | BPF_K: /* ALU64_IMM */
669 r = gen_imm_insn(insn, ctx, this_idx);
670 if (r < 0)
671 return r;
672 break;
673 case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
674 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
675 if (dst < 0)
676 return dst;
677 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
678 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
679 if (insn->imm == 1) /* Mult by 1 is a nop */
680 break;
681 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
682 emit_instr(ctx, dmultu, MIPS_R_AT, dst);
683 emit_instr(ctx, mflo, dst);
684 break;
685 case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
686 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
687 if (dst < 0)
688 return dst;
689 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
690 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
691 emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
692 break;
693 case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
694 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
695 if (dst < 0)
696 return dst;
697 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
698 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
699 /* sign extend */
700 emit_instr(ctx, sll, dst, dst, 0);
702 if (insn->imm == 1) /* Mult by 1 is a nop */
703 break;
704 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
705 emit_instr(ctx, multu, dst, MIPS_R_AT);
706 emit_instr(ctx, mflo, dst);
707 break;
708 case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
709 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
710 if (dst < 0)
711 return dst;
712 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
713 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
714 /* sign extend */
715 emit_instr(ctx, sll, dst, dst, 0);
717 emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
718 break;
719 case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
720 case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
721 if (insn->imm == 0)
722 return -EINVAL;
723 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
724 if (dst < 0)
725 return dst;
726 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
727 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
728 /* sign extend */
729 emit_instr(ctx, sll, dst, dst, 0);
730 if (insn->imm == 1) {
731 /* div by 1 is a nop, mod by 1 is zero */
732 if (bpf_op == BPF_MOD)
733 emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
734 break;
736 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
737 emit_instr(ctx, divu, dst, MIPS_R_AT);
738 if (bpf_op == BPF_DIV)
739 emit_instr(ctx, mflo, dst);
740 else
741 emit_instr(ctx, mfhi, dst);
742 break;
743 case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU_IMM */
744 case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU_IMM */
745 if (insn->imm == 0)
746 return -EINVAL;
747 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
748 if (dst < 0)
749 return dst;
750 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
751 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
752 if (insn->imm == 1) {
753 /* div by 1 is a nop, mod by 1 is zero */
754 if (bpf_op == BPF_MOD)
755 emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
756 break;
758 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
759 emit_instr(ctx, ddivu, dst, MIPS_R_AT);
760 if (bpf_op == BPF_DIV)
761 emit_instr(ctx, mflo, dst);
762 else
763 emit_instr(ctx, mfhi, dst);
764 break;
765 case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
766 case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
767 case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
768 case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
769 case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
770 case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
771 case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
772 case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
773 case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
774 case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
775 case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
776 case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
777 src = ebpf_to_mips_reg(ctx, insn, src_reg);
778 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
779 if (src < 0 || dst < 0)
780 return -EINVAL;
781 if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
782 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
783 did_move = false;
784 if (insn->src_reg == BPF_REG_10) {
785 if (bpf_op == BPF_MOV) {
786 emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
787 did_move = true;
788 } else {
789 emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
790 src = MIPS_R_AT;
792 } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
793 int tmp_reg = MIPS_R_AT;
795 if (bpf_op == BPF_MOV) {
796 tmp_reg = dst;
797 did_move = true;
799 emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
800 emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
801 src = MIPS_R_AT;
803 switch (bpf_op) {
804 case BPF_MOV:
805 if (!did_move)
806 emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
807 break;
808 case BPF_ADD:
809 emit_instr(ctx, daddu, dst, dst, src);
810 break;
811 case BPF_SUB:
812 emit_instr(ctx, dsubu, dst, dst, src);
813 break;
814 case BPF_XOR:
815 emit_instr(ctx, xor, dst, dst, src);
816 break;
817 case BPF_OR:
818 emit_instr(ctx, or, dst, dst, src);
819 break;
820 case BPF_AND:
821 emit_instr(ctx, and, dst, dst, src);
822 break;
823 case BPF_MUL:
824 emit_instr(ctx, dmultu, dst, src);
825 emit_instr(ctx, mflo, dst);
826 break;
827 case BPF_DIV:
828 case BPF_MOD:
829 emit_instr(ctx, ddivu, dst, src);
830 if (bpf_op == BPF_DIV)
831 emit_instr(ctx, mflo, dst);
832 else
833 emit_instr(ctx, mfhi, dst);
834 break;
835 case BPF_LSH:
836 emit_instr(ctx, dsllv, dst, dst, src);
837 break;
838 case BPF_RSH:
839 emit_instr(ctx, dsrlv, dst, dst, src);
840 break;
841 case BPF_ARSH:
842 emit_instr(ctx, dsrav, dst, dst, src);
843 break;
844 default:
845 pr_err("ALU64_REG NOT HANDLED\n");
846 return -EINVAL;
848 break;
849 case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
850 case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
851 case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
852 case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
853 case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
854 case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
855 case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
856 case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
857 case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
858 case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
859 case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
860 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
861 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
862 if (src < 0 || dst < 0)
863 return -EINVAL;
864 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
865 if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
866 /* sign extend */
867 emit_instr(ctx, sll, dst, dst, 0);
869 did_move = false;
870 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
871 if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
872 int tmp_reg = MIPS_R_AT;
874 if (bpf_op == BPF_MOV) {
875 tmp_reg = dst;
876 did_move = true;
878 /* sign extend */
879 emit_instr(ctx, sll, tmp_reg, src, 0);
880 src = MIPS_R_AT;
882 switch (bpf_op) {
883 case BPF_MOV:
884 if (!did_move)
885 emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
886 break;
887 case BPF_ADD:
888 emit_instr(ctx, addu, dst, dst, src);
889 break;
890 case BPF_SUB:
891 emit_instr(ctx, subu, dst, dst, src);
892 break;
893 case BPF_XOR:
894 emit_instr(ctx, xor, dst, dst, src);
895 break;
896 case BPF_OR:
897 emit_instr(ctx, or, dst, dst, src);
898 break;
899 case BPF_AND:
900 emit_instr(ctx, and, dst, dst, src);
901 break;
902 case BPF_MUL:
903 emit_instr(ctx, mul, dst, dst, src);
904 break;
905 case BPF_DIV:
906 case BPF_MOD:
907 emit_instr(ctx, divu, dst, src);
908 if (bpf_op == BPF_DIV)
909 emit_instr(ctx, mflo, dst);
910 else
911 emit_instr(ctx, mfhi, dst);
912 break;
913 case BPF_LSH:
914 emit_instr(ctx, sllv, dst, dst, src);
915 break;
916 case BPF_RSH:
917 emit_instr(ctx, srlv, dst, dst, src);
918 break;
919 default:
920 pr_err("ALU_REG NOT HANDLED\n");
921 return -EINVAL;
923 break;
924 case BPF_JMP | BPF_EXIT:
925 if (this_idx + 1 < exit_idx) {
926 b_off = b_imm(exit_idx, ctx);
927 if (is_bad_offset(b_off))
928 return -E2BIG;
929 emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
930 emit_instr(ctx, nop);
932 break;
933 case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
934 case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
935 cmp_eq = (bpf_op == BPF_JEQ);
936 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
937 if (dst < 0)
938 return dst;
939 if (insn->imm == 0) {
940 src = MIPS_R_ZERO;
941 } else {
942 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
943 src = MIPS_R_AT;
945 goto jeq_common;
946 case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
947 case BPF_JMP | BPF_JNE | BPF_X:
948 case BPF_JMP | BPF_JSLT | BPF_X:
949 case BPF_JMP | BPF_JSLE | BPF_X:
950 case BPF_JMP | BPF_JSGT | BPF_X:
951 case BPF_JMP | BPF_JSGE | BPF_X:
952 case BPF_JMP | BPF_JLT | BPF_X:
953 case BPF_JMP | BPF_JLE | BPF_X:
954 case BPF_JMP | BPF_JGT | BPF_X:
955 case BPF_JMP | BPF_JGE | BPF_X:
956 case BPF_JMP | BPF_JSET | BPF_X:
957 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
958 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
959 if (src < 0 || dst < 0)
960 return -EINVAL;
961 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
962 ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
963 if (td == REG_32BIT && ts != REG_32BIT) {
964 emit_instr(ctx, sll, MIPS_R_AT, src, 0);
965 src = MIPS_R_AT;
966 } else if (ts == REG_32BIT && td != REG_32BIT) {
967 emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
968 dst = MIPS_R_AT;
970 if (bpf_op == BPF_JSET) {
971 emit_instr(ctx, and, MIPS_R_AT, dst, src);
972 cmp_eq = false;
973 dst = MIPS_R_AT;
974 src = MIPS_R_ZERO;
975 } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
976 emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
977 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
978 b_off = b_imm(exit_idx, ctx);
979 if (is_bad_offset(b_off))
980 return -E2BIG;
981 if (bpf_op == BPF_JSGT)
982 emit_instr(ctx, blez, MIPS_R_AT, b_off);
983 else
984 emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
985 emit_instr(ctx, nop);
986 return 2; /* We consumed the exit. */
988 b_off = b_imm(this_idx + insn->off + 1, ctx);
989 if (is_bad_offset(b_off))
990 return -E2BIG;
991 if (bpf_op == BPF_JSGT)
992 emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
993 else
994 emit_instr(ctx, blez, MIPS_R_AT, b_off);
995 emit_instr(ctx, nop);
996 break;
997 } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
998 emit_instr(ctx, slt, MIPS_R_AT, dst, src);
999 cmp_eq = bpf_op == BPF_JSGE;
1000 dst = MIPS_R_AT;
1001 src = MIPS_R_ZERO;
1002 } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
1003 /* dst or src could be AT */
1004 emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
1005 emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
1006 /* SP known to be non-zero, movz becomes boolean not */
1007 emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
1008 emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
1009 emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
1010 cmp_eq = bpf_op == BPF_JGT;
1011 dst = MIPS_R_AT;
1012 src = MIPS_R_ZERO;
1013 } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
1014 emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
1015 cmp_eq = bpf_op == BPF_JGE;
1016 dst = MIPS_R_AT;
1017 src = MIPS_R_ZERO;
1018 } else { /* JNE/JEQ case */
1019 cmp_eq = (bpf_op == BPF_JEQ);
1021 jeq_common:
1023 * If the next insn is EXIT and we are jumping around
1024 * only it, invert the sense of the compare and
1025 * conditionally jump to the exit. Poor man's branch
1026 * chaining.
1028 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1029 b_off = b_imm(exit_idx, ctx);
1030 if (is_bad_offset(b_off)) {
1031 target = j_target(ctx, exit_idx);
1032 if (target == (unsigned int)-1)
1033 return -E2BIG;
1034 cmp_eq = !cmp_eq;
1035 b_off = 4 * 3;
1036 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1037 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1038 ctx->long_b_conversion = 1;
1042 if (cmp_eq)
1043 emit_instr(ctx, bne, dst, src, b_off);
1044 else
1045 emit_instr(ctx, beq, dst, src, b_off);
1046 emit_instr(ctx, nop);
1047 if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
1048 emit_instr(ctx, j, target);
1049 emit_instr(ctx, nop);
1051 return 2; /* We consumed the exit. */
1053 b_off = b_imm(this_idx + insn->off + 1, ctx);
1054 if (is_bad_offset(b_off)) {
1055 target = j_target(ctx, this_idx + insn->off + 1);
1056 if (target == (unsigned int)-1)
1057 return -E2BIG;
1058 cmp_eq = !cmp_eq;
1059 b_off = 4 * 3;
1060 if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
1061 ctx->offsets[this_idx] |= OFFSETS_B_CONV;
1062 ctx->long_b_conversion = 1;
1066 if (cmp_eq)
1067 emit_instr(ctx, beq, dst, src, b_off);
1068 else
1069 emit_instr(ctx, bne, dst, src, b_off);
1070 emit_instr(ctx, nop);
1071 if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
1072 emit_instr(ctx, j, target);
1073 emit_instr(ctx, nop);
1075 break;
1076 case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
1077 case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
1078 case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
1079 case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
1080 cmp_eq = (bpf_op == BPF_JSGE);
1081 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1082 if (dst < 0)
1083 return dst;
1085 if (insn->imm == 0) {
1086 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1087 b_off = b_imm(exit_idx, ctx);
1088 if (is_bad_offset(b_off))
1089 return -E2BIG;
1090 switch (bpf_op) {
1091 case BPF_JSGT:
1092 emit_instr(ctx, blez, dst, b_off);
1093 break;
1094 case BPF_JSGE:
1095 emit_instr(ctx, bltz, dst, b_off);
1096 break;
1097 case BPF_JSLT:
1098 emit_instr(ctx, bgez, dst, b_off);
1099 break;
1100 case BPF_JSLE:
1101 emit_instr(ctx, bgtz, dst, b_off);
1102 break;
1104 emit_instr(ctx, nop);
1105 return 2; /* We consumed the exit. */
1107 b_off = b_imm(this_idx + insn->off + 1, ctx);
1108 if (is_bad_offset(b_off))
1109 return -E2BIG;
1110 switch (bpf_op) {
1111 case BPF_JSGT:
1112 emit_instr(ctx, bgtz, dst, b_off);
1113 break;
1114 case BPF_JSGE:
1115 emit_instr(ctx, bgez, dst, b_off);
1116 break;
1117 case BPF_JSLT:
1118 emit_instr(ctx, bltz, dst, b_off);
1119 break;
1120 case BPF_JSLE:
1121 emit_instr(ctx, blez, dst, b_off);
1122 break;
1124 emit_instr(ctx, nop);
1125 break;
1128 * Only a "less than" compare is available, so use imm + 1 for
1129 * JSGT and JSLE (dst < imm + 1 is dst <= imm), and plain imm otherwise.
1131 if (bpf_op == BPF_JSGT)
1132 t64s = insn->imm + 1;
1133 else if (bpf_op == BPF_JSLE)
1134 t64s = insn->imm + 1;
1135 else
1136 t64s = insn->imm;
1138 cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
1139 if (t64s >= S16_MIN && t64s <= S16_MAX) {
1140 emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
1141 src = MIPS_R_AT;
1142 dst = MIPS_R_ZERO;
1143 goto jeq_common;
1145 emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
1146 emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
1147 src = MIPS_R_AT;
1148 dst = MIPS_R_ZERO;
1149 goto jeq_common;
1151 case BPF_JMP | BPF_JGT | BPF_K:
1152 case BPF_JMP | BPF_JGE | BPF_K:
1153 case BPF_JMP | BPF_JLT | BPF_K:
1154 case BPF_JMP | BPF_JLE | BPF_K:
1155 cmp_eq = (bpf_op == BPF_JGE);
1156 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1157 if (dst < 0)
1158 return dst;
1160 * Only a "less than" compare is available, so use imm + 1 for
1161 * JGT and JLE (dst < imm + 1 is dst <= imm), and plain imm otherwise.
1163 if (bpf_op == BPF_JGT)
1164 t64s = (u64)(u32)(insn->imm) + 1;
1165 else if (bpf_op == BPF_JLE)
1166 t64s = (u64)(u32)(insn->imm) + 1;
1167 else
1168 t64s = (u64)(u32)(insn->imm);
1170 cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;
1172 emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
1173 emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
1174 src = MIPS_R_AT;
1175 dst = MIPS_R_ZERO;
1176 goto jeq_common;
1178 case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
1179 dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
1180 if (dst < 0)
1181 return dst;
1183 if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
1184 if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
1185 b_off = b_imm(exit_idx, ctx);
1186 if (is_bad_offset(b_off))
1187 return -E2BIG;
1188 emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
1189 emit_instr(ctx, nop);
1190 return 2; /* We consumed the exit. */
1192 b_off = b_imm(this_idx + insn->off + 1, ctx);
1193 if (is_bad_offset(b_off))
1194 return -E2BIG;
1195 emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
1196 emit_instr(ctx, nop);
1197 break;
1199 t64 = (u32)insn->imm;
1200 emit_const_to_reg(ctx, MIPS_R_AT, t64);
1201 emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
1202 src = MIPS_R_AT;
1203 dst = MIPS_R_ZERO;
1204 cmp_eq = false;
1205 goto jeq_common;
1207 case BPF_JMP | BPF_JA:
1209 * Prefer relative branch for easier debugging, but
1210 * fall back if needed.
1212 b_off = b_imm(this_idx + insn->off + 1, ctx);
1213 if (is_bad_offset(b_off)) {
1214 target = j_target(ctx, this_idx + insn->off + 1);
1215 if (target == (unsigned int)-1)
1216 return -E2BIG;
1217 emit_instr(ctx, j, target);
1218 } else {
1219 emit_instr(ctx, b, b_off);
1221 emit_instr(ctx, nop);
1222 break;
1223 case BPF_LD | BPF_DW | BPF_IMM:
1224 if (insn->src_reg != 0)
1225 return -EINVAL;
1226 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1227 if (dst < 0)
1228 return dst;
1229 t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
1230 emit_const_to_reg(ctx, dst, t64);
1231 return 2; /* Double slot insn */
1233 case BPF_JMP | BPF_CALL:
1234 ctx->flags |= EBPF_SAVE_RA;
1235 t64s = (s64)insn->imm + (s64)__bpf_call_base;
1236 emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
1237 emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
1238 /* delay slot */
1239 emit_instr(ctx, nop);
1240 break;
1242 case BPF_JMP | BPF_TAIL_CALL:
1243 if (emit_bpf_tail_call(ctx, this_idx))
1244 return -EINVAL;
1245 break;
1247 case BPF_ALU | BPF_END | BPF_FROM_BE:
1248 case BPF_ALU | BPF_END | BPF_FROM_LE:
1249 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1250 if (dst < 0)
1251 return dst;
1252 td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
1253 if (insn->imm == 64 && td == REG_32BIT)
1254 emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
1256 if (insn->imm != 64 &&
1257 (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
1258 /* sign extend */
1259 emit_instr(ctx, sll, dst, dst, 0);
1262 #ifdef __BIG_ENDIAN
1263 need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
1264 #else
1265 need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
1266 #endif
1267 if (insn->imm == 16) {
1268 if (need_swap)
1269 emit_instr(ctx, wsbh, dst, dst);
1270 emit_instr(ctx, andi, dst, dst, 0xffff);
1271 } else if (insn->imm == 32) {
1272 if (need_swap) {
1273 emit_instr(ctx, wsbh, dst, dst);
1274 emit_instr(ctx, rotr, dst, dst, 16);
1276 } else { /* 64-bit */
1277 if (need_swap) {
1278 emit_instr(ctx, dsbh, dst, dst);
1279 emit_instr(ctx, dshd, dst, dst);
1282 break;
1284 case BPF_ST | BPF_B | BPF_MEM:
1285 case BPF_ST | BPF_H | BPF_MEM:
1286 case BPF_ST | BPF_W | BPF_MEM:
1287 case BPF_ST | BPF_DW | BPF_MEM:
1288 if (insn->dst_reg == BPF_REG_10) {
1289 ctx->flags |= EBPF_SEEN_FP;
1290 dst = MIPS_R_SP;
1291 mem_off = insn->off + MAX_BPF_STACK;
1292 } else {
1293 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1294 if (dst < 0)
1295 return dst;
1296 mem_off = insn->off;
1298 gen_imm_to_reg(insn, MIPS_R_AT, ctx);
1299 switch (BPF_SIZE(insn->code)) {
1300 case BPF_B:
1301 emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
1302 break;
1303 case BPF_H:
1304 emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
1305 break;
1306 case BPF_W:
1307 emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
1308 break;
1309 case BPF_DW:
1310 emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
1311 break;
1313 break;
1315 case BPF_LDX | BPF_B | BPF_MEM:
1316 case BPF_LDX | BPF_H | BPF_MEM:
1317 case BPF_LDX | BPF_W | BPF_MEM:
1318 case BPF_LDX | BPF_DW | BPF_MEM:
1319 if (insn->src_reg == BPF_REG_10) {
1320 ctx->flags |= EBPF_SEEN_FP;
1321 src = MIPS_R_SP;
1322 mem_off = insn->off + MAX_BPF_STACK;
1323 } else {
1324 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1325 if (src < 0)
1326 return src;
1327 mem_off = insn->off;
1329 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1330 if (dst < 0)
1331 return dst;
1332 switch (BPF_SIZE(insn->code)) {
1333 case BPF_B:
1334 emit_instr(ctx, lbu, dst, mem_off, src);
1335 break;
1336 case BPF_H:
1337 emit_instr(ctx, lhu, dst, mem_off, src);
1338 break;
1339 case BPF_W:
1340 emit_instr(ctx, lw, dst, mem_off, src);
1341 break;
1342 case BPF_DW:
1343 emit_instr(ctx, ld, dst, mem_off, src);
1344 break;
1346 break;
1348 case BPF_STX | BPF_B | BPF_MEM:
1349 case BPF_STX | BPF_H | BPF_MEM:
1350 case BPF_STX | BPF_W | BPF_MEM:
1351 case BPF_STX | BPF_DW | BPF_MEM:
1352 case BPF_STX | BPF_W | BPF_XADD:
1353 case BPF_STX | BPF_DW | BPF_XADD:
1354 if (insn->dst_reg == BPF_REG_10) {
1355 ctx->flags |= EBPF_SEEN_FP;
1356 dst = MIPS_R_SP;
1357 mem_off = insn->off + MAX_BPF_STACK;
1358 } else {
1359 dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
1360 if (dst < 0)
1361 return dst;
1362 mem_off = insn->off;
1364 src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
1365 if (src < 0)
1366 return src;
1367 if (BPF_MODE(insn->code) == BPF_XADD) {
1368 switch (BPF_SIZE(insn->code)) {
1369 case BPF_W:
1370 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1371 emit_instr(ctx, sll, MIPS_R_AT, src, 0);
1372 src = MIPS_R_AT;
1374 emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
1375 emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
1376 emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
1378 * On failure, branch back up to the LL (-4
1379 * instructions of 4 bytes each).
1381 emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
1382 emit_instr(ctx, nop);
1383 break;
1384 case BPF_DW:
1385 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1386 emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
1387 emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
1388 src = MIPS_R_AT;
1390 emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
1391 emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
1392 emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
1393 emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
1394 emit_instr(ctx, nop);
1395 break;
1397 } else { /* BPF_MEM */
1398 switch (BPF_SIZE(insn->code)) {
1399 case BPF_B:
1400 emit_instr(ctx, sb, src, mem_off, dst);
1401 break;
1402 case BPF_H:
1403 emit_instr(ctx, sh, src, mem_off, dst);
1404 break;
1405 case BPF_W:
1406 emit_instr(ctx, sw, src, mem_off, dst);
1407 break;
1408 case BPF_DW:
1409 if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
1410 emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
1411 emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
1412 src = MIPS_R_AT;
1414 emit_instr(ctx, sd, src, mem_off, dst);
1415 break;
1418 break;
1420 default:
1421 pr_err("NOT HANDLED %d - (%02x)\n",
1422 this_idx, (unsigned int)insn->code);
1423 return -EINVAL;
1425 return 1;
1428 #define RVT_VISITED_MASK 0xc000000000000000ull
1429 #define RVT_FALL_THROUGH 0x4000000000000000ull
1430 #define RVT_BRANCH_TAKEN 0x8000000000000000ull
1431 #define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
1433 static int build_int_body(struct jit_ctx *ctx)
1435 const struct bpf_prog *prog = ctx->skf;
1436 const struct bpf_insn *insn;
1437 int i, r;
1439 for (i = 0; i < prog->len; ) {
1440 insn = prog->insnsi + i;
1441 if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
1442 /* dead instruction, don't emit it. */
1443 i++;
1444 continue;
1447 if (ctx->target == NULL)
1448 ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
1450 r = build_one_insn(insn, ctx, i, prog->len);
1451 if (r < 0)
1452 return r;
1453 i += r;
1455 /* epilogue offset */
1456 if (ctx->target == NULL)
1457 ctx->offsets[i] = ctx->idx * 4;
1460 * All exits have an offset of the epilogue, some offsets may
1461 * not have been set due to branch-around threading, so set
1462 * them now.
1464 if (ctx->target == NULL)
1465 for (i = 0; i < prog->len; i++) {
1466 insn = prog->insnsi + i;
1467 if (insn->code == (BPF_JMP | BPF_EXIT))
1468 ctx->offsets[i] = ctx->idx * 4;
1470 return 0;
1473 /* return the last idx processed, or negative for error */
1474 static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
1475 int start_idx, bool follow_taken)
1477 const struct bpf_prog *prog = ctx->skf;
1478 const struct bpf_insn *insn;
1479 u64 exit_rvt = initial_rvt;
1480 u64 *rvt = ctx->reg_val_types;
1481 int idx;
1482 int reg;
1484 for (idx = start_idx; idx < prog->len; idx++) {
1485 rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
1486 insn = prog->insnsi + idx;
1487 switch (BPF_CLASS(insn->code)) {
1488 case BPF_ALU:
1489 switch (BPF_OP(insn->code)) {
1490 case BPF_ADD:
1491 case BPF_SUB:
1492 case BPF_MUL:
1493 case BPF_DIV:
1494 case BPF_OR:
1495 case BPF_AND:
1496 case BPF_LSH:
1497 case BPF_RSH:
1498 case BPF_NEG:
1499 case BPF_MOD:
1500 case BPF_XOR:
1501 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1502 break;
1503 case BPF_MOV:
1504 if (BPF_SRC(insn->code)) {
1505 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1506 } else {
1507 /* IMM to REG move */
1508 if (insn->imm >= 0)
1509 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1510 else
1511 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1513 break;
1514 case BPF_END:
1515 if (insn->imm == 64)
1516 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1517 else if (insn->imm == 32)
1518 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1519 else /* insn->imm == 16 */
1520 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1521 break;
1523 rvt[idx] |= RVT_DONE;
1524 break;
1525 case BPF_ALU64:
1526 switch (BPF_OP(insn->code)) {
1527 case BPF_MOV:
1528 if (BPF_SRC(insn->code)) {
1529 /* REG to REG move */
1530 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1531 } else {
1532 /* IMM to REG move */
1533 if (insn->imm >= 0)
1534 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1535 else
1536 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
1538 break;
1539 default:
1540 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1542 rvt[idx] |= RVT_DONE;
1543 break;
1544 case BPF_LD:
1545 switch (BPF_SIZE(insn->code)) {
1546 case BPF_DW:
1547 if (BPF_MODE(insn->code) == BPF_IMM) {
1548 s64 val;
1550 val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
1551 if (val > 0 && val <= S32_MAX)
1552 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1553 else if (val >= S32_MIN && val <= S32_MAX)
1554 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
1555 else
1556 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1557 rvt[idx] |= RVT_DONE;
1558 idx++;
1559 } else {
1560 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1562 break;
1563 case BPF_B:
1564 case BPF_H:
1565 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1566 break;
1567 case BPF_W:
1568 if (BPF_MODE(insn->code) == BPF_IMM)
1569 set_reg_val_type(&exit_rvt, insn->dst_reg,
1570 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
1571 else
1572 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1573 break;
1575 rvt[idx] |= RVT_DONE;
1576 break;
1577 case BPF_LDX:
1578 switch (BPF_SIZE(insn->code)) {
1579 case BPF_DW:
1580 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
1581 break;
1582 case BPF_B:
1583 case BPF_H:
1584 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
1585 break;
1586 case BPF_W:
1587 set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
1588 break;
1590 rvt[idx] |= RVT_DONE;
1591 break;
1592 case BPF_JMP:
1593 switch (BPF_OP(insn->code)) {
1594 case BPF_EXIT:
1595 rvt[idx] = RVT_DONE | exit_rvt;
1596 rvt[prog->len] = exit_rvt;
1597 return idx;
1598 case BPF_JA:
1599 rvt[idx] |= RVT_DONE;
1600 idx += insn->off;
1601 break;
1602 case BPF_JEQ:
1603 case BPF_JGT:
1604 case BPF_JGE:
1605 case BPF_JLT:
1606 case BPF_JLE:
1607 case BPF_JSET:
1608 case BPF_JNE:
1609 case BPF_JSGT:
1610 case BPF_JSGE:
1611 case BPF_JSLT:
1612 case BPF_JSLE:
1613 if (follow_taken) {
1614 rvt[idx] |= RVT_BRANCH_TAKEN;
1615 idx += insn->off;
1616 follow_taken = false;
1617 } else {
1618 rvt[idx] |= RVT_FALL_THROUGH;
1620 break;
1621 case BPF_CALL:
1622 set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
1623 /* Upon call return, argument registers are clobbered. */
1624 for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
1625 set_reg_val_type(&exit_rvt, reg, REG_64BIT);
1627 rvt[idx] |= RVT_DONE;
1628 break;
1629 default:
1630 WARN(1, "Unhandled BPF_JMP case.\n");
1631 rvt[idx] |= RVT_DONE;
1632 break;
1634 break;
1635 default:
1636 rvt[idx] |= RVT_DONE;
1637 break;
1640 return idx;
1644 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
1645 * each eBPF insn. This allows unneeded sign and zero extension
1646 * operations to be omitted.
1648 * Doesn't yet handle confluence of control paths with conflicting
1649 * ranges, but it is good enough for most sane code.
1651 static int reg_val_propagate(struct jit_ctx *ctx)
1653 const struct bpf_prog *prog = ctx->skf;
1654 u64 exit_rvt;
1655 int reg;
1656 int i;
1659 * 11 registers * 3 bits/reg leaves top bits free for other
1660 * uses. Bits 62..63 are used to see if we have visited an insn.
1662 exit_rvt = 0;
1664 /* Upon entry, argument registers are 64-bit. */
1665 for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
1666 set_reg_val_type(&exit_rvt, reg, REG_64BIT);
1669 * First follow all conditional branches on the fall-through
1670 * edge of control flow.
1672 reg_val_propagate_range(ctx, exit_rvt, 0, false);
1673 restart_search:
1675 * Then repeatedly find the first conditional branch where
1676 * both edges of control flow have not been taken, and follow
1677 * the branch taken edge. We will end up restarting the
1678 * search once per conditional branch insn.
1680 for (i = 0; i < prog->len; i++) {
1681 u64 rvt = ctx->reg_val_types[i];
1683 if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
1684 (rvt & RVT_VISITED_MASK) == 0)
1685 continue;
1686 if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
1687 reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
1688 } else { /* RVT_BRANCH_TAKEN */
1689 WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
1690 reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
1692 goto restart_search;
1695 * Eventually all conditional branches have been followed on
1696 * both branches and we are done. Any insn that has not been
1697 * visited at this point is dead.
1700 return 0;
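/*
 * Small illustration: for "r0 = 1; r0 += r1; exit" the entry state marks
 * r1..r5 REG_64BIT, the non-negative immediate move records r0 as
 * REG_32BIT_POS, and the 64-bit register add then widens r0 to
 * REG_64BIT; that final state is what build_int_epilogue() consults when
 * deciding whether $v0 needs the defensive sign extension.
 */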
1703 static void jit_fill_hole(void *area, unsigned int size)
1705 u32 *p;
1707 /* We are guaranteed to have aligned memory. */
1708 for (p = area; size >= sizeof(u32); size -= sizeof(u32))
1709 uasm_i_break(&p, BRK_BUG); /* Increments p */
1712 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1714 struct bpf_prog *orig_prog = prog;
1715 bool tmp_blinded = false;
1716 struct bpf_prog *tmp;
1717 struct bpf_binary_header *header = NULL;
1718 struct jit_ctx ctx;
1719 unsigned int image_size;
1720 u8 *image_ptr;
1722 if (!prog->jit_requested || !cpu_has_mips64r2)
1723 return prog;
1725 tmp = bpf_jit_blind_constants(prog);
1726 /* If blinding was requested and we failed during blinding,
1727 * we must fall back to the interpreter.
1729 if (IS_ERR(tmp))
1730 return orig_prog;
1731 if (tmp != prog) {
1732 tmp_blinded = true;
1733 prog = tmp;
1736 memset(&ctx, 0, sizeof(ctx));
1738 preempt_disable();
1739 switch (current_cpu_type()) {
1740 case CPU_CAVIUM_OCTEON:
1741 case CPU_CAVIUM_OCTEON_PLUS:
1742 case CPU_CAVIUM_OCTEON2:
1743 case CPU_CAVIUM_OCTEON3:
1744 ctx.use_bbit_insns = 1;
1745 break;
1746 default:
1747 ctx.use_bbit_insns = 0;
1749 preempt_enable();
1751 ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
1752 if (ctx.offsets == NULL)
1753 goto out_err;
1755 ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
1756 if (ctx.reg_val_types == NULL)
1757 goto out_err;
1759 ctx.skf = prog;
1761 if (reg_val_propagate(&ctx))
1762 goto out_err;
1765 * First pass discovers used resources and instruction offsets
1766 * assuming short branches are used.
1768 if (build_int_body(&ctx))
1769 goto out_err;
1772 * If no calls are made (no EBPF_SAVE_RA), then the tail call count
1773 * stays in $v1, else we must save it in $s4.
1775 if (ctx.flags & EBPF_SEEN_TC) {
1776 if (ctx.flags & EBPF_SAVE_RA)
1777 ctx.flags |= EBPF_SAVE_S4;
1778 else
1779 ctx.flags |= EBPF_TCC_IN_V1;
1783 * Second pass generates offsets; if any branches are out of
1784 * range, a jump-around long sequence is generated, and we have
1785 * to try again from the beginning to generate the new
1786 * offsets. This is done until no additional conversions are
1787 * necessary.
1789 do {
1790 ctx.idx = 0;
1791 ctx.gen_b_offsets = 1;
1792 ctx.long_b_conversion = 0;
1793 if (gen_int_prologue(&ctx))
1794 goto out_err;
1795 if (build_int_body(&ctx))
1796 goto out_err;
1797 if (build_int_epilogue(&ctx, MIPS_R_RA))
1798 goto out_err;
1799 } while (ctx.long_b_conversion);
1801 image_size = 4 * ctx.idx;
1803 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1804 sizeof(u32), jit_fill_hole);
1805 if (header == NULL)
1806 goto out_err;
1808 ctx.target = (u32 *)image_ptr;
1810 /* Third pass generates the code */
1811 ctx.idx = 0;
1812 if (gen_int_prologue(&ctx))
1813 goto out_err;
1814 if (build_int_body(&ctx))
1815 goto out_err;
1816 if (build_int_epilogue(&ctx, MIPS_R_RA))
1817 goto out_err;
1819 /* Update the icache */
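/*
 * flush_icache_range() takes the address one byte past the region, so
 * the end is &ctx.target[ctx.idx]: the image start plus ctx.idx emitted
 * 4-byte instructions.
 */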
1820 flush_icache_range((unsigned long)ctx.target,
1821 (unsigned long)&ctx.target[ctx.idx]);
1823 if (bpf_jit_enable > 1)
1824 /* Dump JIT code */
1825 bpf_jit_dump(prog->len, image_size, 2, ctx.target);
1827 bpf_jit_binary_lock_ro(header);
1828 prog->bpf_func = (void *)ctx.target;
1829 prog->jited = 1;
1830 prog->jited_len = image_size;
1831 out_normal:
1832 if (tmp_blinded)
1833 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1834 tmp : orig_prog);
1835 kfree(ctx.offsets);
1836 kfree(ctx.reg_val_types);
1838 return prog;
1840 out_err:
1841 prog = orig_prog;
1842 if (header)
1843 bpf_jit_binary_free(header);
1844 goto out_normal;