/*
 * Just-In-Time compiler for eBPF filters on MIPS
 *
 * Copyright (c) 2017 Cavium, Inc.
 *
 * Based on code from:
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/slab.h>
#include <asm/bitops.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

/* Registers used by JIT */
#define MIPS_R_ZERO	0
#define MIPS_R_AT	1
#define MIPS_R_V0	2	/* BPF_R0 */
#define MIPS_R_V1	3
#define MIPS_R_A0	4	/* BPF_R1 */
#define MIPS_R_A1	5	/* BPF_R2 */
#define MIPS_R_A2	6	/* BPF_R3 */
#define MIPS_R_A3	7	/* BPF_R4 */
#define MIPS_R_A4	8	/* BPF_R5 */
#define MIPS_R_T4	12	/* BPF_AX */
#define MIPS_R_T5	13
#define MIPS_R_T6	14
#define MIPS_R_T7	15
#define MIPS_R_S0	16	/* BPF_R6 */
#define MIPS_R_S1	17	/* BPF_R7 */
#define MIPS_R_S2	18	/* BPF_R8 */
#define MIPS_R_S3	19	/* BPF_R9 */
#define MIPS_R_S4	20	/* BPF_TCC */
#define MIPS_R_S5	21
#define MIPS_R_S6	22
#define MIPS_R_S7	23
#define MIPS_R_T8	24
#define MIPS_R_T9	25
#define MIPS_R_SP	29
#define MIPS_R_RA	31

/* eBPF flags */
#define EBPF_SAVE_S0	BIT(0)
#define EBPF_SAVE_S1	BIT(1)
#define EBPF_SAVE_S2	BIT(2)
#define EBPF_SAVE_S3	BIT(3)
#define EBPF_SAVE_S4	BIT(4)
#define EBPF_SAVE_RA	BIT(5)
#define EBPF_SEEN_FP	BIT(6)
#define EBPF_SEEN_TC	BIT(7)
#define EBPF_TCC_IN_V1	BIT(8)

/*
 * For the mips64 ISA, we need to track the value range or type for
 * each JIT register.  The BPF machine requires zero extended 32-bit
 * values, but the mips64 ISA requires sign extended 32-bit values.
 * At each point in the BPF program we track the state of every
 * register so that we can zero extend or sign extend as the BPF
 * semantics require.
 */
enum reg_val_type {
	/* uninitialized */
	REG_UNKNOWN,
	/* not known to be 32-bit compatible. */
	REG_64BIT,
	/* 32-bit compatible, no truncation needed for 64-bit ops. */
	REG_64BIT_32BIT,
	/* 32-bit compatible, need truncation for 64-bit ops. */
	REG_32BIT,
	/* 32-bit zero extended. */
	REG_32BIT_ZERO_EX,
	/* 32-bit no sign/zero extension needed. */
	REG_32BIT_POS
};
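
/*
 * Added note: mips64 32-bit ALU ops sign extend their result into the
 * full 64-bit register, e.g. addu of 0x7fffffff + 1 leaves
 * 0xffffffff80000000 in the register, while BPF semantics require
 * 0x0000000080000000.  The tracked type tells the JIT when a dinsu
 * (zero extend) or sll (sign extend/truncate) fix-up must be emitted
 * before a value is used.
 */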

/*
 * high bit of offsets indicates if long branch conversion done at
 * this insn.
 */
#define OFFSETS_B_CONV	BIT(31)

/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @stack_size:		eBPF stack size
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 * @reg_val_types:	Packed enum reg_val_type for each register
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	int stack_size;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u64 *reg_val_types;
	unsigned int long_b_conversion:1;
	unsigned int gen_b_offsets:1;
	unsigned int use_bbit_insns:1;
};

static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
{
	*rvt &= ~(7ull << (reg * 3));
	*rvt |= ((u64)type << (reg * 3));
}

static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
					  int index, int reg)
{
	return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
}
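
/*
 * Worked example (added note): each register's type occupies a 3-bit
 * field at bit position reg * 3, so BPF_REG_2's type sits in bits 6..8
 * and is read back as (rvt >> 6) & 7.  The top bits of the u64 stay
 * free for the RVT_* visit flags defined further down.
 */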

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
	unsigned long target_va, base_va;
	unsigned int r;

	if (!ctx->target)
		return 0;

	base_va = (unsigned long)ctx->target;
	target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
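
	/*
	 * Added note: the MIPS j instruction encodes only the low 28 bits
	 * of the target address and keeps the PC's upper bits, so the
	 * jump must stay within the same 256 MB aligned region;
	 * otherwise report failure.
	 */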
	if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
		return (unsigned int)-1;
	r = target_va & 0x0ffffffful;
	return r;
}

/* Compute the immediate value for PC-relative branches. */
static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (!ctx->gen_b_offsets)
		return 0;

	/*
	 * We want a pc-relative branch.  tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
		(ctx->idx * 4) - 4;
}

enum which_ebpf_reg {
	src_reg,
	src_reg_no_fp,
	dst_reg,
	dst_reg_fp_ok
};

/*
 * For eBPF, the register mapping naturally falls out of the
 * requirements of eBPF and the MIPS n64 ABI.  We don't maintain a
 * separate frame pointer, so BPF_REG_10 relative accesses are
 * adjusted to be $sp relative.
 */
static int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
			    enum which_ebpf_reg w)
{
	int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
		insn->src_reg : insn->dst_reg;

	switch (ebpf_reg) {
	case BPF_REG_0:
		return MIPS_R_V0;
	case BPF_REG_1:
		return MIPS_R_A0;
	case BPF_REG_2:
		return MIPS_R_A1;
	case BPF_REG_3:
		return MIPS_R_A2;
	case BPF_REG_4:
		return MIPS_R_A3;
	case BPF_REG_5:
		return MIPS_R_A4;
	case BPF_REG_6:
		ctx->flags |= EBPF_SAVE_S0;
		return MIPS_R_S0;
	case BPF_REG_7:
		ctx->flags |= EBPF_SAVE_S1;
		return MIPS_R_S1;
	case BPF_REG_8:
		ctx->flags |= EBPF_SAVE_S2;
		return MIPS_R_S2;
	case BPF_REG_9:
		ctx->flags |= EBPF_SAVE_S3;
		return MIPS_R_S3;
	case BPF_REG_10:
		if (w == dst_reg || w == src_reg_no_fp)
			goto bad_reg;
		ctx->flags |= EBPF_SEEN_FP;
		/*
		 * Needs special handling, return something that
		 * cannot be clobbered just in case.
		 */
		return MIPS_R_ZERO;
	case BPF_REG_AX:
		return MIPS_R_T4;
	default:
bad_reg:
		WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
		return -EINVAL;
	}
}

/*
 * eBPF stack frame will be something like:
 *
 *  Entry $sp ------>   +--------------------------------+
 *                      |   $ra  (optional)              |
 *                      +--------------------------------+
 *                      |   $s0  (optional)              |
 *                      +--------------------------------+
 *                      |   $s1  (optional)              |
 *                      +--------------------------------+
 *                      |   $s2  (optional)              |
 *                      +--------------------------------+
 *                      |   $s3  (optional)              |
 *                      +--------------------------------+
 *                      |   $s4  (optional)              |
 *                      +--------------------------------+
 *                      |   tmp-storage  (if $ra saved)  |
 * $sp + tmp_offset --> +--------------------------------+  <--BPF_REG_10
 *                      |   BPF_REG_10 relative storage  |
 *                      |    MAX_BPF_STACK (optional)    |
 *                      |      .                         |
 *                      |      .                         |
 *                      |      .                         |
 *     $sp -------->    +--------------------------------+
 *
 * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
 * area is not allocated.
 */
static int gen_int_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0;
	int store_offset;
	int locals_size;

	if (ctx->flags & EBPF_SAVE_RA)
		/*
		 * If saving $ra, we are doing a function call and
		 * may need an extra 8-byte tmp area.
		 */
		stack_adjust += 16;
	if (ctx->flags & EBPF_SAVE_S0)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S1)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S2)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S3)
		stack_adjust += 8;
	if (ctx->flags & EBPF_SAVE_S4)
		stack_adjust += 8;

	BUILD_BUG_ON(MAX_BPF_STACK & 7);
	locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;

	stack_adjust += locals_size;

	ctx->stack_size = stack_adjust;

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in $v1 from the caller.
	 */
	emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
	else
		return 0;

	store_offset = stack_adjust - 8;

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}

	if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
		emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);

	return 0;
}
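
/*
 * Worked example (added note): a program that makes helper calls
 * (EBPF_SAVE_RA), uses BPF_REG_6 (EBPF_SAVE_S0) and touches its stack
 * (EBPF_SEEN_FP) gets stack_adjust = 16 + 8 + MAX_BPF_STACK = 536
 * with MAX_BPF_STACK of 512: $ra is stored at $sp + 528, $s0 at
 * $sp + 520, and BPF_REG_10 points at $sp + 512, the top of the
 * MAX_BPF_STACK area.
 */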

static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
	const struct bpf_prog *prog = ctx->skf;
	int stack_adjust = ctx->stack_size;
	int store_offset = stack_adjust - 8;
	enum reg_val_type td;
	int r0 = MIPS_R_V0;

	if (dest_reg == MIPS_R_RA) {
		/* Don't let zero extended value escape. */
		td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			emit_instr(ctx, sll, r0, r0, 0);
	}

	if (ctx->flags & EBPF_SAVE_RA) {
		emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S0) {
		emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S1) {
		emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S2) {
		emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S3) {
		emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	if (ctx->flags & EBPF_SAVE_S4) {
		emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
		store_offset -= 8;
	}
	emit_instr(ctx, jr, dest_reg);
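
	/*
	 * Added note: the instruction below executes in the branch delay
	 * slot of the jr above, so the stack pointer is restored (or a
	 * nop is emitted) after the jump has been taken.
	 */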
	if (stack_adjust)
		emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
	else
		emit_instr(ctx, nop);

	return 0;
}

static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
			   struct jit_ctx *ctx)
{
	if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
		emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
	} else {
		int lower = (s16)(insn->imm & 0xffff);
		int upper = insn->imm - lower;

		emit_instr(ctx, lui, reg, upper >> 16);
		emit_instr(ctx, addiu, reg, reg, lower);
	}
}
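
/*
 * Worked example (added note): for imm = 0x12348765, lower is
 * (s16)0x8765 = -0x789b, so upper = imm - lower = 0x12350000.  The
 * emitted "lui reg, 0x1235; addiu reg, reg, -0x789b" reconstructs
 * 0x12350000 - 0x789b = 0x12348765, compensating for addiu's sign
 * extension of the low half.
 */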

static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			int idx)
{
	int upper_bound, lower_bound;
	int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);

	if (dst < 0)
		return dst;

	switch (BPF_OP(insn->code)) {
	case BPF_MOV:
	case BPF_ADD:
		upper_bound = S16_MAX;
		lower_bound = S16_MIN;
		break;
	case BPF_SUB:
		upper_bound = -(int)S16_MIN;
		lower_bound = -(int)S16_MAX;
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		upper_bound = 0xffff;
		lower_bound = 0;
		break;
	case BPF_RSH:
	case BPF_LSH:
	case BPF_ARSH:
		/* Shift amounts are truncated, no need for bounds */
		upper_bound = S32_MAX;
		lower_bound = S32_MIN;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Immediate move clobbers the register, so no sign/zero
	 * extension needed.
	 */
	if (BPF_CLASS(insn->code) == BPF_ALU64 &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
		emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	/* BPF_ALU | BPF_LSH doesn't need separate sign extension */
	if (BPF_CLASS(insn->code) == BPF_ALU &&
	    BPF_OP(insn->code) != BPF_LSH &&
	    BPF_OP(insn->code) != BPF_MOV &&
	    get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
		emit_instr(ctx, sll, dst, dst, 0);

	if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
		/* single insn immediate case */
		switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
		case BPF_ALU64 | BPF_MOV:
			emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU64 | BPF_AND:
		case BPF_ALU | BPF_AND:
			emit_instr(ctx, andi, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_OR:
		case BPF_ALU | BPF_OR:
			emit_instr(ctx, ori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_XOR:
		case BPF_ALU | BPF_XOR:
			emit_instr(ctx, xori, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_ADD:
			emit_instr(ctx, daddiu, dst, dst, insn->imm);
			break;
		case BPF_ALU64 | BPF_SUB:
			emit_instr(ctx, daddiu, dst, dst, -insn->imm);
			break;
		case BPF_ALU64 | BPF_RSH:
			emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_RSH:
			emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_LSH:
			emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_LSH:
			emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU64 | BPF_ARSH:
			emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
			break;
		case BPF_ALU | BPF_ARSH:
			emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
			break;
		case BPF_ALU | BPF_MOV:
			emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
			break;
		case BPF_ALU | BPF_ADD:
			emit_instr(ctx, addiu, dst, dst, insn->imm);
			break;
		case BPF_ALU | BPF_SUB:
			emit_instr(ctx, addiu, dst, dst, -insn->imm);
			break;
		default:
			return -EINVAL;
		}
	} else {
		/* multi insn immediate case */
		if (BPF_OP(insn->code) == BPF_MOV) {
			gen_imm_to_reg(insn, dst, ctx);
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
			case BPF_ALU64 | BPF_AND:
			case BPF_ALU | BPF_AND:
				emit_instr(ctx, and, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_OR:
			case BPF_ALU | BPF_OR:
				emit_instr(ctx, or, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_XOR:
			case BPF_ALU | BPF_XOR:
				emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_ADD:
				emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU64 | BPF_SUB:
				emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_ADD:
				emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
				break;
			case BPF_ALU | BPF_SUB:
				emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
{
	if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
		emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
	} else if (value >= 0xffffffff80000000ull ||
		   (value < 0x80000000 && value > 0xffff)) {
		emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
		emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
	} else {
		int i;
		bool seen_part = false;
		int needed_shift = 0;

		for (i = 0; i < 4; i++) {
			u64 part = (value >> (16 * (3 - i))) & 0xffff;

			if (seen_part && needed_shift > 0 && (part || i == 3)) {
				emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
				needed_shift = 0;
			}
			if (part) {
				if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
					emit_instr(ctx, lui, dst, (s32)(s16)part);
					needed_shift = -16;
				} else {
					emit_instr(ctx, ori, dst,
						   seen_part ? dst : MIPS_R_ZERO,
						   (unsigned int)part);
				}
				seen_part = true;
			}
			if (seen_part)
				needed_shift += 16;
		}
	}
}
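
/*
 * Worked example (added note): value = 0x123456789abcdef0 takes the
 * general path and emits
 *	lui  dst, 0x1234
 *	ori  dst, dst, 0x5678
 *	dsll dst, dst, 16
 *	ori  dst, dst, 0x9abc
 *	dsll dst, dst, 16
 *	ori  dst, dst, 0xdef0
 * while values fitting in a sign-extended 16- or 32-bit immediate use
 * the cheaper one- or two-instruction forms above.
 */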

static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
{
	int off, b_off;
	int tcc_reg;

	ctx->flags |= EBPF_SEEN_TC;
	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
	emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/*
	 * if (TCC-- < 0)
	 *	goto out;
	 */
	/* Delay slot */
	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, bltz, tcc_reg, b_off);
	/*
	 * prog = array->ptrs[index];
	 * if (prog == NULL)
	 *	goto out;
	 */
	/* Delay slot */
	emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
	emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
	off = offsetof(struct bpf_array, ptrs);
	emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
	b_off = b_imm(this_idx + 1, ctx);
	emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
	/* Delay slot */
	emit_instr(ctx, nop);

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
	/* All systems are go... propagate TCC */
	emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
	/* Skip first instruction (TCC initialization) */
	emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
	return build_int_epilogue(ctx, MIPS_R_T9);
}
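
/*
 * Added note: this pairs with gen_int_prologue() above.  The callee is
 * entered at bpf_func + 4, skipping its first instruction (the daddiu
 * that resets TCC to MAX_TAIL_CALL_CNT), so the decremented count in
 * $v1 survives across the tail call.
 */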

static bool is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}
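
/*
 * Added note: MIPS conditional branches take a signed 16-bit word
 * offset (scaled by 4), giving a byte reach of roughly -0x20000 to
 * +0x1fffc; anything outside that must be converted to a jump-around
 * long branch or an absolute j.
 */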

/* Returns the number of insn slots consumed. */
static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
			  int this_idx, int exit_idx)
{
	int src, dst, r, td, ts, mem_off, b_off;
	bool need_swap, did_move, cmp_eq;
	unsigned int target = 0;
	u64 t64;
	s64 t64s;
	int bpf_op = BPF_OP(insn->code);

	switch (insn->code) {
	case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
	case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
		r = gen_imm_insn(insn, ctx, this_idx);
		if (r < 0)
			return r;
		break;
	case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, dmultu, MIPS_R_AT, dst);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_MUL | BPF_K: /* ALU32_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		if (insn->imm == 1) /* Mult by 1 is a nop */
			break;
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, multu, dst, MIPS_R_AT);
		emit_instr(ctx, mflo, dst);
		break;
	case BPF_ALU | BPF_NEG | BPF_K: /* ALU32_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
		break;
	case BPF_ALU | BPF_DIV | BPF_K: /* ALU32_IMM */
	case BPF_ALU | BPF_MOD | BPF_K: /* ALU32_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, divu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
		if (insn->imm == 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		if (insn->imm == 1) {
			/* div by 1 is a nop, mod by 1 is zero */
			if (bpf_op == BPF_MOD)
				emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
			break;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		emit_instr(ctx, ddivu, dst, MIPS_R_AT);
		if (bpf_op == BPF_DIV)
			emit_instr(ctx, mflo, dst);
		else
			emit_instr(ctx, mfhi, dst);
		break;
	case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
	case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
		did_move = false;
		if (insn->src_reg == BPF_REG_10) {
			if (bpf_op == BPF_MOV) {
				emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
				did_move = true;
			} else {
				emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
				src = MIPS_R_AT;
			}
		} else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
			emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, daddu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, dsubu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, dmultu, dst, src);
			emit_instr(ctx, mflo, dst);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, ddivu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, dsllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, dsrlv, dst, dst, src);
			break;
		case BPF_ARSH:
			emit_instr(ctx, dsrav, dst, dst, src);
			break;
		default:
			pr_err("ALU64_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
	case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}
		did_move = false;
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
			int tmp_reg = MIPS_R_AT;

			if (bpf_op == BPF_MOV) {
				tmp_reg = dst;
				did_move = true;
			}
			/* sign extend */
			emit_instr(ctx, sll, tmp_reg, src, 0);
			src = MIPS_R_AT;
		}
		switch (bpf_op) {
		case BPF_MOV:
			if (!did_move)
				emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
			break;
		case BPF_ADD:
			emit_instr(ctx, addu, dst, dst, src);
			break;
		case BPF_SUB:
			emit_instr(ctx, subu, dst, dst, src);
			break;
		case BPF_XOR:
			emit_instr(ctx, xor, dst, dst, src);
			break;
		case BPF_OR:
			emit_instr(ctx, or, dst, dst, src);
			break;
		case BPF_AND:
			emit_instr(ctx, and, dst, dst, src);
			break;
		case BPF_MUL:
			emit_instr(ctx, mul, dst, dst, src);
			break;
		case BPF_DIV:
		case BPF_MOD:
			emit_instr(ctx, divu, dst, src);
			if (bpf_op == BPF_DIV)
				emit_instr(ctx, mflo, dst);
			else
				emit_instr(ctx, mfhi, dst);
			break;
		case BPF_LSH:
			emit_instr(ctx, sllv, dst, dst, src);
			break;
		case BPF_RSH:
			emit_instr(ctx, srlv, dst, dst, src);
			break;
		default:
			pr_err("ALU_REG NOT HANDLED\n");
			return -EINVAL;
		}
		break;
	case BPF_JMP | BPF_EXIT:
		if (this_idx + 1 < exit_idx) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JEQ);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		if (insn->imm == 0) {
			src = MIPS_R_ZERO;
		} else {
			gen_imm_to_reg(insn, MIPS_R_AT, ctx);
			src = MIPS_R_AT;
		}
		goto jeq_common;
	case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JSET | BPF_X:
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (src < 0 || dst < 0)
			return -EINVAL;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
		if (td == REG_32BIT && ts != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, src, 0);
			src = MIPS_R_AT;
		} else if (ts == REG_32BIT && td != REG_32BIT) {
			emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
			dst = MIPS_R_AT;
		}
		if (bpf_op == BPF_JSET) {
			emit_instr(ctx, and, MIPS_R_AT, dst, src);
			cmp_eq = false;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
			emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				if (bpf_op == BPF_JSGT)
					emit_instr(ctx, blez, MIPS_R_AT, b_off);
				else
					emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			if (bpf_op == BPF_JSGT)
				emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
			else
				emit_instr(ctx, blez, MIPS_R_AT, b_off);
			emit_instr(ctx, nop);
			break;
		} else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
			emit_instr(ctx, slt, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JSGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
			/* dst or src could be AT */
			emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			/* SP known to be non-zero, movz becomes boolean not */
			emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
			emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
			emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
			cmp_eq = bpf_op == BPF_JGT;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
			emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
			cmp_eq = bpf_op == BPF_JGE;
			dst = MIPS_R_AT;
			src = MIPS_R_ZERO;
		} else { /* JNE/JEQ case */
			cmp_eq = (bpf_op == BPF_JEQ);
		}
jeq_common:
		/*
		 * If the next insn is EXIT and we are jumping around
		 * only it, invert the sense of the compare and
		 * conditionally jump to the exit.  Poor man's branch
		 * chaining.
		 */
		if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
			b_off = b_imm(exit_idx, ctx);
			if (is_bad_offset(b_off)) {
				target = j_target(ctx, exit_idx);
				if (target == (unsigned int)-1)
					return -E2BIG;
				cmp_eq = !cmp_eq;
				b_off = 4 * 3;
				if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
					ctx->offsets[this_idx] |= OFFSETS_B_CONV;
					ctx->long_b_conversion = 1;
				}
			}

			if (cmp_eq)
				emit_instr(ctx, bne, dst, src, b_off);
			else
				emit_instr(ctx, beq, dst, src, b_off);
			emit_instr(ctx, nop);
			if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
				emit_instr(ctx, j, target);
				emit_instr(ctx, nop);
			}
			return 2; /* We consumed the exit. */
		}
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			cmp_eq = !cmp_eq;
			b_off = 4 * 3;
			if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
				ctx->offsets[this_idx] |= OFFSETS_B_CONV;
				ctx->long_b_conversion = 1;
			}
		}

		if (cmp_eq)
			emit_instr(ctx, beq, dst, src, b_off);
		else
			emit_instr(ctx, bne, dst, src, b_off);
		emit_instr(ctx, nop);
		if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
			emit_instr(ctx, j, target);
			emit_instr(ctx, nop);
		}
		break;
	case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
	case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
		cmp_eq = (bpf_op == BPF_JSGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;

		if (insn->imm == 0) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				switch (bpf_op) {
				case BPF_JSGT:
					emit_instr(ctx, blez, dst, b_off);
					break;
				case BPF_JSGE:
					emit_instr(ctx, bltz, dst, b_off);
					break;
				case BPF_JSLT:
					emit_instr(ctx, bgez, dst, b_off);
					break;
				case BPF_JSLE:
					emit_instr(ctx, bgtz, dst, b_off);
					break;
				}
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			switch (bpf_op) {
			case BPF_JSGT:
				emit_instr(ctx, bgtz, dst, b_off);
				break;
			case BPF_JSGE:
				emit_instr(ctx, bgez, dst, b_off);
				break;
			case BPF_JSLT:
				emit_instr(ctx, bltz, dst, b_off);
				break;
			case BPF_JSLE:
				emit_instr(ctx, blez, dst, b_off);
				break;
			}
			emit_instr(ctx, nop);
			break;
		}
		/*
		 * Only a "set on less than" compare is available, so use
		 * imm + 1 to generate "GT" (as NOT of dst < imm + 1) and
		 * "LE" (as dst < imm + 1) directly.
		 */
		if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE)
			t64s = insn->imm + 1;
		else
			t64s = insn->imm;

		cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
		if (t64s >= S16_MIN && t64s <= S16_MAX) {
			emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
			src = MIPS_R_AT;
			dst = MIPS_R_ZERO;
			goto jeq_common;
		}
		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
		cmp_eq = (bpf_op == BPF_JGE);
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
		/*
		 * Only a "set on less than" compare is available, so use
		 * imm + 1 to generate "GT" (as NOT of dst < imm + 1) and
		 * "LE" (as dst < imm + 1) directly.
		 */
		if (bpf_op == BPF_JGT || bpf_op == BPF_JLE)
			t64s = (u64)(u32)(insn->imm) + 1;
		else
			t64s = (u64)(u32)(insn->imm);

		cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;

		emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
		emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		goto jeq_common;

	case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
		if (dst < 0)
			return dst;
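
		/*
		 * Added note: when the mask has exactly one bit set
		 * (hweight32 == 1), Octeon's bbit0/bbit1 branch-on-bit
		 * instructions can test it directly; ffs() - 1 converts
		 * the mask to the bit index they expect.
		 */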
		if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
			if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
				b_off = b_imm(exit_idx, ctx);
				if (is_bad_offset(b_off))
					return -E2BIG;
				emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
				emit_instr(ctx, nop);
				return 2; /* We consumed the exit. */
			}
			b_off = b_imm(this_idx + insn->off + 1, ctx);
			if (is_bad_offset(b_off))
				return -E2BIG;
			emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
			emit_instr(ctx, nop);
			break;
		}
		t64 = (u32)insn->imm;
		emit_const_to_reg(ctx, MIPS_R_AT, t64);
		emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
		src = MIPS_R_AT;
		dst = MIPS_R_ZERO;
		cmp_eq = false;
		goto jeq_common;

	case BPF_JMP | BPF_JA:
		/*
		 * Prefer relative branch for easier debugging, but
		 * fall back if needed.
		 */
		b_off = b_imm(this_idx + insn->off + 1, ctx);
		if (is_bad_offset(b_off)) {
			target = j_target(ctx, this_idx + insn->off + 1);
			if (target == (unsigned int)-1)
				return -E2BIG;
			emit_instr(ctx, j, target);
		} else {
			emit_instr(ctx, b, b_off);
		}
		emit_instr(ctx, nop);
		break;
	case BPF_LD | BPF_DW | BPF_IMM:
		if (insn->src_reg != 0)
			return -EINVAL;
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
		emit_const_to_reg(ctx, dst, t64);
		return 2; /* Double slot insn */

	case BPF_JMP | BPF_CALL:
		ctx->flags |= EBPF_SAVE_RA;
		t64s = (s64)insn->imm + (s64)__bpf_call_base;
		emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
		emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
		/* delay slot */
		emit_instr(ctx, nop);
		break;

	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx, this_idx))
			return -EINVAL;
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
		if (insn->imm == 64 && td == REG_32BIT)
			emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);

		if (insn->imm != 64 &&
		    (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
			/* sign extend */
			emit_instr(ctx, sll, dst, dst, 0);
		}

#ifdef __BIG_ENDIAN
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
#else
		need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
#endif
		if (insn->imm == 16) {
			if (need_swap)
				emit_instr(ctx, wsbh, dst, dst);
			emit_instr(ctx, andi, dst, dst, 0xffff);
		} else if (insn->imm == 32) {
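			/*
			 * Added note: wsbh swaps the bytes within each
			 * 16-bit halfword; rotating the word by 16 then
			 * swaps the halfwords, completing a full 32-bit
			 * byte swap.  The 64-bit case below uses the
			 * analogous dsbh + dshd pair.
			 */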
			if (need_swap) {
				emit_instr(ctx, wsbh, dst, dst);
				emit_instr(ctx, rotr, dst, dst, 16);
			}
		} else { /* 64-bit */
			if (need_swap) {
				emit_instr(ctx, dsbh, dst, dst);
				emit_instr(ctx, dshd, dst, dst);
			}
		}
		break;

	case BPF_ST | BPF_B | BPF_MEM:
	case BPF_ST | BPF_H | BPF_MEM:
	case BPF_ST | BPF_W | BPF_MEM:
	case BPF_ST | BPF_DW | BPF_MEM:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		gen_imm_to_reg(insn, MIPS_R_AT, ctx);
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_H:
			emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_W:
			emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
			break;
		case BPF_DW:
			emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
			break;
		}
		break;

	case BPF_LDX | BPF_B | BPF_MEM:
	case BPF_LDX | BPF_H | BPF_MEM:
	case BPF_LDX | BPF_W | BPF_MEM:
	case BPF_LDX | BPF_DW | BPF_MEM:
		if (insn->src_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			src = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
			if (src < 0)
				return src;
			mem_off = insn->off;
		}
		dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
		if (dst < 0)
			return dst;
		switch (BPF_SIZE(insn->code)) {
		case BPF_B:
			emit_instr(ctx, lbu, dst, mem_off, src);
			break;
		case BPF_H:
			emit_instr(ctx, lhu, dst, mem_off, src);
			break;
		case BPF_W:
			emit_instr(ctx, lw, dst, mem_off, src);
			break;
		case BPF_DW:
			emit_instr(ctx, ld, dst, mem_off, src);
			break;
		}
		break;

	case BPF_STX | BPF_B | BPF_MEM:
	case BPF_STX | BPF_H | BPF_MEM:
	case BPF_STX | BPF_W | BPF_MEM:
	case BPF_STX | BPF_DW | BPF_MEM:
	case BPF_STX | BPF_W | BPF_XADD:
	case BPF_STX | BPF_DW | BPF_XADD:
		if (insn->dst_reg == BPF_REG_10) {
			ctx->flags |= EBPF_SEEN_FP;
			dst = MIPS_R_SP;
			mem_off = insn->off + MAX_BPF_STACK;
		} else {
			dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
			if (dst < 0)
				return dst;
			mem_off = insn->off;
		}
		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
		if (src < 0)
			return src;
		if (BPF_MODE(insn->code) == BPF_XADD) {
			switch (BPF_SIZE(insn->code)) {
			case BPF_W:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, sll, MIPS_R_AT, src, 0);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
				/*
				 * On failure (sc wrote 0), branch back to
				 * the ll: 4 instructions of 4 bytes each.
				 */
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
				emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
				emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
				emit_instr(ctx, nop);
				break;
			}
		} else { /* BPF_MEM */
			switch (BPF_SIZE(insn->code)) {
			case BPF_B:
				emit_instr(ctx, sb, src, mem_off, dst);
				break;
			case BPF_H:
				emit_instr(ctx, sh, src, mem_off, dst);
				break;
			case BPF_W:
				emit_instr(ctx, sw, src, mem_off, dst);
				break;
			case BPF_DW:
				if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
					emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
					emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
					src = MIPS_R_AT;
				}
				emit_instr(ctx, sd, src, mem_off, dst);
				break;
			}
		}
		break;

	default:
		pr_err("NOT HANDLED %d - (%02x)\n",
		       this_idx, (unsigned int)insn->code);
		return -EINVAL;
	}
	return 1;
}

#define RVT_VISITED_MASK 0xc000000000000000ull
#define RVT_FALL_THROUGH 0x4000000000000000ull
#define RVT_BRANCH_TAKEN 0x8000000000000000ull
#define RVT_DONE	 (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
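
/*
 * Added note: bits 62..63 of each per-insn reg_val_types word record
 * which control-flow edges have been walked: the fall-through edge,
 * the branch-taken edge, or both (RVT_DONE).  An entry still at zero
 * after propagation means the insn was never reached and is dead.
 */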

static int build_int_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	int i, r;

	for (i = 0; i < prog->len; ) {
		insn = prog->insnsi + i;
		if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
			/* dead instruction, don't emit it. */
			i++;
			continue;
		}

		if (ctx->target == NULL)
			ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);

		r = build_one_insn(insn, ctx, i, prog->len);
		if (r < 0)
			return r;
		i += r;
	}
	/* epilogue offset */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	/*
	 * All exits have an offset of the epilogue, some offsets may
	 * not have been set due to branch-around threading, so set
	 * them now.
	 */
	if (ctx->target == NULL)
		for (i = 0; i < prog->len; i++) {
			insn = prog->insnsi + i;
			if (insn->code == (BPF_JMP | BPF_EXIT))
				ctx->offsets[i] = ctx->idx * 4;
		}
	return 0;
}

/* return the last idx processed, or negative for error */
static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
				   int start_idx, bool follow_taken)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct bpf_insn *insn;
	u64 exit_rvt = initial_rvt;
	u64 *rvt = ctx->reg_val_types;
	int idx;
	int reg;

	for (idx = start_idx; idx < prog->len; idx++) {
		rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
		insn = prog->insnsi + idx;
		switch (BPF_CLASS(insn->code)) {
		case BPF_ALU:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_DIV:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
			case BPF_MOD:
			case BPF_XOR:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				}
				break;
			case BPF_END:
				if (insn->imm == 64)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				else if (insn->imm == 32)
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				else /* insn->imm == 16 */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_ALU64:
			switch (BPF_OP(insn->code)) {
			case BPF_MOV:
				if (BPF_SRC(insn->code)) {
					/* REG to REG move */
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				} else {
					/* IMM to REG move */
					if (insn->imm >= 0)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
				}
				break;
			default:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LD:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				if (BPF_MODE(insn->code) == BPF_IMM) {
					s64 val;

					val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
					if (val > 0 && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
					else if (val >= S32_MIN && val <= S32_MAX)
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
					else
						set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
					rvt[idx] |= RVT_DONE;
					idx++;
				} else {
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				}
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				if (BPF_MODE(insn->code) == BPF_IMM)
					set_reg_val_type(&exit_rvt, insn->dst_reg,
							 insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
				else
					set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_LDX:
			switch (BPF_SIZE(insn->code)) {
			case BPF_DW:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
				break;
			case BPF_B:
			case BPF_H:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
				break;
			case BPF_W:
				set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
				break;
			}
			rvt[idx] |= RVT_DONE;
			break;
		case BPF_JMP:
			switch (BPF_OP(insn->code)) {
			case BPF_EXIT:
				rvt[idx] = RVT_DONE | exit_rvt;
				rvt[prog->len] = exit_rvt;
				return idx;
			case BPF_JA:
				rvt[idx] |= RVT_DONE;
				idx += insn->off;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JLT:
			case BPF_JLE:
			case BPF_JSET:
			case BPF_JNE:
			case BPF_JSGT:
			case BPF_JSGE:
			case BPF_JSLT:
			case BPF_JSLE:
				if (follow_taken) {
					rvt[idx] |= RVT_BRANCH_TAKEN;
					idx += insn->off;
					follow_taken = false;
				} else {
					rvt[idx] |= RVT_FALL_THROUGH;
				}
				break;
			case BPF_CALL:
				set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
				/* Upon call return, argument registers are clobbered. */
				for (reg = BPF_REG_0; reg <= BPF_REG_5; reg++)
					set_reg_val_type(&exit_rvt, reg, REG_64BIT);

				rvt[idx] |= RVT_DONE;
				break;
			default:
				WARN(1, "Unhandled BPF_JMP case.\n");
				rvt[idx] |= RVT_DONE;
				break;
			}
			break;
		default:
			rvt[idx] |= RVT_DONE;
			break;
		}
	}
	return idx;
}

/*
 * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
 * each eBPF insn.  This allows unneeded sign and zero extension
 * operations to be omitted.
 *
 * Doesn't yet handle confluence of control paths with conflicting
 * ranges, but it is good enough for most sane code.
 */
static int reg_val_propagate(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	u64 exit_rvt;
	int reg;
	int i;

	/*
	 * 11 registers * 3 bits/reg leaves top bits free for other
	 * uses.  Bits 62..63 are used to see if we have visited an insn.
	 */
	exit_rvt = 0;

	/* Upon entry, argument registers are 64-bit. */
	for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
		set_reg_val_type(&exit_rvt, reg, REG_64BIT);

	/*
	 * First follow all conditional branches on the fall-through
	 * edge of control flow..
	 */
	reg_val_propagate_range(ctx, exit_rvt, 0, false);
restart_search:
	/*
	 * Then repeatedly find the first conditional branch where
	 * both edges of control flow have not been taken, and follow
	 * the branch taken edge.  We will end up restarting the
	 * search once per conditional branch insn.
	 */
	for (i = 0; i < prog->len; i++) {
		u64 rvt = ctx->reg_val_types[i];

		if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
		    (rvt & RVT_VISITED_MASK) == 0)
			continue;
		if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
		} else { /* RVT_BRANCH_TAKEN */
			WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
			reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
		}
		goto restart_search;
	}
	/*
	 * Eventually all conditional branches have been followed on
	 * both branches and we are done.  Any insn that has not been
	 * visited at this point is dead.
	 */
	return 0;
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *p;

	/* We are guaranteed to have aligned memory. */
	for (p = area; size >= sizeof(u32); size -= sizeof(u32))
		uasm_i_break(&p, BRK_BUG); /* Increments p */
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *orig_prog = prog;
	bool tmp_blinded = false;
	struct bpf_prog *tmp;
	struct bpf_binary_header *header = NULL;
	struct jit_ctx ctx;
	unsigned int image_size;
	u8 *image_ptr;

	if (!prog->jit_requested || !cpu_has_mips64r2)
		return prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));

	preempt_disable();
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
		ctx.use_bbit_insns = 1;
		break;
	default:
		ctx.use_bbit_insns = 0;
	}
	preempt_enable();

	ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		goto out_err;

	ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
	if (ctx.reg_val_types == NULL)
		goto out_err;

	ctx.skf = prog;

	if (reg_val_propagate(&ctx))
		goto out_err;

	/*
	 * First pass discovers used resources and instruction offsets
	 * assuming short branches are used.
	 */
	if (build_int_body(&ctx))
		goto out_err;

	/*
	 * If no calls are made (no EBPF_SAVE_RA), then the tail call
	 * count can stay in $v1, else we must save it in $s4.
	 */
	if (ctx.flags & EBPF_SEEN_TC) {
		if (ctx.flags & EBPF_SAVE_RA)
			ctx.flags |= EBPF_SAVE_S4;
		else
			ctx.flags |= EBPF_TCC_IN_V1;
	}

	/*
	 * Second pass generates offsets, if any branches are out of
	 * range a jump-around long sequence is generated, and we have
	 * to try again from the beginning to generate the new
	 * offsets.  This is done until no additional conversions are
	 * necessary.
	 */
	do {
		ctx.idx = 0;
		ctx.gen_b_offsets = 1;
		ctx.long_b_conversion = 0;
		if (gen_int_prologue(&ctx))
			goto out_err;
		if (build_int_body(&ctx))
			goto out_err;
		if (build_int_epilogue(&ctx, MIPS_R_RA))
			goto out_err;
	} while (ctx.long_b_conversion);

	image_size = 4 * ctx.idx;

	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out_err;

	ctx.target = (u32 *)image_ptr;

	/* Third pass generates the code */
	ctx.idx = 0;
	if (gen_int_prologue(&ctx))
		goto out_err;
	if (build_int_body(&ctx))
		goto out_err;
	if (build_int_epilogue(&ctx, MIPS_R_RA))
		goto out_err;

	/* Update the icache */
	flush_icache_range((unsigned long)ctx.target,
			   (unsigned long)&ctx.target[ctx.idx]);

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(prog->len, image_size, 2, ctx.target);

	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.target;
	prog->jited = 1;
	prog->jited_len = image_size;
out_normal:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	kfree(ctx.offsets);
	kfree(ctx.reg_val_types);

	return prog;

out_err:
	prog = orig_prog;
	if (header)
		bpf_jit_binary_free(header);
	goto out_normal;
}