/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "bpf_jit: " fmt
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};
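/*
 * Note the calling-convention fit visible above: BPF_REG_1..BPF_REG_5
 * map straight onto the AAPCS64 argument registers x0..x4, so helper
 * calls need no argument shuffling, while BPF_REG_0 lives in x7 and is
 * refreshed from x0 after every call.
 */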
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	u32 *image;
};
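/*
 * emit() underpins the JIT's two-pass operation: during the sizing
 * pass ctx->image is NULL, so the call only bumps ctx->idx; during
 * the real pass the identical call sequence writes the instruction
 * stream into the allocated image.
 */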
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}
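/*
 * Example: emit_a64_mov_i64(reg, 0x12345678, ctx) produces
 *	movz reg, #0x5678
 *	movk reg, #0x1234, lsl #16
 * All-zero 16-bit chunks above the first are skipped, so small
 * constants cost a single instruction.
 */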
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}
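/*
 * 32-bit immediates get the cheaper treatment below: a negative value
 * such as -2 becomes a single MOVN (movn writes the bitwise NOT of its
 * operand, so "movn reg, #1" yields ~1 = -2), while a non-negative
 * value is a MOVZ plus an optional MOVK for the high half.
 */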
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
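/*
 * A64 branches encode their target as a signed count of 4-byte
 * instructions relative to the branch itself.  ctx->offset[i] holds
 * the index of the first A64 instruction emitted after eBPF insn i,
 * so the branch (the last instruction of bpf_from) sits at
 * offset[bpf_from] - 1 and the displacement is simply to - from.
 */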
static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}
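/*
 * Unused space in the binary image is filled with BRK instructions,
 * so a stray jump into a hole traps immediately and validate_code()
 * can reject any image where an instruction slot was never written.
 */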
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)

#define PROLOGUE_OFFSET 8
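/*
 * PROLOGUE_OFFSET is the prologue length in instructions: the eight
 * emit() calls in build_prologue() below.  Tail calls enter a target
 * program at bpf_func + PROLOGUE_OFFSET * 4 to skip its prologue,
 * which is why build_prologue() verifies the count on every build.
 */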
static int build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* Save FP and LR registers to stay aligned with ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Initialize tail_call_cnt */
	emit(A64_MOVZ(1, tcc, 0, 0), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	cur_offset = ctx->idx - idx0;
	if (cur_offset != PROLOGUE_OFFSET) {
		pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
			    cur_offset, PROLOGUE_OFFSET);
		return -1;
	}
	return 0;
}
static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;
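	/*
	 * On the first pass out_offset is still -1, so the branches
	 * below carry a bogus displacement.  That is harmless: the
	 * first pass only measures, out_offset is latched at the
	 * bottom, and the length check guarantees the second pass
	 * emits the same sequence with correct offsets.
	 */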
	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_GE, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_size); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}
static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}
/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;
#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
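	/*
	 * The two widths mirror the A64 branch encodings used below:
	 * B.cond, CBZ and CBNZ take a 19-bit instruction offset
	 * (+/-1 MiB), the unconditional B a 26-bit one (+/-128 MiB).
	 */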
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
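	/*
	 * eBPF defines division by zero to terminate the program with
	 * a return value of 0 instead of faulting, hence the runtime
	 * check below: if the divisor is zero, clear BPF_REG_0 and
	 * branch straight to the epilogue.
	 */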
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_CALL | BPF_X:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		 * simply fallthrough to epilogue.
		 */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
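	/*
	 * Returning 1 instead of falling through to "return 0" tells
	 * build_body() that a 16-byte instruction was consumed, so the
	 * loop index skips the second half of the BPF_LD_IMM64 pair.
	 */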
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}
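/*
 * Walk all eBPF instructions once per pass.  While ctx->image is
 * still NULL the loop also records, per eBPF instruction, the index
 * of the next A64 instruction; bpf2a64_offset() turns those records
 * into branch displacements on the second pass.
 */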
static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		if (ret)
			return ret;
	}

	return 0;
}
static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}
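/*
 * JIT entry point.  The image is produced in three steps: a sizing
 * pass with a NULL image, a second pass that emits into memory sized
 * by the first, and a final sweep that rejects the image if any hole
 * (BRK filler) survived.
 */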
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	bool tmp_blinded = false;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out;
	}

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset. */
	if (build_body(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_prologue(&ctx)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;

out_off:
	kfree(ctx.offset);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}
void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}
);