/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET	(1 << 0)
#define FLAG_IMM_OVERFLOW	(1 << 1)
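
/*
 * Illustrative summary: the "seen" field of the JIT context (declared below)
 * is a usage bitmap -- bits 0..BPF_MEMWORDS-1 record which scratch words M[k]
 * a filter touches (SEEN_MEM_WORD(k)), while SEEN_X/SEEN_CALL/SEEN_SKB/
 * SEEN_DATA let the prologue and epilogue save only the registers and stack
 * space the program actually needs.
 */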

struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned idx;
	unsigned prologue_bytes;
	int ret0_fp_idx;
	u32 seen;
	u32 flags;
	u32 *offsets;
	u32 *target;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static inline int call_neg_helper(struct sk_buff *skb, int offset,
				  void *ret, unsigned int size)
{
	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);

	if (!ptr)
		return -EFAULT;
	memcpy(ret, ptr, size);
	return 0;
}

static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
{
	u8 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 1);
	else
		err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
{
	u16 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 2);
	else
		err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
{
	u32 ret;
	int err;

	if (offset < 0)
		err = call_neg_helper(skb, offset, &ret, 4);
	else
		err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}

/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}
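
/*
 * For illustration: a caller of _emit()/emit() above that wants a
 * conditional "movne r0, #0" would use
 *	_emit(ARM_COND_NE, ARM_MOV_I(ARM_R0, 0), ctx);
 * On the first pass ctx->target is NULL, so only ctx->idx advances; the
 * second pass writes the encoded word into ctx->target[].
 */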

static u16 saved_regs(struct jit_ctx *ctx)
{
	u16 ret = 0;

	if ((ctx->skf->len > 1) ||
	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
		ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
	if (ctx->seen & SEEN_CALL)
		ret |= 1 << ARM_LR;
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		ret |= 1 << r_skb;
	if (ctx->seen & SEEN_DATA)
		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
	if (ctx->seen & SEEN_X)
		ret |= 1 << r_X;

	return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
	/* yes, we do waste some stack space IF there are "holes" in the set */
	return fls(ctx->seen & SEEN_MEM);
}
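
/*
 * For illustration: if a filter only touches M[0] and M[2], then
 * ctx->seen & SEEN_MEM == 0b101 and fls() returns 3, so three words of
 * stack are reserved even though M[1] is never used -- that is the
 * "wasted" space the comment above refers to.
 */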

static inline bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		return true;
	default:
		return false;
	}
}

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);
	u16 first_inst = ctx->skf->insns[0].code;
	u16 off;

#ifdef CONFIG_FRAME_POINTER
	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
	emit(ARM_PUSH(reg_set), ctx);
	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
	if (reg_set)
		emit(ARM_PUSH(reg_set), ctx);
#endif

	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

	if (ctx->seen & SEEN_DATA) {
		off = offsetof(struct sk_buff, data);
		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
		/* headlen = len - data_len */
		off = offsetof(struct sk_buff, len);
		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
		off = offsetof(struct sk_buff, data_len);
		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
	}

	if (ctx->flags & FLAG_NEED_X_RESET)
		emit(ARM_MOV_I(r_X, 0), ctx);

	/* do not leak kernel data to userspace */
	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
		emit(ARM_MOV_I(r_A, 0), ctx);

	/* stack space for the BPF_MEM words */
	if (ctx->seen & SEEN_MEM)
		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	u16 reg_set = saved_regs(ctx);

	if (ctx->seen & SEEN_MEM)
		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

	reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
	/* the first instruction of the prologue was: mov ip, sp */
	reg_set &= ~(1 << ARM_IP);
	reg_set |= (1 << ARM_SP);
	emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
	if (reg_set) {
		if (ctx->seen & SEEN_CALL)
			reg_set |= 1 << ARM_PC;
		emit(ARM_POP(reg_set), ctx);
	}

	if (!(ctx->seen & SEEN_CALL))
		emit(ARM_BX(ARM_LR), ctx);
#endif
}

/* Encode x as a rotated 8-bit ARM immediate ("imm8m"), or return -1. */
static int16_t imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);

	return -1;
}
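
/*
 * For illustration: imm8m(0x00ff0000) finds rot == 8, since
 * ror32(0xff, 16) == 0x00ff0000, and returns
 * rol32(0x00ff0000, 16) | (8 << 8) == 0x8ff -- the same encoded immediate
 * that emit_swap16() uses below.  Values that cannot be expressed as an
 * 8-bit constant rotated by an even amount yield -1.
 */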

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset = ctx->offsets[ctx->skf->len];
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * The literal pool is too far away; signal it in the flags.
		 * Unfortunately we can only detect this on the second pass.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}
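
/*
 * For illustration: the byte loads and shifted ORs in emit_load_be32() above
 * assemble r_res = b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3], i.e. a 32-bit
 * big-endian (network order) load built from single-byte accesses, which is
 * what pre-ARMv6 cores need when the address may be unaligned.
 */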

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
	/* r_dst = (r_src << 8) | (r_src >> 8) */
	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

	/*
	 * we need to mask out the bits set in r_dst[23:16] due to
	 * the first shift instruction.
	 *
	 * note that 0x8ff is the encoded immediate 0x00ff0000.
	 */
	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else  /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
	_emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
			       u8 r_src __maybe_unused,
			       struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
	emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
	u32 imm;

	if (ctx->target == NULL)
		return 0;
	/*
	 * BPF allows only forward jumps and the offset of the target is
	 * still the one computed during the first pass.
	 */
	imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

	return imm >> 2;
}
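
/*
 * For illustration: ARM branches encode a signed word offset relative to the
 * PC, which in ARM mode already points 8 bytes past the branch.  If the
 * target lies 24 bytes ahead of the branch instruction, the encoded
 * immediate is (24 - 8) >> 2 == 4 words -- hence the "ctx->idx * 4 + 8" term
 * and the final shift above.
 */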

#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
	do {								\
		imm12 = imm8m(imm_val);					\
		if (imm12 < 0) {					\
			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
		} else {						\
			emit(op ## _I((r1), (r2), imm12), ctx);		\
		}							\
	} while (0)
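
/*
 * For illustration: OP_IMM3(ARM_ADD, r_A, r_A, k, ctx) therefore emits either
 * a single "add r4, r4, #<imm8m>" when k fits a rotated 8-bit immediate, or a
 * constant load into r_scratch followed by "add r4, r4, r0" when it does not.
 * Note that the macro expects an "imm12" variable to be in scope at the call
 * site.
 */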

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
	if (ctx->ret0_fp_idx >= 0) {
		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
		/* NOP to keep the size constant between passes */
		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
	} else {
		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
	}
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
				int bpf_op)
{
#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (bpf_op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
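			/*
			 * Illustrative note (assumption about the ARM_MLS()
			 * helper in bpf_jit_32.h): if ARM_MLS(rd, rn, rm, ra)
			 * encodes "mls rd, rn, rm, ra", i.e. rd = ra - rn * rm,
			 * the two instructions above compute
			 * rd = rm - rn * (rm / rn), which is the remainder
			 * needed for BPF_MOD.
			 */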
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
	 * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into
	 * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm
	 * before using it as a source for ARM_R1.
	 *
	 * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is
	 * ARM_R5 (r_X) so there are no particular register overlap
	 * issues.
	 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, rm), ctx);

	ctx->seen |= SEEN_CALL;
	emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
		   ctx);
	emit_blx_r(ARM_R3, ctx);

	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

static inline void update_on_xread(struct jit_ctx *ctx)
{
	if (!(ctx->seen & SEEN_X))
		ctx->flags |= FLAG_NEED_X_RESET;

	ctx->seen |= SEEN_X;
}

static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned i, load_order, off, condt;
	int imm12;
	u32 k;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
		k = inst->k;
		code = bpf_anc_helper(inst);

		/* compute offsets only in the fake pass */
		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			emit_mov_i(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			emit(ARM_LDR_I(r_A, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A = scratch[k] */
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			load_order = 0;
load:
			emit_mov_i(r_off, k, ctx);
load_common:
			ctx->seen |= SEEN_DATA | SEEN_CALL;

			if (load_order > 0) {
				emit(ARM_SUB_I(r_scratch, r_skb_hl,
					       1 << load_order), ctx);
				emit(ARM_CMP_R(r_scratch, r_off), ctx);
				condt = ARM_COND_GE;
			} else {
				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
				condt = ARM_COND_HI;
			}

			/*
			 * test for negative offset, only if we are
			 * currently scheduled to take the fast
			 * path. this will update the flags so that
			 * the slowpath instructions are ignored if the
			 * offset is negative.
			 *
			 * for load_order == 0 the HI condition will
			 * make loads at offset 0 take the slow path too.
			 */
			_emit(condt, ARM_CMP_I(r_off, 0), ctx);

			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
			      ctx);

			if (load_order == 0)
				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
				      ctx);
			else if (load_order == 1)
				emit_load_be16(condt, r_A, r_scratch, ctx);
			else if (load_order == 2)
				emit_load_be32(condt, r_A, r_scratch, ctx);

			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

			/* the slowpath */
			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			/* the offset is already in R1 */
			emit_blx_r(ARM_R3, ctx);
			/* check the result of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			load_order = 0;
load_ind:
			update_on_xread(ctx);
			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			ctx->seen |= SEEN_X;
			emit_mov_i(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			ctx->seen |= SEEN_X | SEEN_SKB;
			emit(ARM_LDR_I(r_X, r_skb,
				       offsetof(struct sk_buff, len)), ctx);
			break;
		case BPF_LDX | BPF_MEM:
			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* x = ((*(frame + k)) & 0xf) << 2; */
			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
			/* the interpreter should deal with the negative K */
			if ((int)k < 0)
				return -1;
			/* offset in r1: we might have to take the slow path */
			emit_mov_i(r_off, k, ctx);
			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

			/* load in r0: common with the slowpath */
			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
						      ARM_R1), ctx);
			/*
			 * emit_mov_i() might generate one or two instructions,
			 * the same holds for emit_blx_r()
			 */
			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
			emit_blx_r(ARM_R3, ctx);
			/* check the return value of skb_copy_bits */
			emit(ARM_CMP_I(ARM_R1, 0), ctx);
			emit_err_ret(ARM_COND_NE, ctx);

			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
			break;
		case BPF_ST:
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_STX:
			update_on_xread(ctx);
			ctx->seen |= SEEN_MEM_WORD(k);
			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			update_on_xread(ctx);
			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			emit_mov_i(r_scratch, k, ctx);
			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			update_on_xread(ctx);
			emit(ARM_MUL(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			if (k == 1)
				break;
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			if (k == 1) {
				emit_mov_i(r_A, 0, ctx);
				break;
			}
			emit_mov_i(r_scratch, k, ctx);
			emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			update_on_xread(ctx);
			emit(ARM_CMP_I(r_X, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);
			emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			update_on_xread(ctx);
			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= K */
			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			update_on_xread(ctx);
			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			update_on_xread(ctx);
			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSL_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			if (unlikely(k > 31))
				return -1;
			emit(ARM_LSR_I(r_A, r_A, k), ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			update_on_xread(ctx);
			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += (A == K) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_imm;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += (A > K) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_imm;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += (A >= K) ? pc->jt : pc->jf */
			condt = ARM_COND_HS;
cmp_imm:
			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_CMP_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_CMP_I(r_A, imm12), ctx);
			}
cond_jump:
			if (inst->jt)
				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
						   ctx)), ctx);
			if (inst->jf)
				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
							     ctx)), ctx);
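			/*
			 * Illustrative note: "condt ^ 1" works because ARM
			 * condition codes come in pairs that differ only in the
			 * lowest bit (EQ/NE, HS/LO, HI/LS, ...), so it selects
			 * the inverse condition for the false branch.
			 */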
			break;
		case BPF_JMP | BPF_JEQ | BPF_X:
			/* pc += (A == X) ? pc->jt : pc->jf */
			condt = ARM_COND_EQ;
			goto cmp_x;
		case BPF_JMP | BPF_JGT | BPF_X:
			/* pc += (A > X) ? pc->jt : pc->jf */
			condt = ARM_COND_HI;
			goto cmp_x;
		case BPF_JMP | BPF_JGE | BPF_X:
			/* pc += (A >= X) ? pc->jt : pc->jf */
			condt = ARM_COND_CS;
cmp_x:
			update_on_xread(ctx);
			emit(ARM_CMP_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_K:
			/* pc += (A & K) ? pc->jt : pc->jf */
			condt = ARM_COND_NE;
			/* not set iff all zeroes iff Z==1 iff EQ */

			imm12 = imm8m(k);
			if (imm12 < 0) {
				emit_mov_i_no8m(r_scratch, k, ctx);
				emit(ARM_TST_R(r_A, r_scratch), ctx);
			} else {
				emit(ARM_TST_I(r_A, imm12), ctx);
			}
			goto cond_jump;
		case BPF_JMP | BPF_JSET | BPF_X:
			/* pc += (A & X) ? pc->jt : pc->jf */
			update_on_xread(ctx);
			condt = ARM_COND_NE;
			emit(ARM_TST_R(r_A, r_X), ctx);
			goto cond_jump;
		case BPF_RET | BPF_A:
			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
			goto b_epilogue;
		case BPF_RET | BPF_K:
			if ((k == 0) && (ctx->ret0_fp_idx < 0))
				ctx->ret0_fp_idx = i;
			emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
			if (i != ctx->skf->len - 1)
				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->seen |= SEEN_X;
			emit(ARM_MOV_R(r_X, r_A), ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			update_on_xread(ctx);
			emit(ARM_MOV_R(r_A, r_X), ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
			emit_swap16(r_A, r_scratch, ctx);
			break;
		case BPF_ANC | SKF_AD_CPU:
			/* r_scratch = current_thread_info() */
			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			/* A = skb->dev->ifindex */
			/* A = skb->dev->type */
			ctx->seen |= SEEN_SKB;
			off = offsetof(struct sk_buff, dev);
			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

			emit(ARM_CMP_I(r_scratch, 0), ctx);
			emit_err_ret(ARM_COND_EQ, ctx);

			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);

			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				off = offsetof(struct net_device, ifindex);
				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
			} else {
				/*
				 * offset of field "type" in "struct
				 * net_device" is above what can be
				 * used in the ldrh rd, [rn, #imm]
				 * instruction, so load the offset in
				 * a register and use ldrh rd, [rn, rm]
				 */
				off = offsetof(struct net_device, type);
				emit_mov_i(ARM_R3, off, ctx);
				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
			else {
				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  __pkt_type_offset[0]) != 1);
			off = PKT_TYPE_OFFSET();
			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->seen |= SEEN_SKB;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
			break;
		case BPF_ANC | SKF_AD_PAY_OFFSET:
			ctx->seen |= SEEN_SKB | SEEN_CALL;

			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
			emit_blx_r(ARM_R3, ctx);
			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_ABS:
			/*
			 * load a 32bit word from struct seccomp_data.
			 * seccomp_check_filter() will already have checked
			 * that k is 32bit aligned and lies within the
			 * struct seccomp_data.
			 */
			ctx->seen |= SEEN_SKB;
			emit(ARM_LDR_I(r_A, r_skb, k), ctx);
			break;
		default:
			return -1;
		}

		if (ctx->flags & FLAG_IMM_OVERFLOW)
			/*
			 * this instruction generated an overflow when
			 * trying to access the literal pool, so
			 * delegate this filter to the kernel interpreter.
			 */
			return -1;
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	unsigned tmp_idx;
	unsigned alloc_size;
	u8 *target_ptr;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.skf		= fp;
	ctx.ret0_fp_idx = -1;

	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	/* fake pass to fill in the ctx->seen */
	if (unlikely(build_body(&ctx)))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
	tmp_idx = ctx.idx;
	build_epilogue(&ctx);
	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

	ctx.idx += ctx.imm_count;
	if (ctx.imm_count) {
		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
		if (ctx.imms == NULL)
			goto out;
	}
#else
	/* there's nothing after the epilogue on ARMv7 */
	build_epilogue(&ctx);
#endif
	alloc_size = 4 * ctx.idx;
	header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
				      4, jit_fill_hole);
	if (header == NULL)
		goto out;

	ctx.target = (u32 *) target_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);
	if (build_body(&ctx) < 0) {
#if __LINUX_ARM_ARCH__ < 7
		if (ctx.imm_count)
			kfree(ctx.imms);
#endif
		bpf_jit_binary_free(header);
		goto out;
	}
	build_epilogue(&ctx);

	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
	if (ctx.imm_count)
		kfree(ctx.imms);
#endif

	if (bpf_jit_enable > 1)
		/* there are 2 passes here */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	set_memory_ro((unsigned long)header, header->pages);
	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;
out:
	kfree(ctx.offsets);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}