/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0      scratch register
 * r4      BPF register A
 * r5      BPF register X
 * r6      pointer to the skb
 * r7      skb->data
 * r8      skb_headlen(skb)
 */

#define r_scratch       ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off           ARM_R1
#define r_A             ARM_R4
#define r_X             ARM_R5
#define r_skb           ARM_R6
#define r_skb_data      ARM_R7
#define r_skb_hl        ARM_R8

#define SCRATCH_SP_OFFSET       0
#define SCRATCH_OFF(k)          (SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM                ((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)        (1 << (k))
#define SEEN_X                  (1 << BPF_MEMWORDS)
#define SEEN_CALL               (1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB                (1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA               (1 << (BPF_MEMWORDS + 3))

#define FLAG_NEED_X_RESET       (1 << 0)

struct jit_ctx {
        const struct sk_filter *skf;
        unsigned idx;
        unsigned prologue_bytes;
        int ret0_fp_idx;
        u32 seen;
        u32 flags;
        u32 *offsets;
        u32 *target;
#if __LINUX_ARM_ARCH__ < 7
        u16 epilogue_bytes;
        u16 imm_count;
        u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
        u8 ret;
        int err;

        err = skb_copy_bits(skb, offset, &ret, 1);

        return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
        u16 ret;
        int err;

        err = skb_copy_bits(skb, offset, &ret, 2);

        return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
        u32 ret;
        int err;

        err = skb_copy_bits(skb, offset, &ret, 4);

        return (u64)err << 32 | ntohl(ret);
}
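
/*
 * Note: the u64 return value of the helpers above is split across r0/r1 by
 * the EABI (low word in r0, high word in r1 on little-endian), so the JITed
 * slowpath receives the loaded data in r0 and the skb_copy_bits() error
 * code in r1, which is what it checks before using the result.
 */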

/*
 * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
 * (where the assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
        return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
        if (ctx->target != NULL)
                ctx->target[ctx->idx] = inst | (cond << 28);

        ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
        _emit(ARM_COND_AL, inst, ctx);
}
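
/*
 * The compiler runs the emitters twice: during the first ("fake") pass
 * ctx->target is NULL, so _emit() only advances ctx->idx to measure the
 * code size; during the second pass the same calls actually store the
 * instructions.  Both passes must therefore emit exactly the same number
 * of instructions for a given filter.
 */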

static u16 saved_regs(struct jit_ctx *ctx)
{
        u16 ret = 0;

        if ((ctx->skf->len > 1) ||
            (ctx->skf->insns[0].code == BPF_S_RET_A))
                ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
        ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
        if (ctx->seen & SEEN_CALL)
                ret |= 1 << ARM_LR;
#endif
        if (ctx->seen & (SEEN_DATA | SEEN_SKB))
                ret |= 1 << r_skb;
        if (ctx->seen & SEEN_DATA)
                ret |= (1 << r_skb_data) | (1 << r_skb_hl);
        if (ctx->seen & SEEN_X)
                ret |= 1 << r_X;

        return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
        /* yes, we do waste some stack space IF there are "holes" in the set */
        return fls(ctx->seen & SEEN_MEM);
}

static inline bool is_load_to_a(u16 inst)
{
        switch (inst) {
        case BPF_S_LD_W_LEN:
        case BPF_S_LD_W_ABS:
        case BPF_S_LD_H_ABS:
        case BPF_S_LD_B_ABS:
        case BPF_S_ANC_CPU:
        case BPF_S_ANC_IFINDEX:
        case BPF_S_ANC_MARK:
        case BPF_S_ANC_PROTOCOL:
        case BPF_S_ANC_RXHASH:
        case BPF_S_ANC_VLAN_TAG:
        case BPF_S_ANC_VLAN_TAG_PRESENT:
        case BPF_S_ANC_QUEUE:
                return true;
        default:
                return false;
        }
}

static void build_prologue(struct jit_ctx *ctx)
{
        u16 reg_set = saved_regs(ctx);
        u16 first_inst = ctx->skf->insns[0].code;
        u16 off;

#ifdef CONFIG_FRAME_POINTER
        emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
        emit(ARM_PUSH(reg_set), ctx);
        emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
        if (reg_set)
                emit(ARM_PUSH(reg_set), ctx);
#endif

        if (ctx->seen & (SEEN_DATA | SEEN_SKB))
                emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

        if (ctx->seen & SEEN_DATA) {
                off = offsetof(struct sk_buff, data);
                emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
                /* headlen = len - data_len */
                off = offsetof(struct sk_buff, len);
                emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
                off = offsetof(struct sk_buff, data_len);
                emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
                emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
        }

        if (ctx->flags & FLAG_NEED_X_RESET)
                emit(ARM_MOV_I(r_X, 0), ctx);

        /* do not leak kernel data to userspace */
        if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
                emit(ARM_MOV_I(r_A, 0), ctx);

        /* stack space for the BPF_MEM words */
        if (ctx->seen & SEEN_MEM)
                emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
        u16 reg_set = saved_regs(ctx);

        if (ctx->seen & SEEN_MEM)
                emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

        reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
        /* the first instruction of the prologue was: mov ip, sp */
        reg_set &= ~(1 << ARM_IP);
        reg_set |= (1 << ARM_SP);
        emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
        if (reg_set) {
                if (ctx->seen & SEEN_CALL)
                        reg_set |= 1 << ARM_PC;
                emit(ARM_POP(reg_set), ctx);
        }

        if (!(ctx->seen & SEEN_CALL))
                emit(ARM_BX(ARM_LR), ctx);
#endif
}

static int16_t imm8m(u32 x)
{
        u32 rot;

        for (rot = 0; rot < 16; rot++)
                if ((x & ~ror32(0xff, 2 * rot)) == 0)
                        return rol32(x, 2 * rot) | (rot << 8);

        return -1;
}
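
/*
 * imm8m() produces the ARM "rotated immediate" encoding: an 8-bit value
 * plus an even rotate-right amount, with half the rotation stored in bits
 * 11:8 of the result.  For example 0x00ff0000 is 0xff rotated right by 16,
 * so imm8m(0x00ff0000) == 0x8ff, while a value such as 0x101 cannot be
 * encoded and yields -1.
 */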

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
        unsigned i = 0, offset;
        u16 imm;

        /* on the "fake" run we just count them (duplicates included) */
        if (ctx->target == NULL) {
                ctx->imm_count++;
                return 0;
        }

        while ((i < ctx->imm_count) && ctx->imms[i]) {
                if (ctx->imms[i] == k)
                        break;
                i++;
        }

        if (ctx->imms[i] == 0)
                ctx->imms[i] = k;

        /* constants go just after the epilogue */
        offset  = ctx->offsets[ctx->skf->len];
        offset += ctx->prologue_bytes;
        offset += ctx->epilogue_bytes;
        offset += i * 4;

        ctx->target[offset / 4] = k;

        /* PC in ARM mode == address of the instruction + 8 */
        imm = offset - (8 + ctx->idx * 4);

        return imm;
}

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
        emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
        emit(ARM_MOVW(rd, val & 0xffff), ctx);
        if (val > 0xffff)
                emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
        int imm12 = imm8m(val);

        if (imm12 >= 0)
                emit(ARM_MOV_I(rd, imm12), ctx);
        else
                emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
        _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
        _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
        _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
        _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
        _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
        /* r_dst = (r_src << 8) | (r_src >> 8) */
        emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
        emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
        /* clear the stray bits in r_dst[23:16]; 0x8ff encodes 0x00ff0000 */
        emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}

#else /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
        _emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
        _emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
                               u8 r_src __maybe_unused,
                               struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
        emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
        u32 imm;

        if (ctx->target == NULL)
                return 0;
        /*
         * BPF allows only forward jumps and the offset of the target is
         * still the one computed during the first pass.
         */
        imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

        return imm >> 2;
}
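
/*
 * The ARM B instruction takes a signed word offset relative to PC, which
 * reads as the address of the branch + 8.  E.g. for a target located 0x40
 * bytes into the filter body, a branch emitted when ctx->idx == 10 gets
 * the immediate (0x40 + prologue_bytes - (10 * 4 + 8)) >> 2.
 */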

#define OP_IMM3(op, r1, r2, imm_val, ctx)                               \
        do {                                                            \
                imm12 = imm8m(imm_val);                                 \
                if (imm12 < 0) {                                        \
                        emit_mov_i_no8m(r_scratch, imm_val, ctx);       \
                        emit(op ## _R((r1), (r2), r_scratch), ctx);     \
                } else {                                                \
                        emit(op ## _I((r1), (r2), imm12), ctx);         \
                }                                                       \
        } while (0)
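
/*
 * OP_IMM3() relies on an "int imm12" variable being in scope at the call
 * site (build_body() declares one): it uses the single-instruction
 * immediate form when imm8m() succeeds, and otherwise materializes the
 * constant in r_scratch first and uses the register form.
 */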

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
        if (ctx->ret0_fp_idx >= 0) {
                _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
                /* NOP to keep the size constant between passes */
                emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
        } else {
                _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
                _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
        }
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
        emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

        if (elf_hwcap & HWCAP_THUMB)
                emit(ARM_BX(tgt_reg), ctx);
        else
                emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
        emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
        if (elf_hwcap & HWCAP_IDIVA) {
                emit(ARM_UDIV(rd, rm, rn), ctx);
                return;
        }
#endif
        if (rm != ARM_R0)
                emit(ARM_MOV_R(ARM_R0, rm), ctx);
        if (rn != ARM_R1)
                emit(ARM_MOV_R(ARM_R1, rn), ctx);

        ctx->seen |= SEEN_CALL;
        emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
        emit_blx_r(ARM_R3, ctx);

        if (rd != ARM_R0)
                emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

static inline void update_on_xread(struct jit_ctx *ctx)
{
        if (!(ctx->seen & SEEN_X))
                ctx->flags |= FLAG_NEED_X_RESET;

        ctx->seen |= SEEN_X;
}
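
/*
 * X is only zeroed in the prologue (FLAG_NEED_X_RESET) when the first
 * reference to it in the filter is a read; programs that write X first,
 * or never use it, do not pay for the extra instruction.
 */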

static int build_body(struct jit_ctx *ctx)
{
        void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
        const struct sk_filter *prog = ctx->skf;
        const struct sock_filter *inst;
        unsigned i, load_order, off, condt;
        int imm12;
        u32 k;

        for (i = 0; i < prog->len; i++) {
                inst = &(prog->insns[i]);
                /* K as an immediate value operand */
                k = inst->k;

                /* compute offsets only in the fake pass */
                if (ctx->target == NULL)
                        ctx->offsets[i] = ctx->idx * 4;

                switch (inst->code) {
                case BPF_S_LD_IMM:
                        emit_mov_i(r_A, k, ctx);
                        break;
                case BPF_S_LD_W_LEN:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        emit(ARM_LDR_I(r_A, r_skb,
                                       offsetof(struct sk_buff, len)), ctx);
                        break;
                case BPF_S_LD_MEM:
                        /* A = scratch[k] */
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
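
                /*
                 * Absolute loads: the fast path below is taken when the
                 * byte/half/word at offset K lies entirely within the
                 * linear skb data (checked against skb_headlen); otherwise
                 * the code falls back to the jit_get_skb_*() helpers,
                 * which use skb_copy_bits() and report errors in r1.
                 */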
                case BPF_S_LD_W_ABS:
                        load_order = 2;
                        goto load;
                case BPF_S_LD_H_ABS:
                        load_order = 1;
                        goto load;
                case BPF_S_LD_B_ABS:
                        load_order = 0;
                load:
                        /* the interpreter will deal with the negative K */
                        if ((int)k < 0)
                                return -ENOTSUPP;
                        emit_mov_i(r_off, k, ctx);
                load_common:
                        ctx->seen |= SEEN_DATA | SEEN_CALL;

                        if (load_order > 0) {
                                emit(ARM_SUB_I(r_scratch, r_skb_hl,
                                               1 << load_order), ctx);
                                emit(ARM_CMP_R(r_scratch, r_off), ctx);
                                condt = ARM_COND_HS;
                        } else {
                                emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                                condt = ARM_COND_HI;
                        }

                        _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
                              ctx);

                        if (load_order == 0)
                                _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
                                      ctx);
                        else if (load_order == 1)
                                emit_load_be16(condt, r_A, r_scratch, ctx);
                        else if (load_order == 2)
                                emit_load_be32(condt, r_A, r_scratch, ctx);

                        _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

                        /* the slowpath */
                        emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
                        emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
                        /* the offset is already in R1 */
                        emit_blx_r(ARM_R3, ctx);
                        /* check the result of skb_copy_bits */
                        emit(ARM_CMP_I(ARM_R1, 0), ctx);
                        emit_err_ret(ARM_COND_NE, ctx);
                        emit(ARM_MOV_R(r_A, ARM_R0), ctx);
                        break;
                case BPF_S_LD_W_IND:
                        load_order = 2;
                        goto load_ind;
                case BPF_S_LD_H_IND:
                        load_order = 1;
                        goto load_ind;
                case BPF_S_LD_B_IND:
                        load_order = 0;
                load_ind:
                        OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
                        goto load_common;
                case BPF_S_LDX_IMM:
                        ctx->seen |= SEEN_X;
                        emit_mov_i(r_X, k, ctx);
                        break;
                case BPF_S_LDX_W_LEN:
                        ctx->seen |= SEEN_X | SEEN_SKB;
                        emit(ARM_LDR_I(r_X, r_skb,
                                       offsetof(struct sk_buff, len)), ctx);
                        break;
                case BPF_S_LDX_MEM:
                        ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
                        emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
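
                /*
                 * LDX_B_MSH is the classic BPF idiom for fetching the IP
                 * header length: X = (pkt[k] & 0xf) << 2.
                 */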
                case BPF_S_LDX_B_MSH:
                        /* x = ((*(frame + k)) & 0xf) << 2; */
                        ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
                        /* the interpreter should deal with the negative K */
                        if ((int)k < 0)
                                return -1;
                        /* offset in r1: we might have to take the slow path */
                        emit_mov_i(r_off, k, ctx);
                        emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

                        /* load in r0: common with the slowpath */
                        _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
                                                      ARM_R1), ctx);
                        /*
                         * emit_mov_i() might generate one or two instructions,
                         * the same holds for emit_blx_r()
                         */
                        _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

                        emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
                        /* r_off is r1 */
                        emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
                        emit_blx_r(ARM_R3, ctx);
                        /* check the return value of skb_copy_bits */
                        emit(ARM_CMP_I(ARM_R1, 0), ctx);
                        emit_err_ret(ARM_COND_NE, ctx);

                        emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
                        emit(ARM_LSL_I(r_X, r_X, 2), ctx);
                        break;
                case BPF_S_ST:
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_S_STX:
                        update_on_xread(ctx);
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_S_ALU_ADD_K:
                        /* A += K */
                        OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
                        break;
                case BPF_S_ALU_ADD_X:
                        update_on_xread(ctx);
                        emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_S_ALU_SUB_K:
                        /* A -= K */
                        OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
                        break;
                case BPF_S_ALU_SUB_X:
                        update_on_xread(ctx);
                        emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_S_ALU_MUL_K:
                        /* A *= K */
                        emit_mov_i(r_scratch, k, ctx);
                        emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
                        break;
                case BPF_S_ALU_MUL_X:
                        update_on_xread(ctx);
                        emit(ARM_MUL(r_A, r_A, r_X), ctx);
                        break;
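
                /*
                 * For DIV_K the filter checker has already replaced the
                 * divisor with reciprocal_value(K), so the division is
                 * performed as a multiplication, keeping the top 32 bits
                 * of the 64-bit product.
                 */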
                case BPF_S_ALU_DIV_K:
                        /* current k == reciprocal_value(userspace k) */
                        emit_mov_i(r_scratch, k, ctx);
                        /* A = top 32 bits of the product */
                        emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
                        break;
                case BPF_S_ALU_DIV_X:
                        update_on_xread(ctx);
                        emit(ARM_CMP_I(r_X, 0), ctx);
                        emit_err_ret(ARM_COND_EQ, ctx);
                        emit_udiv(r_A, r_A, r_X, ctx);
                        break;
                case BPF_S_ALU_OR_K:
                        /* A |= K */
                        OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
                        break;
                case BPF_S_ALU_OR_X:
                        update_on_xread(ctx);
                        emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_S_ALU_XOR_K:
                        /* A ^= K; */
                        OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
                        break;
                case BPF_S_ANC_ALU_XOR_X:
                case BPF_S_ALU_XOR_X:
                        /* A ^= X */
                        update_on_xread(ctx);
                        emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_S_ALU_AND_K:
                        /* A &= K */
                        OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
                        break;
                case BPF_S_ALU_AND_X:
                        update_on_xread(ctx);
                        emit(ARM_AND_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_S_ALU_LSH_K:
                        if (unlikely(k > 31))
                                return -1;
                        emit(ARM_LSL_I(r_A, r_A, k), ctx);
                        break;
                case BPF_S_ALU_LSH_X:
                        update_on_xread(ctx);
                        emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_S_ALU_RSH_K:
                        if (unlikely(k > 31))
                                return -1;
                        emit(ARM_LSR_I(r_A, r_A, k), ctx);
                        break;
                case BPF_S_ALU_RSH_X:
                        update_on_xread(ctx);
                        emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_S_ALU_NEG:
                        /* A = -A */
                        emit(ARM_RSB_I(r_A, r_A, 0), ctx);
                        break;
                case BPF_S_JMP_JA:
                        /* pc += K */
                        emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
                        break;
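
                /*
                 * Conditional jumps: condt holds the ARM condition for the
                 * "true" branch, and flipping its low bit (condt ^ 1) gives
                 * the inverse condition (EQ<->NE, HS<->LO, HI<->LS) used
                 * for the "false" branch.
                 */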
                case BPF_S_JMP_JEQ_K:
                        /* pc += (A == K) ? pc->jt : pc->jf */
                        condt = ARM_COND_EQ;
                        goto cmp_imm;
                case BPF_S_JMP_JGT_K:
                        /* pc += (A > K) ? pc->jt : pc->jf */
                        condt = ARM_COND_HI;
                        goto cmp_imm;
                case BPF_S_JMP_JGE_K:
                        /* pc += (A >= K) ? pc->jt : pc->jf */
                        condt = ARM_COND_HS;
                cmp_imm:
                        imm12 = imm8m(k);
                        if (imm12 < 0) {
                                emit_mov_i_no8m(r_scratch, k, ctx);
                                emit(ARM_CMP_R(r_A, r_scratch), ctx);
                        } else {
                                emit(ARM_CMP_I(r_A, imm12), ctx);
                        }
                cond_jump:
                        if (inst->jt)
                                _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
                                                   ctx)), ctx);
                        if (inst->jf)
                                _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
                                                             ctx)), ctx);
                        break;
                case BPF_S_JMP_JEQ_X:
                        /* pc += (A == X) ? pc->jt : pc->jf */
                        condt = ARM_COND_EQ;
                        goto cmp_x;
                case BPF_S_JMP_JGT_X:
                        /* pc += (A > X) ? pc->jt : pc->jf */
                        condt = ARM_COND_HI;
                        goto cmp_x;
                case BPF_S_JMP_JGE_X:
                        /* pc += (A >= X) ? pc->jt : pc->jf */
                        condt = ARM_COND_CS;
                cmp_x:
                        update_on_xread(ctx);
                        emit(ARM_CMP_R(r_A, r_X), ctx);
                        goto cond_jump;
                case BPF_S_JMP_JSET_K:
                        /* pc += (A & K) ? pc->jt : pc->jf */
                        condt = ARM_COND_NE;
                        /* not set iff all zeroes iff Z==1 iff EQ */

                        imm12 = imm8m(k);
                        if (imm12 < 0) {
                                emit_mov_i_no8m(r_scratch, k, ctx);
                                emit(ARM_TST_R(r_A, r_scratch), ctx);
                        } else {
                                emit(ARM_TST_I(r_A, imm12), ctx);
                        }
                        goto cond_jump;
                case BPF_S_JMP_JSET_X:
                        /* pc += (A & X) ? pc->jt : pc->jf */
                        update_on_xread(ctx);
                        condt = ARM_COND_NE;
                        emit(ARM_TST_R(r_A, r_X), ctx);
                        goto cond_jump;
                case BPF_S_RET_A:
                        emit(ARM_MOV_R(ARM_R0, r_A), ctx);
                        goto b_epilogue;
                case BPF_S_RET_K:
                        if ((k == 0) && (ctx->ret0_fp_idx < 0))
                                ctx->ret0_fp_idx = i;
                        emit_mov_i(ARM_R0, k, ctx);
                b_epilogue:
                        if (i != ctx->skf->len - 1)
                                emit(ARM_B(b_imm(prog->len, ctx)), ctx);
                        break;
                case BPF_S_MISC_TAX:
                        /* X = A */
                        ctx->seen |= SEEN_X;
                        emit(ARM_MOV_R(r_X, r_A), ctx);
                        break;
                case BPF_S_MISC_TXA:
                        /* A = X */
                        update_on_xread(ctx);
                        emit(ARM_MOV_R(r_A, r_X), ctx);
                        break;
                case BPF_S_ANC_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        off = offsetof(struct sk_buff, protocol);
                        emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
                        emit_swap16(r_A, r_scratch, ctx);
                        break;
                case BPF_S_ANC_CPU:
                        /* r_scratch = current_thread_info() */
                        OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
                        /* A = current_thread_info()->cpu */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
                        off = offsetof(struct thread_info, cpu);
                        emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        break;
                case BPF_S_ANC_IFINDEX:
                        /* A = skb->dev->ifindex */
                        ctx->seen |= SEEN_SKB;
                        off = offsetof(struct sk_buff, dev);
                        emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

                        emit(ARM_CMP_I(r_scratch, 0), ctx);
                        emit_err_ret(ARM_COND_EQ, ctx);

                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  ifindex) != 4);
                        off = offsetof(struct net_device, ifindex);
                        emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        break;
                case BPF_S_ANC_MARK:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        off = offsetof(struct sk_buff, mark);
                        emit(ARM_LDR_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_S_ANC_RXHASH:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
                        off = offsetof(struct sk_buff, rxhash);
                        emit(ARM_LDR_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_S_ANC_VLAN_TAG:
                case BPF_S_ANC_VLAN_TAG_PRESENT:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        if (inst->code == BPF_S_ANC_VLAN_TAG)
                                OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
                        else
                                OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
                        break;
                case BPF_S_ANC_QUEUE:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        BUILD_BUG_ON(offsetof(struct sk_buff,
                                              queue_mapping) > 0xff);
                        off = offsetof(struct sk_buff, queue_mapping);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        break;
                default:
                        return -1;
                }
        }

        /* compute offsets only during the first pass */
        if (ctx->target == NULL)
                ctx->offsets[i] = ctx->idx * 4;

        return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
{
        struct jit_ctx ctx;
        unsigned tmp_idx;
        unsigned alloc_size;

        if (!bpf_jit_enable)
                return;

        memset(&ctx, 0, sizeof(ctx));
        ctx.skf         = fp;
        ctx.ret0_fp_idx = -1;

        ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
        if (ctx.offsets == NULL)
                return;

        /* fake pass to fill in the ctx->seen */
        if (unlikely(build_body(&ctx)))
                goto out;

        tmp_idx = ctx.idx;
        build_prologue(&ctx);
        ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
        tmp_idx = ctx.idx;
        build_epilogue(&ctx);
        ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

        ctx.idx += ctx.imm_count;
        if (ctx.imm_count) {
                ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
                if (ctx.imms == NULL)
                        goto out;
        }
#else
        /* there's nothing after the epilogue on ARMv7 */
        build_epilogue(&ctx);
#endif

        alloc_size = 4 * ctx.idx;
        ctx.target = module_alloc(max(sizeof(struct work_struct),
                                      alloc_size));
        if (unlikely(ctx.target == NULL))
                goto out;

        ctx.idx = 0;
        build_prologue(&ctx);
        build_body(&ctx);
        build_epilogue(&ctx);

        flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
        if (ctx.imm_count)
                kfree(ctx.imms);
#endif

        if (bpf_jit_enable > 1)
                print_hex_dump(KERN_INFO, "BPF JIT code: ",
                               DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
                               alloc_size, false);

        fp->bpf_func = (void *)ctx.target;
out:
        kfree(ctx.offsets);
        return;
}
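
/*
 * Note the max() with sizeof(struct work_struct) in the allocation above:
 * the JITed image must be large enough that, when the filter is released,
 * the image itself can be reused as the work_struct below, deferring
 * module_free() to process context (filters may be released from atomic
 * context).
 */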
static void bpf_jit_free_worker(struct work_struct *work)
{
        module_free(NULL, work);
}

void bpf_jit_free(struct sk_filter *fp)
{
        struct work_struct *work;

        if (fp->bpf_func != sk_run_filter) {
                work = (struct work_struct *)fp->bpf_func;

                INIT_WORK(work, bpf_jit_free_worker);
                schedule_work(work);
        }
}