/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;
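
/*
 * The JIT writes instructions through the data cache; bpf_flush_icache() is
 * run over the finished image so the instruction cache picks up the freshly
 * written opcodes before the generated program is first executed.
 */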
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}
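
	/*
	 * Note: the register saves above store below the current stack
	 * pointer before the stdu actually allocates the frame; this is
	 * fine on ppc64 because the ABI guarantees a protected area (the
	 * "red zone") below r1 that code may use.
	 */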

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to user */
		PPC_LI(r_A, 0);
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}
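
/*
 * Pick the sk_load_* helper variant for an absolute load at constant offset
 * K: non-negative offsets use the fast positive-offset helper, negative
 * offsets in the special SKF_*_OFF range use the negative-offset helper, and
 * anything below SKF_LL_OFF falls back to the generic helper.
 */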
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			PPC_ADDI(r_A, r_A, IMM_L(K));
			PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_MOD_X: /* A %= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_scratch1, r_A, r_X);
			PPC_MUL(r_scratch1, r_X, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
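
			/*
			 * Both BPF_S_ALU_MOD_X above and BPF_S_ALU_MOD_K below
			 * compute the remainder by hand, A - (A / divisor) *
			 * divisor, since divwu only yields a quotient and
			 * there is no integer remainder instruction to use
			 * here.
			 */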
		case BPF_S_ALU_MOD_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_DIV_K: /* A /= K */
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			PPC_ORI(r_A, r_A, IMM_L(K));
			PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_XOR_K: /* A ^= K */
			PPC_XORI(r_A, r_A, IMM_L(K));
			PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K && ctx->pc_ret0 == -1)
				ctx->pc_ret0 = i;
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
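
			/*
			 * Note on M[]: the 16 BPF scratch slots live in
			 * non-volatile GPRs starting at r_M rather than on
			 * the stack; the low bits of ctx->seen record which
			 * slots were touched so the prologue and epilogue
			 * only save/restore the registers actually used.
			 */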
		case BPF_S_LD_W_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_VLAN_TAG:
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
			else
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
			break;

			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_S_LDX_B_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;
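
			/*
			 * BPF_S_LDX_B_MSH is the classic "load IP header
			 * length" idiom, X = 4 * (P[K] & 0xf); the byte_msh
			 * helper variant performs the mask and shift itself,
			 * so the JIT only has to select it and reuse the
			 * common load path above.
			 */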

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				PPC_LI32(r_scratch1, K);
				PPC_CMPLW(r_A, r_scratch1);
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	bpf_flush_icache(code_base, code_base + (proglen/4));
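
	/*
	 * On 64-bit PPC (ELFv1 ABI) a function pointer refers to a function
	 * descriptor, not to the first instruction, so the image begins with
	 * a two-dword descriptor (entry address + TOC pointer) and the real
	 * opcodes live at code_base just past it.
	 */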
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
	fp->bpf_func = (void *)image;

out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter)
		module_free(NULL, fp->bpf_func);
}