/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"
int bpf_jit_enable __read_mostly;
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
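
/*
 * Build the image prologue.  Registers are only saved and a stack frame is
 * only created when the filter actually needs them (M[] scratch slots,
 * packet-data helper calls), as recorded in ctx->seen by the scouting pass.
 */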
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}
	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}
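
	/*
	 * r_HL now holds the length of the linear skb area and r_D points at
	 * its start; the sk_load_* helpers check requested offsets against
	 * these before touching packet bytes.
	 */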
	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	/* make sure we don't leak kernel information to user */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */
	PPC_BLR();
}
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
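
/*
 * Pick the skb-load helper variant for a constant offset K at compile time:
 * K >= 0 selects the _positive_offset variant, negative K at or above
 * SKF_LL_OFF selects the _negative_offset variant, and anything below
 * SKF_LL_OFF falls back to the generic helper.  For example,
 * CHOOSE_LOAD_FUNC(K, sk_load_word) resolves to one of
 * sk_load_word_positive_offset, sk_load_word_negative_offset or sk_load_word.
 */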
/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];
	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;
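
		/*
		 * On the scouting pass forward targets in addrs[] are not yet
		 * known, but every branch is assembled at its worst-case size,
		 * so the offsets recorded here stay valid and later passes can
		 * fill in the real destinations.
		 */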
		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MUL(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
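		/*
		 * Constant divisors need no zero check here: the classic BPF
		 * checker has already rejected filters that divide or take a
		 * modulus by a literal zero.
		 */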
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;
			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
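		/*
		 * The sixteen BPF scratch words M[0..15] live entirely in
		 * registers r_M..r_M+15; the low bits of ctx->seen record
		 * which slots are touched, so the prologue and epilogue only
		 * save and restore those.
		 */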
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;
			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					offsetof(struct net_device, type));
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
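		/*
		 * vlan_tci carries the tag in its low 12 bits and the "tag
		 * present" flag at 0x1000, so SKF_AD_VLAN_TAG masks the flag
		 * off while SKF_AD_VLAN_TAG_PRESENT isolates it and shifts it
		 * down to 0 or 1.
		 */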
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
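		/*
		 * pkt_type is a bitfield; PKT_TYPE_OFFSET()/PKT_TYPE_MAX
		 * locate and mask it within its byte, and the shift moves it
		 * down to a plain 0..7 value.
		 */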
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;
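		/*
		 * The helper is reached through mtlr/blrl with its absolute
		 * address rather than a relative bl, which keeps the call
		 * valid wherever module_alloc() happens to place the JIT
		 * buffer relative to the helper code.
		 */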
			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;
	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);
	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);
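
	/*
	 * image is a u32 pointer, so FUNCTION_DESCR_SIZE/4 words are skipped:
	 * the function descriptor sits at the start of the allocation and the
	 * actual instructions begin at code_base.
	 */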
	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
	if (bpf_jit_enable > 1)
		/* Note that we output the base address of code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	bpf_flush_icache(code_base, code_base + (proglen/4));
#ifdef CONFIG_PPC64
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif
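
	/*
	 * A ppc64 function pointer refers to a descriptor holding the entry
	 * address and TOC pointer rather than to the code itself, so the two
	 * words written above let fp->bpf_func be called like any other
	 * kernel function; ppc32 enters the image directly.
	 */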
	fp->bpf_func = (void *)image;
	fp->jited = 1;

out:
	kfree(addrs);
	return;
}
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}