/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/random.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];
static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else
		*(u32 *)ptr = bytes;
	return ptr + len;
}

#define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
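/* For example, EMIT3(0x48, 0x89, 0xE5) emits the byte sequence 48 89 e5
 * ("mov rbp,rsp") and EMIT2(0x31, 0xC0) emits 31 c0 ("xor eax,eax"):
 * emit_code() stores the packed u32 little-endian, so the macro arguments
 * can be read left-to-right as x86 opcode bytes.
 */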
static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}
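/* These two helpers pick between the short and the long x86 encodings used
 * below: a displacement or immediate that passes is_imm8() can use the
 * one-byte disp8/imm8 forms (e.g. "add rax,0x1" is 48 83 c0 01), anything
 * wider but still passing is_simm32() needs the four-byte disp32/imm32 forms.
 */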
#define EMIT_mov(DST, SRC) \
	do { \
		if (DST != SRC) \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 8;
	else
		return 0;
}
/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F

static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
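/* Example of the selection above: a positive classic-BPF offset such as 14
 * picks sk_load_word_positive_offset, an ancillary offset in [SKF_LL_OFF, 0)
 * picks sk_load_word_negative_offset, and anything below SKF_LL_OFF falls
 * back to the generic sk_load_word slow path.
 */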
struct bpf_binary_header {
	unsigned int	pages;
	/* Note : for security reasons, bpf code will follow a randomly
	 * sized amount of int3 instructions
	 */
	u8		image[];
};

static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
						  u8 **image_ptr)
{
	unsigned int sz, hole;
	struct bpf_binary_header *header;

	/* Most of BPF filters are really small,
	 * but if some of them fill a page, allow at least
	 * 128 extra bytes to insert a random section of int3
	 */
	sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
	header = module_alloc(sz);
	if (!header)
		return NULL;

	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */

	header->pages = sz / PAGE_SIZE;
	hole = min(sz - (proglen + sizeof(*header)),
		   PAGE_SIZE - sizeof(*header));

	/* insert a random number of int3 instructions before BPF code */
	*image_ptr = &header->image[prandom_u32() % hole];
	return header;
}
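/* Rough numbers for illustration, assuming sizeof(*header) == 4: a 200 byte
 * program rounds up to sz = 4096, hole = min(4096 - 204, 4096 - 4) = 3892,
 * so the image start is randomized over roughly 3.8K of int3 padding and a
 * jump into the page at a guessed offset most likely hits a trap.
 */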
/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};
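/* The table stores only the low 3 bits of the hardware register number;
 * whether the register is one of r8..r15 (and therefore needs a REX prefix
 * bit) is recovered separately by is_ereg() below. E.g. BPF_REG_5 maps to
 * r8: low bits 000, hence the value 0 above plus an extra encoding byte.
 */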
/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static inline bool is_ereg(u32 reg)
{
	if (reg == BPF_REG_5 || reg == AUX_REG ||
	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
		return true;
	else
		return false;
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static inline u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static inline u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
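/* Worked example of the helpers above: EMIT_mov(BPF_REG_1, BPF_REG_0)
 * produces 48 89 c7 ("mov rdi,rax"), while EMIT_mov(BPF_REG_7, BPF_REG_0)
 * produces 49 89 c5 ("mov r13,rax") because add_2mod() sets the REX.B bit
 * for the extended destination and add_2reg() supplies ModRM byte c5.
 */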
struct jit_context {
	unsigned int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};
static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct sock_filter_int *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	u8 temp[64];
	int i;
	int proglen = 0;
	u8 *prog = temp;
	int stacksize = MAX_BPF_STACK +
		32 /* space for rbx, r13, r14, r15 */ +
		8 /* space for skb_copy_bits() buffer */;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, stacksize */
	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);

	/* all classic BPF filters use R6(rbx) save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);

	/* sk_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);

	/* clear A and X registers */
	EMIT2(0x31, 0xc0); /* xor eax, eax */
	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
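	/* Resulting frame, assuming MAX_BPF_STACK == 512 (stacksize == 552):
	 * rbx/r13/r14/r15 are spilled at rbp-552, rbp-544, rbp-536 and
	 * rbp-528, and the area above them up to rbp holds the BPF program
	 * stack (addressed through BPF_REG_FP == rbp) plus the
	 * skb_copy_bits() scratch slot.
	 */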
	if (ctx->seen_ld_abs) {
		/* r9d : skb->len - skb->data_len (headlen)
		 * r10 : skb->data
		 */
		if (is_imm8(offsetof(struct sk_buff, len)))
			/* mov %r9d, off8(%rdi) */
			EMIT4(0x44, 0x8b, 0x4f,
			      offsetof(struct sk_buff, len));
		else
			/* mov %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x8b, 0x8f,
				    offsetof(struct sk_buff, len));

		if (is_imm8(offsetof(struct sk_buff, data_len)))
			/* sub %r9d, off8(%rdi) */
			EMIT4(0x44, 0x2b, 0x4f,
			      offsetof(struct sk_buff, data_len));
		else
			/* sub %r9d, off32(%rdi) */
			EMIT3_off32(0x44, 0x2b, 0x8f,
				    offsetof(struct sk_buff, data_len));

		if (is_imm8(offsetof(struct sk_buff, data)))
			/* mov %r10, off8(%rdi) */
			EMIT4(0x4c, 0x8b, 0x57,
			      offsetof(struct sk_buff, data));
		else
			/* mov %r10, off32(%rdi) */
			EMIT3_off32(0x4c, 0x8b, 0x97,
				    offsetof(struct sk_buff, data));
	}
	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;
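			/* Example: BPF_ALU64 | BPF_ADD | BPF_X with
			 * dst_reg == BPF_REG_1 and src_reg == BPF_REG_2
			 * selects b2 = 0x01 and emits 48 01 f7, i.e.
			 * "add rdi,rsi"; the 32-bit BPF_ALU variant simply
			 * drops the 0x48 REX.W prefix.
			 */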
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
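			/* Example: BPF_ALU64 | BPF_ADD | BPF_K with
			 * dst_reg == BPF_REG_0 and imm32 == 1 takes the
			 * is_imm8() branch and emits 48 83 c0 01
			 * ("add rax,0x1"); a wider immediate falls through
			 * to the 0x81 imm32 form instead.
			 */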
		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}
			/* fallthrough: a positive imm32 is handled by the
			 * zero-extending 32-bit mov below
			 */

		case BPF_ALU | BPF_MOV | BPF_K:
			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;
			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
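			/* The dance above exists because x86 div implicitly
			 * uses rdx:rax and clobbers both: rax/rdx are saved,
			 * the divisor is staged in r11, edx is zeroed, and
			 * after the div the quotient is taken from rax
			 * (BPF_DIV) or the remainder from rdx (BPF_MOD)
			 * before rdx/rax are restored.
			 */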
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			break;
			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;
			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;
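			/* Example: BPF_LDX | BPF_MEM | BPF_W with
			 * dst_reg == BPF_REG_0, src_reg == BPF_REG_6 and
			 * off == 0x14 emits 8b 43 14, i.e.
			 * "mov eax,dword ptr [rbx+0x14]" - no REX prefix is
			 * needed since neither register is extended and the
			 * offset fits in a disp8.
			 */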
			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (ctx->seen_ld_abs) {
				EMIT2(0x41, 0x52); /* push %r10 */
				EMIT2(0x41, 0x51); /* push %r9 */
				/* need to adjust jmp offset, since
				 * pop %r9, pop %r10 take 4 bytes after call insn
				 */
				jmp_offset += 4;
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (ctx->seen_ld_abs) {
				EMIT2(0x41, 0x59); /* pop %r9 */
				EMIT2(0x41, 0x5A); /* pop %r10 */
			}
			break;
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
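			/* Example: a forward branch of 5 bytes with BPF_JNE
			 * uses the short form 75 05, while a branch beyond
			 * the +/-127 byte range becomes the far form
			 * 0f 85 imm32 - exactly the "+ 0x10 plus an extra
			 * 0x0f" rule noted next to the X86_J* definitions.
			 */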
		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:		ctx->seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;
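			/* Per the comment above and the prologue caching, the
			 * sk_load_* helpers use a non-standard convention:
			 * offset in %esi, skb in %rbx, skb->data in %r10 and
			 * headlen in %r9d, with the fetched value returned in
			 * %eax (BPF R0); only the skb_copy_bits() slow path
			 * falls back to the regular C calling convention.
			 */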
		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;
		case BPF_JMP | BPF_EXIT:
			if (i != insn_cnt - 1) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;
		default:
			/* By design x64 JIT should support all BPF instructions
			 * This error will be seen if new instruction was added
			 * to interpreter, but not to JIT
			 * or if there is junk in sk_filter
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}
void bpf_jit_compile(struct sk_filter *prog)
{
}
void bpf_int_jit_compile(struct sk_filter *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before first pass, make a rough estimation of addrs[]
	 * each bpf instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	for (pass = 0; pass < 10; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				module_free(NULL, header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_alloc_binary(proglen, &image);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, 0, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
}
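/* The pass loop above converges by running do_jit() with image == NULL until
 * the generated length stops changing (proglen == oldproglen); only then is
 * the binary buffer allocated and one final pass emits the real bytes, so
 * every jump offset computed from addrs[] is already stable.
 */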
static void bpf_jit_free_deferred(struct work_struct *work)
{
	struct sk_filter *fp = container_of(work, struct sk_filter, work);
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	set_memory_rw(addr, header->pages);
	module_free(NULL, header);
	kfree(fp);
}
void bpf_jit_free(struct sk_filter *fp)
{
	INIT_WORK(&fp->work, bpf_jit_free_deferred);
	schedule_work(&fp->work);
}