/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <linux/bpf.h>
/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
        if (len == 1)
                *ptr = bytes;
        else if (len == 2)
                *(u16 *)ptr = bytes;
        else {
                *(u32 *)ptr = bytes;
                barrier();
        }
        ptr += len;
        return ptr;
}
#define EMIT(bytes, len) \
        do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
        do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
        do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
        do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
        do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
static bool is_imm8(int value)
{
        return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
        return value == (s64) (s32) value;
}
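
/* For illustration: the (s32) cast truncates to the low 32 bits and the
 * (s64) cast sign-extends them back, so the round trip is lossless exactly
 * when the value fits in a signed 32-bit immediate, e.g.
 * is_simm32(0x7fffffffLL) is true while is_simm32(0x80000000LL) is false.
 */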
#define EMIT_mov(DST, SRC) \
        do { if (DST != SRC) \
                EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
        } while (0)
static int bpf_size_to_x86_bytes(int bpf_size)
{
        if (bpf_size == BPF_W)
                return 4;
        else if (bpf_size == BPF_H)
                return 2;
        else if (bpf_size == BPF_B)
                return 1;
        else if (bpf_size == BPF_DW)
                return 4; /* imm32 */
        else
                return 0;
}
/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
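/* e.g. 'je' is 0x74 with a rel8 target; its rel32 form is the two-byte
 * escape 0x0F 0x84, i.e. the same opcode plus 0x10 behind an extra 0x0f.
 */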
static void bpf_flush_icache(void *start, void *end)
{
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        smp_wmb();
        flush_icache_range((unsigned long)start, (unsigned long)end);
        set_fs(old_fs);
}
#define CHOOSE_LOAD_FUNC(K, func) \
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
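
/* For illustration: CHOOSE_LOAD_FUNC(2, sk_load_word) selects
 * sk_load_word_positive_offset, a negative constant at or above SKF_LL_OFF
 * selects sk_load_word_negative_offset, and only a negative K below
 * SKF_LL_OFF falls back to the generic sk_load_word helper.
 */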
/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
/* The following table maps BPF registers to x64 registers.
 *
 * x64 register r12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * r9 caches skb->len - skb->data_len
 * r10 caches skb->data, and used for blinding (if enabled)
 */
static const int reg2hex[] = {
        [BPF_REG_0] = 0,  /* rax */
        [BPF_REG_1] = 7,  /* rdi */
        [BPF_REG_2] = 6,  /* rsi */
        [BPF_REG_3] = 2,  /* rdx */
        [BPF_REG_4] = 1,  /* rcx */
        [BPF_REG_5] = 0,  /* r8 */
        [BPF_REG_6] = 3,  /* rbx, callee saved */
        [BPF_REG_7] = 5,  /* r13, callee saved */
        [BPF_REG_8] = 6,  /* r14, callee saved */
        [BPF_REG_9] = 7,  /* r15, callee saved */
        [BPF_REG_FP] = 5, /* rbp, readonly */
        [BPF_REG_AX] = 2, /* r10, temp register */
        [AUX_REG] = 3,    /* r11, temp register */
};
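
/* Note (illustrative): each value above is only the low 3 bits of the x64
 * register number, which is why rax and r8 both encode as 0 and rdi and r15
 * both as 7. The high bit that distinguishes r8..r15 is supplied separately
 * by the REX prefix bits that add_1mod()/add_2mod() set for is_ereg()
 * registers.
 */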
/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
        return (1 << reg) & (BIT(BPF_REG_5) |
                             BIT(AUX_REG) |
                             BIT(BPF_REG_7) |
                             BIT(BPF_REG_8) |
                             BIT(BPF_REG_9) |
                             BIT(BPF_REG_AX));
}

static bool is_axreg(u32 reg)
{
        return reg == BPF_REG_0;
}
/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
        if (is_ereg(reg))
                byte |= 1;
        return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
        if (is_ereg(r1))
                byte |= 1;
        if (is_ereg(r2))
                byte |= 4;
        return byte;
}
/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
        return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
        return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
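
/* Worked example (illustrative only): add_2reg(0xC0, BPF_REG_1, BPF_REG_2)
 * = 0xC0 + 7 + (6 << 3) = 0xF7, a ModRM byte with mod=11 (register direct),
 * reg=rsi, rm=rdi; emitting it after opcode 0x89 yields 'mov edi, esi'.
 */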
static void jit_fill_hole(void *area, unsigned int size)
{
        /* fill whole space with int3 instructions */
        memset(area, 0xcc, size);
}
struct jit_context {
        int cleanup_addr; /* epilogue code offset */
        bool seen_ld_abs;
        bool seen_ax_reg;
};
/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define AUX_STACK_SPACE \
        (32 /* space for rbx, r13, r14, r15 */ + \
         8 /* space for skb_copy_bits() buffer */)

#define PROLOGUE_SIZE 37
/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth)
{
        u8 *prog = *pprog;
        int cnt = 0;

        EMIT1(0x55); /* push rbp */
        EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

        /* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
        EMIT3_off32(0x48, 0x81, 0xEC,
                    round_up(stack_depth, 8) + AUX_STACK_SPACE);

        /* sub rbp, AUX_STACK_SPACE */
        EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);

        /* all classic BPF filters use R6(rbx), save it */

        /* mov qword ptr [rbp+0],rbx */
        EMIT4(0x48, 0x89, 0x5D, 0);

        /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
         * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
         * R8(r14). R9(r15) spill could be made conditional, but there is only
         * one 'bpf_error' return path out of helper functions inside bpf_jit.S
         * The overhead of extra spill is negligible for any filter other
         * than synthetic ones. Therefore not worth adding complexity.
         */

        /* mov qword ptr [rbp+8],r13 */
        EMIT4(0x4C, 0x89, 0x6D, 8);
        /* mov qword ptr [rbp+16],r14 */
        EMIT4(0x4C, 0x89, 0x75, 16);
        /* mov qword ptr [rbp+24],r15 */
        EMIT4(0x4C, 0x89, 0x7D, 24);

        /* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
         * we need to reset the counter to 0. It's done in two instructions,
         * resetting rax register to 0 (xor on eax gets 0 extended), and
         * moving it to the counter location.
         */

        /* xor eax, eax */
        EMIT2(0x31, 0xc0);
        /* mov qword ptr [rbp+32], rax */
        EMIT4(0x48, 0x89, 0x45, 32);

        BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
        *pprog = prog;
}
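
/* Resulting stack layout (as emitted above, relative to the adjusted rbp):
 * [rbp+0], [rbp+8], [rbp+16] and [rbp+24] hold the spilled rbx/r13/r14/r15,
 * and the qword at [rbp+32] is zeroed so the tail call counter that
 * emit_bpf_tail_call() later reads and writes as a dword starts out at zero.
 */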
/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
        u8 *prog = *pprog;
        int label1, label2, label3;
        int cnt = 0;

        /* rdi - pointer to ctx
         * rsi - pointer to bpf_array
         * rdx - index in bpf_array
         */

        /* if (index >= array->map.max_entries)
         *   goto out;
         */
        EMIT2(0x89, 0xD2);                        /* mov edx, edx */
        EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
              offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
        label1 = cnt;

        /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *   goto out;
         */
        EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
        EMIT2(X86_JA, OFFSET2);                   /* ja out */
        label2 = cnt;
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */

        /* prog = array->ptrs[index]; */
        EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
                    offsetof(struct bpf_array, ptrs));

        /* if (prog == NULL)
         *   goto out;
         */
        EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
        EMIT2(X86_JE, OFFSET3);                   /* je out */
        label3 = cnt;

        /* goto *(prog->bpf_func + prologue_size); */
        EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
              offsetof(struct bpf_prog, bpf_func));
        EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

        /* now we're ready to jump into next BPF program
         * rdi == ctx (1st arg)
         * rax == prog->bpf_func + prologue_size
         */
        RETPOLINE_RAX_BPF_JIT();

        /* out: */
        BUILD_BUG_ON(cnt - label1 != OFFSET1);
        BUILD_BUG_ON(cnt - label2 != OFFSET2);
        BUILD_BUG_ON(cnt - label3 != OFFSET3);
        *pprog = prog;
}
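
/* Note: OFFSET1/2/3 are hand-counted byte distances from each conditional
 * jump to the out label. The BUILD_BUG_ON checks above recompute them from
 * the emit cursor (cnt - labelN), so a mis-sized emit fails at build time
 * instead of producing a jump into the middle of an instruction.
 */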
static void emit_load_skb_data_hlen(u8 **pprog)
{
        u8 *prog = *pprog;
        int cnt = 0;

        /* r9d = skb->len - skb->data_len (headlen)
         * r10 = skb->data
         */
        /* mov %r9d, off32(%rdi) */
        EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

        /* sub %r9d, off32(%rdi) */
        EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

        /* mov %r10, off32(%rdi) */
        EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
        *pprog = prog;
}
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                  int oldproglen, struct jit_context *ctx)
{
        struct bpf_insn *insn = bpf_prog->insnsi;
        int insn_cnt = bpf_prog->len;
        bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
        bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
        bool seen_exit = false;
        u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
        int i, cnt = 0;
        int proglen = 0;
        u8 *prog = temp;

        emit_prologue(&prog, bpf_prog->aux->stack_depth);

        if (seen_ld_abs)
                emit_load_skb_data_hlen(&prog);

        for (i = 0; i < insn_cnt; i++, insn++) {
                const s32 imm32 = insn->imm;
                u32 dst_reg = insn->dst_reg;
                u32 src_reg = insn->src_reg;
                u8 b1 = 0, b2 = 0, b3 = 0;
                s64 jmp_offset;
                u8 jmp_cond;
                bool reload_skb_data;
                int ilen;
                u8 *func;

                if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
                        ctx->seen_ax_reg = seen_ax_reg = true;

                switch (insn->code) {
                        /* ALU */
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU64 | BPF_ADD | BPF_X:
                case BPF_ALU64 | BPF_SUB | BPF_X:
                case BPF_ALU64 | BPF_AND | BPF_X:
                case BPF_ALU64 | BPF_OR | BPF_X:
                case BPF_ALU64 | BPF_XOR | BPF_X:
                        switch (BPF_OP(insn->code)) {
                        case BPF_ADD: b2 = 0x01; break;
                        case BPF_SUB: b2 = 0x29; break;
                        case BPF_AND: b2 = 0x21; break;
                        case BPF_OR: b2 = 0x09; break;
                        case BPF_XOR: b2 = 0x31; break;
                        }
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_2mod(0x48, dst_reg, src_reg));
                        else if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
                        EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
                        break;
                case BPF_ALU64 | BPF_MOV | BPF_X:
                        EMIT_mov(dst_reg, src_reg);
                        break;

                case BPF_ALU | BPF_MOV | BPF_X:
                        if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT1(add_2mod(0x40, dst_reg, src_reg));
                        EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
                        break;
                case BPF_ALU | BPF_NEG:
                case BPF_ALU64 | BPF_NEG:
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));
                        EMIT2(0xF7, add_1reg(0xD8, dst_reg));
                        break;
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU64 | BPF_ADD | BPF_K:
                case BPF_ALU64 | BPF_SUB | BPF_K:
                case BPF_ALU64 | BPF_AND | BPF_K:
                case BPF_ALU64 | BPF_OR | BPF_K:
                case BPF_ALU64 | BPF_XOR | BPF_K:
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));

                        /* b3 holds 'normal' opcode, b2 short form only valid
                         * in case dst is eax/rax.
                         */
                        switch (BPF_OP(insn->code)) {
                        case BPF_ADD:
                                b3 = 0xC0;
                                b2 = 0x05;
                                break;
                        case BPF_SUB:
                                b3 = 0xE8;
                                b2 = 0x2D;
                                break;
                        case BPF_AND:
                                b3 = 0xE0;
                                b2 = 0x25;
                                break;
                        case BPF_OR:
                                b3 = 0xC8;
                                b2 = 0x0D;
                                break;
                        case BPF_XOR:
                                b3 = 0xF0;
                                b2 = 0x35;
                                break;
                        }

                        if (is_imm8(imm32))
                                EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
                        else if (is_axreg(dst_reg))
                                EMIT1_off32(b2, imm32);
                        else
                                EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
                        break;
                case BPF_ALU64 | BPF_MOV | BPF_K:
                        /* optimization: if imm32 is positive,
                         * use 'mov eax, imm32' (which zero-extends imm32)
                         * to save 2 bytes
                         */
                        if (imm32 < 0) {
                                /* 'mov rax, imm32' sign extends imm32 */
                                b1 = add_1mod(0x48, dst_reg);
                                b2 = 0xC7;
                                b3 = 0xC0;
                                EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
                                break;
                        }

                case BPF_ALU | BPF_MOV | BPF_K:
                        /* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
                         * to save 3 bytes.
                         */
                        if (imm32 == 0) {
                                if (is_ereg(dst_reg))
                                        EMIT1(add_2mod(0x40, dst_reg, dst_reg));
                                b2 = 0x31; /* xor */
                                b3 = 0xC0;
                                EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
                                break;
                        }

                        /* mov %eax, imm32 */
                        if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));
                        EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
                        break;
                case BPF_LD | BPF_IMM | BPF_DW:
                        /* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
                         * to save 7 bytes.
                         */
                        if (insn[0].imm == 0 && insn[1].imm == 0) {
                                b1 = add_2mod(0x48, dst_reg, dst_reg);
                                b2 = 0x31; /* xor */
                                b3 = 0xC0;
                                EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

                                insn++;
                                i++;
                                break;
                        }

                        /* movabsq %rax, imm64 */
                        EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
                        EMIT(insn[0].imm, 4);
                        EMIT(insn[1].imm, 4);

                        insn++;
                        i++;
                        break;
                        /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
                case BPF_ALU | BPF_MOD | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_MOD | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU64 | BPF_MOD | BPF_X:
                case BPF_ALU64 | BPF_DIV | BPF_X:
                case BPF_ALU64 | BPF_MOD | BPF_K:
                case BPF_ALU64 | BPF_DIV | BPF_K:
                        EMIT1(0x50); /* push rax */
                        EMIT1(0x52); /* push rdx */

                        if (BPF_SRC(insn->code) == BPF_X)
                                /* mov r11, src_reg */
                                EMIT_mov(AUX_REG, src_reg);
                        else
                                /* mov r11, imm32 */
                                EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

                        /* mov rax, dst_reg */
                        EMIT_mov(BPF_REG_0, dst_reg);

                        /* xor edx, edx
                         * equivalent to 'xor rdx, rdx', but one byte less
                         */
                        EMIT2(0x31, 0xd2);

                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                /* div r11 */
                                EMIT3(0x49, 0xF7, 0xF3);
                        else
                                /* div r11d */
                                EMIT3(0x41, 0xF7, 0xF3);

                        if (BPF_OP(insn->code) == BPF_MOD)
                                /* mov r11, rdx */
                                EMIT3(0x49, 0x89, 0xD3);
                        else
                                /* mov r11, rax */
                                EMIT3(0x49, 0x89, 0xC3);

                        EMIT1(0x5A); /* pop rdx */
                        EMIT1(0x58); /* pop rax */

                        /* mov dst_reg, r11 */
                        EMIT_mov(dst_reg, AUX_REG);
                        break;
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU64 | BPF_MUL | BPF_K:
                case BPF_ALU64 | BPF_MUL | BPF_X:
                        EMIT1(0x50); /* push rax */
                        EMIT1(0x52); /* push rdx */

                        /* mov r11, dst_reg */
                        EMIT_mov(AUX_REG, dst_reg);

                        if (BPF_SRC(insn->code) == BPF_X)
                                /* mov rax, src_reg */
                                EMIT_mov(BPF_REG_0, src_reg);
                        else
                                /* mov rax, imm32 */
                                EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, AUX_REG));
                        else if (is_ereg(AUX_REG))
                                EMIT1(add_1mod(0x40, AUX_REG));
                        /* mul(q) r11 */
                        EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

                        /* mov r11, rax */
                        EMIT_mov(AUX_REG, BPF_REG_0);

                        EMIT1(0x5A); /* pop rdx */
                        EMIT1(0x58); /* pop rax */

                        /* mov dst_reg, r11 */
                        EMIT_mov(dst_reg, AUX_REG);
                        break;
                        /* shifts */
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_ARSH | BPF_K:
                case BPF_ALU64 | BPF_LSH | BPF_K:
                case BPF_ALU64 | BPF_RSH | BPF_K:
                case BPF_ALU64 | BPF_ARSH | BPF_K:
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));

                        switch (BPF_OP(insn->code)) {
                        case BPF_LSH: b3 = 0xE0; break;
                        case BPF_RSH: b3 = 0xE8; break;
                        case BPF_ARSH: b3 = 0xF8; break;
                        }
                        EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
                        break;
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_ARSH | BPF_X:
                case BPF_ALU64 | BPF_LSH | BPF_X:
                case BPF_ALU64 | BPF_RSH | BPF_X:
                case BPF_ALU64 | BPF_ARSH | BPF_X:

                        /* check for bad case when dst_reg == rcx */
                        if (dst_reg == BPF_REG_4) {
                                /* mov r11, dst_reg */
                                EMIT_mov(AUX_REG, dst_reg);
                                dst_reg = AUX_REG;
                        }

                        if (src_reg != BPF_REG_4) { /* common case */
                                EMIT1(0x51); /* push rcx */

                                /* mov rcx, src_reg */
                                EMIT_mov(BPF_REG_4, src_reg);
                        }

                        /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
                        if (BPF_CLASS(insn->code) == BPF_ALU64)
                                EMIT1(add_1mod(0x48, dst_reg));
                        else if (is_ereg(dst_reg))
                                EMIT1(add_1mod(0x40, dst_reg));

                        switch (BPF_OP(insn->code)) {
                        case BPF_LSH: b3 = 0xE0; break;
                        case BPF_RSH: b3 = 0xE8; break;
                        case BPF_ARSH: b3 = 0xF8; break;
                        }
                        EMIT2(0xD3, add_1reg(b3, dst_reg));

                        if (src_reg != BPF_REG_4)
                                EMIT1(0x59); /* pop rcx */

                        if (insn->dst_reg == BPF_REG_4)
                                /* mov dst_reg, r11 */
                                EMIT_mov(insn->dst_reg, AUX_REG);
                        break;
                case BPF_ALU | BPF_END | BPF_FROM_BE:
                        switch (imm32) {
                        case 16:
                                /* emit 'ror %ax, 8' to swap lower 2 bytes */
                                EMIT1(0x66);
                                if (is_ereg(dst_reg))
                                        EMIT1(0x41);
                                EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

                                /* emit 'movzwl eax, ax' */
                                if (is_ereg(dst_reg))
                                        EMIT3(0x45, 0x0F, 0xB7);
                                else
                                        EMIT2(0x0F, 0xB7);
                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
                                break;
                        case 32:
                                /* emit 'bswap eax' to swap lower 4 bytes */
                                if (is_ereg(dst_reg))
                                        EMIT2(0x41, 0x0F);
                                else
                                        EMIT1(0x0F);
                                EMIT1(add_1reg(0xC8, dst_reg));
                                break;
                        case 64:
                                /* emit 'bswap rax' to swap 8 bytes */
                                EMIT3(add_1mod(0x48, dst_reg), 0x0F,
                                      add_1reg(0xC8, dst_reg));
                                break;
                        }
                        break;

                case BPF_ALU | BPF_END | BPF_FROM_LE:
                        switch (imm32) {
                        case 16:
                                /* emit 'movzwl eax, ax' to zero extend 16-bit
                                 * into 64 bit
                                 */
                                if (is_ereg(dst_reg))
                                        EMIT3(0x45, 0x0F, 0xB7);
                                else
                                        EMIT2(0x0F, 0xB7);
                                EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
                                break;
                        case 32:
                                /* emit 'mov eax, eax' to clear upper 32-bits */
                                if (is_ereg(dst_reg))
                                        EMIT1(0x45);
                                EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
                                break;
                        case 64:
                                /* nop */
                                break;
                        }
                        break;
                        /* ST: *(u8*)(dst_reg + off) = imm */
                case BPF_ST | BPF_MEM | BPF_B:
                        if (is_ereg(dst_reg))
                                EMIT2(0x41, 0xC6);
                        else
                                EMIT1(0xC6);
                        goto st;
                case BPF_ST | BPF_MEM | BPF_H:
                        if (is_ereg(dst_reg))
                                EMIT3(0x66, 0x41, 0xC7);
                        else
                                EMIT2(0x66, 0xC7);
                        goto st;
                case BPF_ST | BPF_MEM | BPF_W:
                        if (is_ereg(dst_reg))
                                EMIT2(0x41, 0xC7);
                        else
                                EMIT1(0xC7);
                        goto st;
                case BPF_ST | BPF_MEM | BPF_DW:
                        EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:                     if (is_imm8(insn->off))
                                EMIT2(add_1reg(0x40, dst_reg), insn->off);
                        else
                                EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

                        EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
                        break;
                        /* STX: *(u8*)(dst_reg + off) = src_reg */
                case BPF_STX | BPF_MEM | BPF_B:
                        /* emit 'mov byte ptr [rax + off], al' */
                        if (is_ereg(dst_reg) || is_ereg(src_reg) ||
                            /* have to add extra byte for x86 SIL, DIL regs */
                            src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
                                EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
                        else
                                EMIT1(0x88);
                        goto stx;
                case BPF_STX | BPF_MEM | BPF_H:
                        if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
                        else
                                EMIT2(0x66, 0x89);
                        goto stx;
                case BPF_STX | BPF_MEM | BPF_W:
                        if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
                        else
                                EMIT1(0x89);
                        goto stx;
                case BPF_STX | BPF_MEM | BPF_DW:
                        EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:                    if (is_imm8(insn->off))
                                EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
                        else
                                EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
                                            insn->off);
                        break;
                        /* LDX: dst_reg = *(u8*)(src_reg + off) */
                case BPF_LDX | BPF_MEM | BPF_B:
                        /* emit 'movzx rax, byte ptr [rax + off]' */
                        EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
                        goto ldx;
                case BPF_LDX | BPF_MEM | BPF_H:
                        /* emit 'movzx rax, word ptr [rax + off]' */
                        EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
                        goto ldx;
                case BPF_LDX | BPF_MEM | BPF_W:
                        /* emit 'mov eax, dword ptr [rax+0x14]' */
                        if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
                        else
                                EMIT1(0x8B);
                        goto ldx;
                case BPF_LDX | BPF_MEM | BPF_DW:
                        /* emit 'mov rax, qword ptr [rax+0x14]' */
                        EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:                    /* if insn->off == 0 we can save one extra byte, but
                         * special case of x86 r13 which always needs an offset
                         * is not worth the hassle
                         */
                        if (is_imm8(insn->off))
                                EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
                        else
                                EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
                                            insn->off);
                        break;
                        /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
                case BPF_STX | BPF_XADD | BPF_W:
                        /* emit 'lock add dword ptr [rax + off], eax' */
                        if (is_ereg(dst_reg) || is_ereg(src_reg))
                                EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
                        else
                                EMIT2(0xF0, 0x01);
                        goto xadd;
                case BPF_STX | BPF_XADD | BPF_DW:
                        EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:                   if (is_imm8(insn->off))
                                EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
                        else
                                EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
                                            insn->off);
                        break;
                        /* call */
                case BPF_JMP | BPF_CALL:
                        func = (u8 *) __bpf_call_base + imm32;
                        jmp_offset = func - (image + addrs[i]);
                        if (seen_ld_abs) {
                                reload_skb_data = bpf_helper_changes_pkt_data(func);
                                if (reload_skb_data) {
                                        EMIT1(0x57); /* push %rdi */
                                        jmp_offset += 22; /* pop, mov, sub, mov */
                                } else {
                                        EMIT2(0x41, 0x52); /* push %r10 */
                                        EMIT2(0x41, 0x51); /* push %r9 */
                                        /* need to adjust jmp offset, since
                                         * pop %r9, pop %r10 take 4 bytes after call insn
                                         */
                                        jmp_offset += 4;
                                }
                        }
                        if (!imm32 || !is_simm32(jmp_offset)) {
                                pr_err("unsupported bpf func %d addr %p image %p\n",
                                       imm32, func, image);
                                return -EINVAL;
                        }
                        EMIT1_off32(0xE8, jmp_offset);
                        if (seen_ld_abs) {
                                if (reload_skb_data) {
                                        EMIT1(0x5F); /* pop %rdi */
                                        emit_load_skb_data_hlen(&prog);
                                } else {
                                        EMIT2(0x41, 0x59); /* pop %r9 */
                                        EMIT2(0x41, 0x5A); /* pop %r10 */
                                }
                        }
                        break;
                case BPF_JMP | BPF_TAIL_CALL:
                        emit_bpf_tail_call(&prog);
                        break;
                        /* cond jump */
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JNE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JLT | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JLE | BPF_X:
                case BPF_JMP | BPF_JSGT | BPF_X:
                case BPF_JMP | BPF_JSLT | BPF_X:
                case BPF_JMP | BPF_JSGE | BPF_X:
                case BPF_JMP | BPF_JSLE | BPF_X:
                        /* cmp dst_reg, src_reg */
                        EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
                              add_2reg(0xC0, dst_reg, src_reg));
                        goto emit_cond_jmp;

                case BPF_JMP | BPF_JSET | BPF_X:
                        /* test dst_reg, src_reg */
                        EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
                              add_2reg(0xC0, dst_reg, src_reg));
                        goto emit_cond_jmp;

                case BPF_JMP | BPF_JSET | BPF_K:
                        /* test dst_reg, imm32 */
                        EMIT1(add_1mod(0x48, dst_reg));
                        EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
                        goto emit_cond_jmp;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JNE | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JLT | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JLE | BPF_K:
                case BPF_JMP | BPF_JSGT | BPF_K:
                case BPF_JMP | BPF_JSLT | BPF_K:
                case BPF_JMP | BPF_JSGE | BPF_K:
                case BPF_JMP | BPF_JSLE | BPF_K:
                        /* cmp dst_reg, imm8/32 */
                        EMIT1(add_1mod(0x48, dst_reg));

                        if (is_imm8(imm32))
                                EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
                        else
                                EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
emit_cond_jmp:          /* convert BPF opcode to x86 */
                        switch (BPF_OP(insn->code)) {
                        case BPF_JEQ:
                                jmp_cond = X86_JE;
                                break;
                        case BPF_JSET:
                        case BPF_JNE:
                                jmp_cond = X86_JNE;
                                break;
                        case BPF_JGT:
                                /* GT is unsigned '>', JA in x86 */
                                jmp_cond = X86_JA;
                                break;
                        case BPF_JLT:
                                /* LT is unsigned '<', JB in x86 */
                                jmp_cond = X86_JB;
                                break;
                        case BPF_JGE:
                                /* GE is unsigned '>=', JAE in x86 */
                                jmp_cond = X86_JAE;
                                break;
                        case BPF_JLE:
                                /* LE is unsigned '<=', JBE in x86 */
                                jmp_cond = X86_JBE;
                                break;
                        case BPF_JSGT:
                                /* signed '>', GT in x86 */
                                jmp_cond = X86_JG;
                                break;
                        case BPF_JSLT:
                                /* signed '<', LT in x86 */
                                jmp_cond = X86_JL;
                                break;
                        case BPF_JSGE:
                                /* signed '>=', GE in x86 */
                                jmp_cond = X86_JGE;
                                break;
                        case BPF_JSLE:
                                /* signed '<=', LE in x86 */
                                jmp_cond = X86_JLE;
                                break;
                        default: /* to silence gcc warning */
                                return -EFAULT;
                        }
                        jmp_offset = addrs[i + insn->off] - addrs[i];
                        if (is_imm8(jmp_offset)) {
                                EMIT2(jmp_cond, jmp_offset);
                        } else if (is_simm32(jmp_offset)) {
                                EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
                        } else {
                                pr_err("cond_jmp gen bug %llx\n", jmp_offset);
                                return -EFAULT;
                        }
                        break;
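
                        /* For illustration: a forward jump of 5 bytes fits
                         * in imm8 and becomes the two-byte 'jcc rel8'
                         * (e.g. 74 05), while a 300-byte jump needs the
                         * six-byte 'jcc rel32' form (e.g. 0f 84 2c 01 00 00);
                         * jmp_offset is computed from addrs[], which records
                         * the end offset of each translated insn.
                         */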
                case BPF_JMP | BPF_JA:
                        jmp_offset = addrs[i + insn->off] - addrs[i];
                        if (!jmp_offset)
                                /* optimize out nop jumps */
                                break;
emit_jmp:
                        if (is_imm8(jmp_offset)) {
                                EMIT2(0xEB, jmp_offset);
                        } else if (is_simm32(jmp_offset)) {
                                EMIT1_off32(0xE9, jmp_offset);
                        } else {
                                pr_err("jmp gen bug %llx\n", jmp_offset);
                                return -EFAULT;
                        }
                        break;
                case BPF_LD | BPF_IND | BPF_W:
                        func = sk_load_word;
                        goto common_load;
                case BPF_LD | BPF_ABS | BPF_W:
                        func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
                        ctx->seen_ld_abs = seen_ld_abs = true;
                        jmp_offset = func - (image + addrs[i]);
                        if (!func || !is_simm32(jmp_offset)) {
                                pr_err("unsupported bpf func %d addr %p image %p\n",
                                       imm32, func, image);
                                return -EINVAL;
                        }
                        if (BPF_MODE(insn->code) == BPF_ABS) {
                                /* mov %esi, imm32 */
                                EMIT1_off32(0xBE, imm32);
                        } else {
                                /* mov %rsi, src_reg */
                                EMIT_mov(BPF_REG_2, src_reg);
                                if (imm32) {
                                        if (is_imm8(imm32))
                                                /* add %esi, imm8 */
                                                EMIT3(0x83, 0xC6, imm32);
                                        else
                                                /* add %esi, imm32 */
                                                EMIT2_off32(0x81, 0xC6, imm32);
                                }
                        }
                        /* skb pointer is in R6 (%rbx), it will be copied into
                         * %rdi if skb_copy_bits() call is necessary.
                         * sk_load_* helpers also use %r10 and %r9d.
                         * See bpf_jit.S
                         */
                        if (seen_ax_reg)
                                /* r10 = skb->data, mov %r10, off32(%rbx) */
                                EMIT3_off32(0x4c, 0x8b, 0x93,
                                            offsetof(struct sk_buff, data));
                        EMIT1_off32(0xE8, jmp_offset); /* call */
                        break;
                case BPF_LD | BPF_IND | BPF_H:
                        func = sk_load_half;
                        goto common_load;
                case BPF_LD | BPF_ABS | BPF_H:
                        func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
                        goto common_load;
                case BPF_LD | BPF_IND | BPF_B:
                        func = sk_load_byte;
                        goto common_load;
                case BPF_LD | BPF_ABS | BPF_B:
                        func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
                        goto common_load;
                case BPF_JMP | BPF_EXIT:
                        if (seen_exit) {
                                jmp_offset = ctx->cleanup_addr - addrs[i];
                                goto emit_jmp;
                        }
                        seen_exit = true;
                        /* update cleanup_addr */
                        ctx->cleanup_addr = proglen;
                        /* mov rbx, qword ptr [rbp+0] */
                        EMIT4(0x48, 0x8B, 0x5D, 0);
                        /* mov r13, qword ptr [rbp+8] */
                        EMIT4(0x4C, 0x8B, 0x6D, 8);
                        /* mov r14, qword ptr [rbp+16] */
                        EMIT4(0x4C, 0x8B, 0x75, 16);
                        /* mov r15, qword ptr [rbp+24] */
                        EMIT4(0x4C, 0x8B, 0x7D, 24);

                        /* add rbp, AUX_STACK_SPACE */
                        EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
                        EMIT1(0xC9); /* leave */
                        EMIT1(0xC3); /* ret */
                        break;
                default:
                        /* By design x64 JIT should support all BPF instructions.
                         * This error will be seen if a new instruction was added
                         * to the interpreter but not to the JIT, or if there is
                         * junk in bpf_prog.
                         */
                        pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
                        return -EINVAL;
                }

                ilen = prog - temp;
                if (ilen > BPF_MAX_INSN_SIZE) {
                        pr_err("bpf_jit: fatal insn size error\n");
                        return -EFAULT;
                }

                if (image) {
                        if (unlikely(proglen + ilen > oldproglen)) {
                                pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
                        memcpy(image + proglen, temp, ilen);
                }
                proglen += ilen;
                addrs[i] = proglen;
                prog = temp;
        }
        return proglen;
}
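
/* Note: on sizing passes image is NULL, so do_jit() only measures and
 * records per-insn end offsets in addrs[]; once the size converges the
 * caller allocates the image and one more pass copies the final bytes,
 * with the overflow checks above guarding against a still-growing image.
 */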
struct x64_jit_data {
        struct bpf_binary_header *header;
        int *addrs;
        u8 *image;
        int proglen;
        struct jit_context ctx;
};
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
        struct bpf_binary_header *header = NULL;
        struct bpf_prog *tmp, *orig_prog = prog;
        struct x64_jit_data *jit_data;
        int proglen, oldproglen = 0;
        struct jit_context ctx = {};
        bool tmp_blinded = false;
        bool extra_pass = false;
        u8 *image = NULL;
        int *addrs;
        int pass;
        int i;

        if (!prog->jit_requested)
                return orig_prog;

        tmp = bpf_jit_blind_constants(prog);
        /* If blinding was requested and we failed during blinding,
         * we must fall back to the interpreter.
         */
        if (IS_ERR(tmp))
                return orig_prog;
        if (tmp != prog) {
                tmp_blinded = true;
                prog = tmp;
        }

        jit_data = prog->aux->jit_data;
        if (!jit_data) {
                jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
                if (!jit_data) {
                        prog = orig_prog;
                        goto out;
                }
                prog->aux->jit_data = jit_data;
        }
        addrs = jit_data->addrs;
        if (addrs) {
                ctx = jit_data->ctx;
                oldproglen = jit_data->proglen;
                image = jit_data->image;
                header = jit_data->header;
                extra_pass = true;
                goto skip_init_addrs;
        }
        addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
        if (!addrs) {
                prog = orig_prog;
                goto out_addrs;
        }

        /* Before first pass, make a rough estimation of addrs[]
         * each bpf instruction is translated to less than 64 bytes
         */
        for (proglen = 0, i = 0; i < prog->len; i++) {
                proglen += 64;
                addrs[i] = proglen;
        }
        ctx.cleanup_addr = proglen;
skip_init_addrs:

        /* JITed image shrinks with every pass and the loop iterates
         * until the image stops shrinking. Very large bpf programs
         * may converge on the last pass. In such case do one more
         * pass to emit the final image
         */
        for (pass = 0; pass < 10 || image; pass++) {
                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                if (proglen <= 0) {
                        image = NULL;
                        if (header)
                                bpf_jit_binary_free(header);
                        prog = orig_prog;
                        goto out_addrs;
                }
                if (image) {
                        if (proglen != oldproglen) {
                                pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
                                       proglen, oldproglen);
                                prog = orig_prog;
                                goto out_addrs;
                        }
                        break;
                }
                if (proglen == oldproglen) {
                        header = bpf_jit_binary_alloc(proglen, &image,
                                                      1, jit_fill_hole);
                        if (!header) {
                                prog = orig_prog;
                                goto out_addrs;
                        }
                }
                oldproglen = proglen;
        }

        if (bpf_jit_enable > 1)
                bpf_jit_dump(prog->len, proglen, pass + 1, image);

        if (image) {
                bpf_flush_icache(header, image + proglen);
                if (!prog->is_func || extra_pass) {
                        bpf_jit_binary_lock_ro(header);
                } else {
                        jit_data->addrs = addrs;
                        jit_data->ctx = ctx;
                        jit_data->proglen = proglen;
                        jit_data->image = image;
                        jit_data->header = header;
                }
                prog->bpf_func = (void *)image;
                prog->jited = 1;
                prog->jited_len = proglen;
        } else {
                prog = orig_prog;
        }

        if (!prog->is_func || extra_pass) {
out_addrs:
                kfree(addrs);
                kfree(jit_data);
                prog->aux->jit_data = NULL;
        }
out:
        if (tmp_blinded)
                bpf_jit_prog_release_other(prog, prog == orig_prog ?
                                           tmp : orig_prog);
        return prog;
}