/* Linux 4.13.16 - arch/x86/net/bpf_jit_comp.c */
/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include <linux/bpf.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do {EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
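
/* The EMIT*() helpers pack opcode bytes into a little-endian integer,
 * append them to the output buffer via emit_code() and track the emitted
 * length in 'cnt'. For example, EMIT3(0x48, 0x89, 0xE5) produces the byte
 * sequence 48 89 e5, i.e. 'mov rbp,rsp' as used in the prologue below.
 */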

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do {if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F
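
/* Example: X86_JE (0x74) takes a signed 8-bit displacement; prefixing 0x0F
 * and adding 0x10 gives 0x0F 0x84, the rel32 form of the same conditional
 * jump. That is how emit_cond_jmp in do_jit() switches between the short
 * and near encodings depending on the jump distance.
 */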

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
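
/* Note: a constant negative offset that is not below SKF_LL_OFF selects the
 * specialized *_negative_offset helper at JIT time, other negative constants
 * fall back to the generic helper, and non-negative constants take the
 * *_positive_offset fast path.
 */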

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)

/* The following table maps BPF registers to x64 registers.
 *
 * x64 register r12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 *  r9 caches skb->len - skb->data_len
 * r10 caches skb->data, and used for blinding (if enabled)
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[BPF_REG_AX] = 2, /* r10 temp register */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(BPF_REG_AX));
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
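
/* Worked example: for a BPF_ALU64 add with dst_reg = BPF_REG_1 (rdi, hex 7)
 * and src_reg = BPF_REG_2 (rsi, hex 6), add_2mod(0x48, ...) leaves the REX
 * prefix at 0x48 (neither register is extended) and add_2reg(0xC0, ...)
 * yields the ModRM byte 0xC0 + 7 + (6 << 3) = 0xF7, so the JIT emits
 * 48 01 f7, i.e. 'add rdi,rsi'.
 */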

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
	bool seen_ax_reg;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define AUX_STACK_SPACE \
	(32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)
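
/* Frame layout after emit_prologue() (rbp has already been lowered by
 * AUX_STACK_SPACE):
 *   [rbp + 0]  saved rbx
 *   [rbp + 8]  saved r13
 *   [rbp + 16] saved r14
 *   [rbp + 24] saved r15
 *   [rbp + 32] tail call counter area, cleared to zero
 * The BPF program's own stack (rounded-up stack_depth bytes) lives below
 * rbp, which is what BPF_REG_FP maps to.
 */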

#define PROLOGUE_SIZE 37

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, rounded_stack_depth + AUX_STACK_SPACE */
	EMIT3_off32(0x48, 0x81, 0xEC,
		    round_up(stack_depth, 8) + AUX_STACK_SPACE);

	/* sub rbp, AUX_STACK_SPACE */
	EMIT4(0x48, 0x83, 0xED, AUX_STACK_SPACE);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp+0],rbx */
	EMIT4(0x48, 0x89, 0x5D, 0);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp+8],r13 */
	EMIT4(0x4C, 0x89, 0x6D, 8);
	/* mov qword ptr [rbp+16],r14 */
	EMIT4(0x4C, 0x89, 0x75, 16);
	/* mov qword ptr [rbp+24],r15 */
	EMIT4(0x4C, 0x89, 0x7D, 24);

	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
	 * we need to reset the counter to 0. It's done in two instructions,
	 * resetting rax register to 0 (xor on eax gets 0 extended), and
	 * moving it to the counter location.
	 */

	/* xor eax, eax */
	EMIT2(0x31, 0xc0);
	/* mov qword ptr [rbp+32], rax */
	EMIT4(0x48, 0x89, 0x45, 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 47 /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 36
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, 36);              /* mov dword ptr [rbp + 36], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));
	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
#define OFFSET3 10
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	EMIT2(0xFF, 0xE0);                        /* jmp rax */

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}
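
/* The LD_ABS/LD_IND helpers in bpf_jit.S expect the headlen
 * (skb->len - skb->data_len) pre-loaded in r9d and skb->data in r10, as
 * noted next to reg2hex[] above. The helper below (re)establishes that
 * cached state; do_jit() calls it again after any helper call that may
 * change packet data.
 */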

static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov %r10, off32(%rdi) */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_ax_reg = ctx->seen_ax_reg | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;
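
	/* Each BPF instruction is first emitted into the on-stack temp[]
	 * buffer (with BPF_INSN_SAFETY bytes of slack, since emit_code() can
	 * store a full 32-bit word for a 3-byte emit) and only copied into
	 * the final image once the pass sizes have converged; addrs[i]
	 * records the end offset of instruction i and is what jump offsets
	 * are computed from.
	 */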
	emit_prologue(&prog, bpf_prog->aux->stack_depth);

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
			ctx->seen_ax_reg = seen_ax_reg = true;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}

		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
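			/* x86 div implicitly divides rdx:rax by its operand
			 * and leaves the quotient in rax and the remainder in
			 * rdx, so rax/rdx are saved around the operation, the
			 * divisor is staged in r11 and the requested result
			 * (rax for DIV, rdx for MOD) is moved to dst_reg
			 * afterwards.
			 */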
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

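			/* On little-endian x86 a BPF_FROM_BE conversion needs
			 * a real byte swap, while BPF_FROM_LE below only has
			 * to zero-extend the low 16/32 bits (and is a nop for
			 * 64 bit), since the value is already in host byte
			 * order.
			 */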
		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
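
			/* call: the rel32 in 'call rel32' is relative to the
			 * end of the call instruction, while addrs[i] points
			 * past everything emitted for this BPF insn, so any
			 * bytes emitted after the call (the pops below, or the
			 * skb data/hlen reload) are added to jmp_offset to
			 * compensate.
			 */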
			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_pkt_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			if (seen_ax_reg)
				/* r10 = skb->data, mov %r10, off32(%rbx) */
				EMIT3_off32(0x4c, 0x8b, 0x93,
					    offsetof(struct sk_buff, data));
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp+0] */
			EMIT4(0x48, 0x8B, 0x5D, 0);
			/* mov r13, qword ptr [rbp+8] */
			EMIT4(0x4C, 0x8B, 0x6D, 8);
			/* mov r14, qword ptr [rbp+16] */
			EMIT4(0x4C, 0x8B, 0x75, 16);
			/* mov r15, qword ptr [rbp+24] */
			EMIT4(0x4C, 0x8B, 0x7D, 24);

			/* add rbp, AUX_STACK_SPACE */
			EMIT4(0x48, 0x83, 0xC5, AUX_STACK_SPACE);
			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF
			 * instructions. This error will be seen if a new
			 * instruction was added to the interpreter but not to
			 * the JIT, or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out;
	}

	/* Before the first pass, make a rough estimation of addrs[]:
	 * each bpf instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	/* The JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large bpf programs may
	 * converge only on the last pass; in that case do one more pass
	 * to emit the final image.
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				prog = orig_prog;
				goto out_addrs;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		bpf_jit_binary_lock_ro(header);
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

out_addrs:
	kfree(addrs);
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}