// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}
#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
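/*
 * Illustration (not in the original source): the multi-byte EMIT forms pack
 * their operands little-endian before handing them to emit_code(), so
 * EMIT3(0x48, 0x89, 0xE5) appends the byte sequence 48 89 e5, which is the
 * encoding of "mov rbp, rsp".
 */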
static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}
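/*
 * Note (added for clarity): these predicates let the JIT pick the shortest
 * encoding. For example, an ALU op whose immediate fits in s8 can use the
 * sign-extended imm8 form (opcode 0x83, 4 bytes with a REX prefix) instead
 * of the imm32 form (opcode 0x81, 7 bytes).
 */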
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)
static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}
/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX, callee saved */
	[BPF_REG_7] = 5,  /* R13, callee saved */
	[BPF_REG_8] = 6,  /* R14, callee saved */
	[BPF_REG_9] = 7,  /* R15, callee saved */
	[BPF_REG_FP] = 5, /* RBP, readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};
static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};
/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}
/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}
static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}
/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}
/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}
/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
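/*
 * Worked example (added for clarity): EMIT_mov(BPF_REG_1, BPF_REG_2) expands
 * to EMIT3(add_2mod(0x48, ...), 0x89, add_2reg(0xC0, ...)) and emits
 * 48 89 f7, i.e. "mov rdi, rsi". The REX prefix stays 0x48 because neither
 * register needs the extended-register bits, and the ModRM byte is
 * 0xC0 + 7 (rdi) + (6 (rsi) << 3) = 0xf7.
 */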
static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}
struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	11
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}
/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;
	int cnt = X86_PATCH_SIZE;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
	prog += cnt;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}
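/*
 * Note (added for clarity): for non-classic programs the code emitted above
 * before the stack adjustment is exactly X86_TAIL_CALL_OFFSET (11) bytes:
 * the 5-byte nop, the 2-byte xor eax,eax (or nop2), "push rbp" (1 byte) and
 * "mov rbp, rsp" (3 bytes). A tail call jumps past these bytes so the
 * tail-call count and the frame already set up by the current program are
 * reused by the target program.
 */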
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}
static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}
static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}
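/*
 * Note (added for clarity): 0xE8 is "call rel32" and 0xE9 is "jmp rel32";
 * the rel32 operand is relative to the first byte after the 5-byte
 * instruction, which is why emit_patch() computes func - (ip + X86_PATCH_SIZE).
 */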
static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}
static int get_pop_bytes(bool *callee_regs_used)
{
	int bytes = 0;

	if (callee_regs_used[3])
		bytes += 2;
	if (callee_regs_used[2])
		bytes += 2;
	if (callee_regs_used[1])
		bytes += 2;
	if (callee_regs_used[0])
		bytes += 1;

	return bytes;
}
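/*
 * Note (added for clarity): "pop r13"/"pop r14"/"pop r15" each encode as two
 * bytes (REX.B prefix + 0x58+reg) while "pop rbx" is a single byte, which is
 * what the byte counts above reflect. The result feeds the hand-computed
 * branch offsets in the tail-call emitters below.
 */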
/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 42;
	int off2 = 31;
	int off3 = 9;
	int cnt = 0;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for each of the offsets that
	 * are used for bailing out of the tail call
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;
	off2 += pop_bytes;
	off3 += pop_bytes;

	if (stack_depth) {
		off1 += 7;
		off2 += 7;
		off3 += 7;
	}

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	RETPOLINE_RCX_BPF_JIT();

	/* out: */
	*pprog = prog;
}
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image,
				      bool *callee_regs_used, u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 20;
	int poke_off;
	int cnt = 0;

	/* count the additional bytes used for popping callee regs to stack
	 * that need to be taken into account for the jump offset that is used
	 * for bailing out of the tail call when the limit is reached
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;

	/*
	 * total bytes for:
	 * - nop5/ jmpq $off
	 * - pop callee regs
	 * - sub rsp, $val if depth > 0
	 * - pop rax
	 */
	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
	if (stack_depth) {
		poke_off += 7;
		off1 += 7;
	}

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, off1);                          /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;

	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}
static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->tailcall_target_stable),
			 * any parallel bpf_arch_text_poke() might occur
			 * still on the read-write image until we finally
			 * locked it as read-only. Both modifications on
			 * the given image are under text_mutex to avoid
			 * interference.
			 */
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;
	int cnt = 0;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated LLVM tends to load imm64 over mov32
		 * directly, so save couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}
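/*
 * Note (added for clarity): "movabs rax, imm64" is a 10-byte instruction
 * (REX.W + B8+rd + 8-byte immediate). When the value fits in 32 bits, the
 * plain "mov eax, imm32" (B8+rd + 4-byte immediate, 5 bytes) already
 * zero-extends into the full 64-bit register, hence the size saving the
 * comment above refers to.
 */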
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}
/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	/*
	 * If insn->off == 0 we can save one extra byte, but
	 * special case of x86 R13 which always needs an offset
	 * is not worth the hassle
	 */
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
	*pprog = prog;
}
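/*
 * Worked example (added for clarity): emit_ldx(&prog, BPF_DW, BPF_REG_0,
 * BPF_REG_1, 0x14) emits 48 8b 47 14, i.e. "mov rax, qword ptr [rdi + 0x14]".
 * The 8-bit displacement form (ModRM mod=01) is chosen because 0x14 fits in
 * imm8; larger offsets fall back to the 32-bit displacement form.
 */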
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;
	int cnt = 0;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	if (is_imm8(off))
		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
	else
		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
	*pprog = prog;
}
static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}
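/*
 * Note (added for clarity): x->fixup is packed by do_jit() for PROBE_MEM
 * loads. The low 8 bits hold the length of the faulting x86 load (added to
 * regs->ip to skip it) and the upper bits hold the pt_regs offset of the
 * destination register (from reg2pt_regs[]), which is zeroed instead.
 */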
static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0, excnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);
	addrs[0] = prog - temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;
			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; b2 = 0x05; break;
			case BPF_SUB: b3 = 0xE8; b2 = 0x2D; break;
			case BPF_AND: b3 = 0xE0; b2 = 0x25; break;
			case BPF_OR:  b3 = 0xC8; b2 = 0x0D; break;
			case BPF_XOR: b3 = 0xF0; b2 = 0x35; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;
			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}

			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;
			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;
			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				s64 delta;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;
			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;
			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -(bpf_prog->aux->stack_depth + 8));
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;
| BPF_TAIL_CALL
:
1266 emit_bpf_tail_call_direct(&bpf_prog
->aux
->poke_tab
[imm32
- 1],
1267 &prog
, addrs
[i
], image
,
1269 bpf_prog
->aux
->stack_depth
);
1271 emit_bpf_tail_call_indirect(&prog
,
1273 bpf_prog
->aux
->stack_depth
);
			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				if (BPF_CLASS(insn->code) == BPF_JMP)
					EMIT1(add_2mod(0x48, dst_reg, dst_reg));
				else if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* Optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
| BPF_EXIT
:
1450 jmp_offset
= ctx
->cleanup_addr
- addrs
[i
];
1454 /* Update cleanup_addr */
1455 ctx
->cleanup_addr
= proglen
;
1456 pop_callee_regs(&prog
, callee_regs_used
);
1457 EMIT1(0xC9); /* leave */
1458 EMIT1(0xC3); /* ret */
		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}
		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}
static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;

	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}
static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_prog *p, int stack_size, bool mod_ret)
{
	u8 *prog = *pprog;
	int cnt = 0;

	if (p->aux->sleepable) {
		if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
			return -EINVAL;
	} else {
		if (emit_call(&prog, __bpf_prog_enter, prog))
			return -EINVAL;
		/* remember prog start time returned by __bpf_prog_enter */
		emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
	}

	/* arg1: lea rdi, [rbp - stack_size] */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * of the previous call which is then passed on the stack to
	 * the next BPF program.
	 */
	if (mod_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	if (p->aux->sleepable) {
		if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
			return -EINVAL;
	} else {
		/* arg1: mov rdi, progs[i] */
		emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
			       (u32) (long) p);
		/* arg2: mov rsi, rbx <- start time in nsec */
		emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
		if (emit_call(&prog, __bpf_prog_exit, prog))
			return -EINVAL;
	}

	*pprog = prog;
	return 0;
}
static void emit_nops(u8 **pprog, unsigned int len)
{
	unsigned int i, noplen;
	u8 *prog = *pprog;
	int cnt = 0;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(ideal_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}
static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}
static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_progs *tp, int stack_size)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}
static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_progs *tp, int stack_size,
			      u8 **branches)
{
	u8 *prog = *pprog;
	int i, cnt = 0;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tp->nr_progs; i++) {
		if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) !=  0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump). These nops
		 * are replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}
/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret                             // return to its caller
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call)
{
	int ret, i, cnt = 0, nr_args = m->nr_args;
	int stack_size = nr_args * 8;
	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
	u8 **branches = NULL;
	u8 *prog;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
	    (flags & BPF_TRAMP_F_SKIP_FRAME))
		return -EINVAL;

	if (flags & BPF_TRAMP_F_CALL_ORIG)
		stack_size += 8; /* room for return value of orig_call */

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;

	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	save_regs(m, &prog, nr_args, stack_size);

	if (fentry->nr_progs)
		if (invoke_bpf(m, &prog, fentry, stack_size))
			return -EINVAL;

	if (fmod_ret->nr_progs) {
		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
				       branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		if (fentry->nr_progs || fmod_ret->nr_progs)
			restore_regs(m, &prog, nr_args, stack_size);

		/* call original function */
		if (emit_call(&prog, orig_call, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
		/* remember return value in a stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	}

	if (fmod_ret->nr_progs) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_progs; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
	}

	if (fexit->nr_progs)
		if (invoke_bpf(m, &prog, fexit, stack_size)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, stack_size);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG)
		/* restore original return value back into RAX */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}
static int emit_fallback_jump(u8 **pprog)
{
	u8 *prog = *pprog;
	int err = 0;

#ifdef CONFIG_RETPOLINE
	/* Note that this assumes that the compiler uses external
	 * thunks for indirect calls. Both clang and GCC use the same
	 * naming convention for external thunks.
	 */
	err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
	int cnt = 0;

	EMIT2(0xFF, 0xE2);	/* jmp rdx */
#endif
	*pprog = prog;
	return err;
}
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1, cnt = 0;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		err = emit_fallback_jump(&prog);	/* jmp thunk/indirect */
		if (err)
			return err;

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}
static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}
struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		goto skip_init_addrs;
	}
	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before first pass, make a rough estimation of addrs[]
	 * each BPF instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}