// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/ftrace.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/unwind.h>
static bool all_callee_regs_used[4] = {true, true, true, true};
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
        if (len == 1)
                *ptr = bytes;
        else if (len == 2)
                *(u16 *)ptr = bytes;
        else {
                *(u32 *)ptr = bytes;
                barrier();
        }
        return ptr + len;
}
#define EMIT(bytes, len) \
        do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
        do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
        do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
        do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
        do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
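/*
 * The byte arguments are packed little-endian into a single u32, so for
 * example EMIT3(0x48, 0x89, 0xC7) appends the byte sequence 48 89 c7,
 * i.e. 'mov rdi, rax', to the image, and EMIT1_off32(0xE8, offset) emits
 * a 5-byte call with a 32-bit relative displacement.
 */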
#ifdef CONFIG_X86_KERNEL_IBT
#define EMIT_ENDBR()		EMIT(gen_endbr(), 4)
#define EMIT_ENDBR_POISON()	EMIT(gen_endbr_poison(), 4)
#else
#define EMIT_ENDBR()
#define EMIT_ENDBR_POISON()
#endif
static bool is_imm8(int value)
{
        return value <= 127 && value >= -128;
}
/*
 * Let us limit the positive offset to be <= 123.
 * This is to ensure eventual jit convergence for the following patterns:
 *
 *   pass4, final_proglen=4391:
 *     20e:  48 85 ff                test   rdi,rdi
 *     213:  48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *     289:  48 85 ff                test   rdi,rdi
 *     28e:  e9 7f ff ff ff          jmp    0x212
 *     293:  bf 03 00 00 00          mov    edi,0x3
 *   Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (-125)
 *   and insn at 0x28e is 5-byte jmp insn with offset -129.
 *
 *   pass5, final_proglen=4392:
 *     20e:  48 85 ff                test   rdi,rdi
 *     211:  0f 84 80 00 00 00       je     0x297
 *     217:  48 8b 77 00             mov    rsi,QWORD PTR [rdi+0x0]
 *     28d:  48 85 ff                test   rdi,rdi
 *     292:  eb 84                   jmp    0x218
 *     294:  bf 03 00 00 00          mov    edi,0x3
 *   Note that insn at 0x211 is 6-byte cond jump insn now since its offset
 *   becomes 0x80 based on previous round (0x293 - 0x213 = 0x80).
 *   At the same time, insn at 0x292 is a 2-byte insn since its offset is
 *   -124 (0x84 as a signed byte).
 *
 * pass6 will repeat the same code as in pass4 and this will prevent
 * eventual convergence.
 *
 * To fix this issue, we need to break the je (2->6 bytes) <-> jmp (5->2 bytes)
 * cycle in the above. In the above example a je offset <= 0x7c should work.
 *
 * For other cases, je <-> je needs offset <= 0x7b to avoid the no-convergence
 * issue. For jmp <-> je and jmp <-> jmp cases, a jmp offset <= 0x7c should
 * avoid the no-convergence issue.
 *
 * Overall, let us limit the positive offset for 8bit cond/uncond jmp insn
 * to maximum 123 (0x7b). This way, the jit pass can eventually converge.
 */
static bool is_imm8_jmp_offset(int value)
{
        return value <= 123 && value >= -128;
}
static bool is_simm32(s64 value)
{
        return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
        return value == (u64)(u32)value;
}
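/*
 * For example, 0xffffffff (4294967295) is a valid uimm32 but not a valid
 * simm32 (sign-extending its low 32 bits yields 0xffffffffffffffff), while
 * -1 is a valid simm32 but not a valid uimm32. emit_mov_imm64() below relies
 * on this distinction to pick between a zero-extending 'mov eax, imm32', a
 * sign-extending 'mov rax, imm32' and a full 10-byte 'movabsq'.
 */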
#define EMIT_mov(DST, SRC)								 \
        do {										 \
                if (DST != SRC)								 \
                        EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
        } while (0)
static int bpf_size_to_x86_bytes(int bpf_size)
{
        if (bpf_size == BPF_W)
                return 4; /* dword */
        else if (bpf_size == BPF_H)
                return 2; /* word */
        else if (bpf_size == BPF_B)
                return 1; /* byte */
        else if (bpf_size == BPF_DW)
                return 4; /* imm32 */
        else
                return 0;
}
/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
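/*
 * For example, the short form of 'je' is opcode 0x74 followed by a signed
 * 8-bit displacement; adding 0x10 and prefixing 0x0f gives 0x0f 0x84, the
 * near form with a signed 32-bit displacement. The conditional-jump
 * emission in do_jit() below relies on exactly this +0x10 relationship.
 */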
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
#define X86_REG_R12 (MAX_BPF_JIT_REG + 3)
/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
        [BPF_REG_0] = 0,  /* RAX */
        [BPF_REG_1] = 7,  /* RDI */
        [BPF_REG_2] = 6,  /* RSI */
        [BPF_REG_3] = 2,  /* RDX */
        [BPF_REG_4] = 1,  /* RCX */
        [BPF_REG_5] = 0,  /* R8  */
        [BPF_REG_6] = 3,  /* RBX callee saved */
        [BPF_REG_7] = 5,  /* R13 callee saved */
        [BPF_REG_8] = 6,  /* R14 callee saved */
        [BPF_REG_9] = 7,  /* R15 callee saved */
        [BPF_REG_FP] = 5, /* RBP readonly */
        [BPF_REG_AX] = 2, /* R10 temp register */
        [AUX_REG] = 3,    /* R11 temp register */
        [X86_REG_R9] = 1, /* R9 register, 6th function argument */
        [X86_REG_R12] = 4, /* R12 callee saved */
};
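/*
 * Each entry holds only the low 3 bits of the hardware register number; the
 * fourth bit comes from the REX prefix. For example, BPF_REG_1 (RDI) and
 * BPF_REG_9 (R15) share the value 7, and is_ereg() below tells them apart by
 * deciding whether add_1mod()/add_2mod() must set the corresponding
 * REX.B/REX.R bit.
 */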
static const int reg2pt_regs[] = {
        [BPF_REG_0] = offsetof(struct pt_regs, ax),
        [BPF_REG_1] = offsetof(struct pt_regs, di),
        [BPF_REG_2] = offsetof(struct pt_regs, si),
        [BPF_REG_3] = offsetof(struct pt_regs, dx),
        [BPF_REG_4] = offsetof(struct pt_regs, cx),
        [BPF_REG_5] = offsetof(struct pt_regs, r8),
        [BPF_REG_6] = offsetof(struct pt_regs, bx),
        [BPF_REG_7] = offsetof(struct pt_regs, r13),
        [BPF_REG_8] = offsetof(struct pt_regs, r14),
        [BPF_REG_9] = offsetof(struct pt_regs, r15),
};
/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
        return (1 << reg) & (BIT(BPF_REG_5) |
                             BIT(AUX_REG) |
                             BIT(BPF_REG_7) |
                             BIT(BPF_REG_8) |
                             BIT(BPF_REG_9) |
                             BIT(X86_REG_R9) |
                             BIT(X86_REG_R12) |
                             BIT(BPF_REG_AX));
}
/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
        return is_ereg(reg) ||
               (1 << reg) & (BIT(BPF_REG_1) |
                             BIT(BPF_REG_2) |
                             BIT(BPF_REG_FP));
}
static bool is_axreg(u32 reg)
{
        return reg == BPF_REG_0;
}
/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)

static u8 add_2mod(u8 byte, u32 r1, u32 r2)

static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index)
/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
        return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
        return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
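/*
 * Worked example: EMIT_mov(BPF_REG_1, BPF_REG_2) copies R2 (RSI) into
 * R1 (RDI). add_2mod(0x48, ...) leaves the REX prefix at 0x48 because
 * neither register is an extended one, and add_2reg(0xC0, BPF_REG_1,
 * BPF_REG_2) yields 0xC0 + 7 + (6 << 3) = 0xF7, so the emitted bytes are
 * 48 89 f7, i.e. 'mov rdi, rsi'. With BPF_REG_8 (R14) as the source, the
 * REX prefix would instead pick up the REX.R bit and become 0x4C.
 */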
/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
static void jit_fill_hole(void *area, unsigned int size)
{
        /* Fill whole space with INT3 instructions */
        memset(area, 0xcc, size);
}
int bpf_arch_text_invalidate(void *dst, size_t len)
{
        return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
}
struct jit_context {
        int cleanup_addr; /* Epilogue code offset */

        /*
         * Program specific offsets of labels in the code; these rely on the
         * JIT doing at least 2 passes, recording the position on the first
         * pass, only to generate the correct offset on the second pass.
         */
        int tail_call_direct_label;
        int tail_call_indirect_label;
};
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	(12 + ENDBR_INSN_SIZE)
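/*
 * The 12 bytes skipped on a tail call correspond to the fixed part of the
 * prologue emitted by emit_prologue() below: a 5-byte nop (the BPF trampoline
 * patch site), either 'xor rax, rax' or a 3-byte nop (3 bytes), 'push rbp'
 * (1 byte) and 'mov rbp, rsp' (3 bytes). An optional ENDBR in front of them
 * is accounted for separately via ENDBR_INSN_SIZE.
 */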
static void push_r12(u8 **pprog)
{
        u8 *prog = *pprog;

        EMIT2(0x41, 0x54);   /* push r12 */

        *pprog = prog;
}

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
        u8 *prog = *pprog;

        if (callee_regs_used[0])
                EMIT1(0x53);         /* push rbx */
        if (callee_regs_used[1])
                EMIT2(0x41, 0x55);   /* push r13 */
        if (callee_regs_used[2])
                EMIT2(0x41, 0x56);   /* push r14 */
        if (callee_regs_used[3])
                EMIT2(0x41, 0x57);   /* push r15 */

        *pprog = prog;
}
static void pop_r12(u8 **pprog)
{
        u8 *prog = *pprog;

        EMIT2(0x41, 0x5C);   /* pop r12 */

        *pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
        u8 *prog = *pprog;

        if (callee_regs_used[3])
                EMIT2(0x41, 0x5F);   /* pop r15 */
        if (callee_regs_used[2])
                EMIT2(0x41, 0x5E);   /* pop r14 */
        if (callee_regs_used[1])
                EMIT2(0x41, 0x5D);   /* pop r13 */
        if (callee_regs_used[0])
                EMIT1(0x5B);         /* pop rbx */

        *pprog = prog;
}
static void emit_nops(u8 **pprog, int len)
{
        u8 *prog = *pprog;
        int i, noplen;

        while (len > 0) {
                noplen = len;

                if (noplen > ASM_NOP_MAX)
                        noplen = ASM_NOP_MAX;

                for (i = 0; i < noplen; i++)
                        EMIT1(x86_nops[noplen][i]);
                len -= noplen;
        }

        *pprog = prog;
}
/*
 * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT
 * in arch/x86/kernel/alternative.c
 */
398 static void emit_fineibt(u8
**pprog
, u32 hash
)
403 EMIT3_off32(0x41, 0x81, 0xea, hash
); /* subl $hash, %r10d */
404 EMIT2(0x74, 0x07); /* jz.d8 +7 */
405 EMIT2(0x0f, 0x0b); /* ud2 */
406 EMIT1(0x90); /* nop */
412 static void emit_kcfi(u8
**pprog
, u32 hash
)
416 EMIT1_off32(0xb8, hash
); /* movl $hash, %eax */
417 #ifdef CONFIG_CALL_PADDING
435 static void emit_cfi(u8
**pprog
, u32 hash
)
441 emit_fineibt(&prog
, hash
);
445 emit_kcfi(&prog
, hash
);
456 static void emit_prologue_tail_call(u8
**pprog
, bool is_subprog
)
461 /* cmp rax, MAX_TAIL_CALL_CNT */
462 EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT
);
463 EMIT2(X86_JA
, 6); /* ja 6 */
464 /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT.
465 * case1: entry of main prog.
466 * case2: tail callee of main prog.
468 EMIT1(0x50); /* push rax */
469 /* Make rax as tail_call_cnt_ptr. */
470 EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
471 EMIT2(0xEB, 1); /* jmp 1 */
472 /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT.
473 * case: tail callee of subprog.
475 EMIT1(0x50); /* push rax */
476 /* push tail_call_cnt_ptr */
477 EMIT1(0x50); /* push rax */
478 } else { /* is_subprog */
479 /* rax is tail_call_cnt_ptr. */
480 EMIT1(0x50); /* push rax */
481 EMIT1(0x50); /* push rax */
/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
492 static void emit_prologue(u8
**pprog
, u32 stack_depth
, bool ebpf_from_cbpf
,
493 bool tail_call_reachable
, bool is_subprog
,
494 bool is_exception_cb
)
498 emit_cfi(&prog
, is_subprog
? cfi_bpf_subprog_hash
: cfi_bpf_hash
);
499 /* BPF trampoline can be made to work without these nops,
500 * but let's waste 5 bytes for now and optimize later
502 emit_nops(&prog
, X86_PATCH_SIZE
);
503 if (!ebpf_from_cbpf
) {
504 if (tail_call_reachable
&& !is_subprog
)
505 /* When it's the entry of the whole tailcall context,
506 * zeroing rax means initialising tail_call_cnt.
508 EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */
510 /* Keep the same instruction layout. */
511 emit_nops(&prog
, 3); /* nop3 */
513 /* Exception callback receives FP as third parameter */
514 if (is_exception_cb
) {
515 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
516 EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */
517 /* The main frame must have exception_boundary as true, so we
518 * first restore those callee-saved regs from stack, before
519 * reusing the stack frame.
521 pop_callee_regs(&prog
, all_callee_regs_used
);
523 /* Reset the stack frame. */
524 EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */
526 EMIT1(0x55); /* push rbp */
527 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
530 /* X86_TAIL_CALL_OFFSET is here */
533 /* sub rsp, rounded_stack_depth */
535 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth
, 8));
536 if (tail_call_reachable
)
537 emit_prologue_tail_call(&prog
, is_subprog
);
541 static int emit_patch(u8
**pprog
, void *func
, void *ip
, u8 opcode
)
546 offset
= func
- (ip
+ X86_PATCH_SIZE
);
547 if (!is_simm32(offset
)) {
548 pr_err("Target call %p is out of range\n", func
);
551 EMIT1_off32(opcode
, offset
);
556 static int emit_call(u8
**pprog
, void *func
, void *ip
)
558 return emit_patch(pprog
, func
, ip
, 0xE8);
561 static int emit_rsb_call(u8
**pprog
, void *func
, void *ip
)
563 OPTIMIZER_HIDE_VAR(func
);
564 ip
+= x86_call_depth_emit_accounting(pprog
, func
, ip
);
565 return emit_patch(pprog
, func
, ip
, 0xE8);
568 static int emit_jump(u8
**pprog
, void *func
, void *ip
)
570 return emit_patch(pprog
, func
, ip
, 0xE9);
573 static int __bpf_arch_text_poke(void *ip
, enum bpf_text_poke_type t
,
574 void *old_addr
, void *new_addr
)
576 const u8
*nop_insn
= x86_nops
[5];
577 u8 old_insn
[X86_PATCH_SIZE
];
578 u8 new_insn
[X86_PATCH_SIZE
];
582 memcpy(old_insn
, nop_insn
, X86_PATCH_SIZE
);
585 ret
= t
== BPF_MOD_CALL
?
586 emit_call(&prog
, old_addr
, ip
) :
587 emit_jump(&prog
, old_addr
, ip
);
592 memcpy(new_insn
, nop_insn
, X86_PATCH_SIZE
);
595 ret
= t
== BPF_MOD_CALL
?
596 emit_call(&prog
, new_addr
, ip
) :
597 emit_jump(&prog
, new_addr
, ip
);
603 mutex_lock(&text_mutex
);
604 if (memcmp(ip
, old_insn
, X86_PATCH_SIZE
))
607 if (memcmp(ip
, new_insn
, X86_PATCH_SIZE
)) {
608 text_poke_bp(ip
, new_insn
, X86_PATCH_SIZE
, NULL
);
612 mutex_unlock(&text_mutex
);
616 int bpf_arch_text_poke(void *ip
, enum bpf_text_poke_type t
,
617 void *old_addr
, void *new_addr
)
619 if (!is_kernel_text((long)ip
) &&
620 !is_bpf_text_address((long)ip
))
621 /* BPF poking in modules is not supported */
625 * See emit_prologue(), for IBT builds the trampoline hook is preceded
626 * with an ENDBR instruction.
628 if (is_endbr(*(u32
*)ip
))
629 ip
+= ENDBR_INSN_SIZE
;
631 return __bpf_arch_text_poke(ip
, t
, old_addr
, new_addr
);
634 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
636 static void emit_indirect_jump(u8
**pprog
, int reg
, u8
*ip
)
640 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE
)) {
642 EMIT2(0xFF, 0xE0 + reg
);
643 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE
)) {
644 OPTIMIZER_HIDE_VAR(reg
);
645 if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH
))
646 emit_jump(&prog
, &__x86_indirect_jump_thunk_array
[reg
], ip
);
648 emit_jump(&prog
, &__x86_indirect_thunk_array
[reg
], ip
);
650 EMIT2(0xFF, 0xE0 + reg
); /* jmp *%\reg */
651 if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE
) || IS_ENABLED(CONFIG_MITIGATION_SLS
))
652 EMIT1(0xCC); /* int3 */
658 static void emit_return(u8
**pprog
, u8
*ip
)
662 if (cpu_feature_enabled(X86_FEATURE_RETHUNK
)) {
663 emit_jump(&prog
, x86_return_thunk
, ip
);
665 EMIT1(0xC3); /* ret */
666 if (IS_ENABLED(CONFIG_MITIGATION_SLS
))
667 EMIT1(0xCC); /* int3 */
#define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)	(-16 - round_up(stack, 8))
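/*
 * This offset follows from the frame layout built by emit_prologue(): after
 * 'push rbp; mov rbp, rsp' the program stack of round_up(stack_depth, 8)
 * bytes is carved out below rbp, and emit_prologue_tail_call() then pushes
 * two more 8-byte slots. The lower of those two slots, at
 * rbp - round_up(stack_depth, 8) - 16, always holds tail_call_cnt_ptr, which
 * is what the tail-call emitters and LOAD_TAIL_CALL_CNT_PTR() reload from
 * the stack.
 */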
/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
689 static void emit_bpf_tail_call_indirect(struct bpf_prog
*bpf_prog
,
690 u8
**pprog
, bool *callee_regs_used
,
691 u32 stack_depth
, u8
*ip
,
692 struct jit_context
*ctx
)
694 int tcc_ptr_off
= BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth
);
695 u8
*prog
= *pprog
, *start
= *pprog
;
699 * rdi - pointer to ctx
700 * rsi - pointer to bpf_array
701 * rdx - index in bpf_array
705 * if (index >= array->map.max_entries)
708 EMIT2(0x89, 0xD2); /* mov edx, edx */
709 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
710 offsetof(struct bpf_array
, map
.max_entries
));
712 offset
= ctx
->tail_call_indirect_label
- (prog
+ 2 - start
);
713 EMIT2(X86_JBE
, offset
); /* jbe out */
716 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
719 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off
); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
720 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT
); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
722 offset
= ctx
->tail_call_indirect_label
- (prog
+ 2 - start
);
723 EMIT2(X86_JAE
, offset
); /* jae out */
725 /* prog = array->ptrs[index]; */
726 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
727 offsetof(struct bpf_array
, ptrs
));
733 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
735 offset
= ctx
->tail_call_indirect_label
- (prog
+ 2 - start
);
736 EMIT2(X86_JE
, offset
); /* je out */
738 /* Inc tail_call_cnt if the slot is populated. */
739 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
741 if (bpf_prog
->aux
->exception_boundary
) {
742 pop_callee_regs(&prog
, all_callee_regs_used
);
745 pop_callee_regs(&prog
, callee_regs_used
);
746 if (bpf_arena_get_kern_vm_start(bpf_prog
->aux
->arena
))
750 /* Pop tail_call_cnt_ptr. */
751 EMIT1(0x58); /* pop rax */
752 /* Pop tail_call_cnt, if it's main prog.
753 * Pop tail_call_cnt_ptr, if it's subprog.
755 EMIT1(0x58); /* pop rax */
757 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
758 round_up(stack_depth
, 8));
760 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
761 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
762 offsetof(struct bpf_prog
, bpf_func
));
763 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
764 X86_TAIL_CALL_OFFSET
);
766 * Now we're ready to jump into next BPF program
767 * rdi == ctx (1st arg)
768 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
770 emit_indirect_jump(&prog
, 1 /* rcx */, ip
+ (prog
- start
));
773 ctx
->tail_call_indirect_label
= prog
- start
;
777 static void emit_bpf_tail_call_direct(struct bpf_prog
*bpf_prog
,
778 struct bpf_jit_poke_descriptor
*poke
,
780 bool *callee_regs_used
, u32 stack_depth
,
781 struct jit_context
*ctx
)
783 int tcc_ptr_off
= BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth
);
784 u8
*prog
= *pprog
, *start
= *pprog
;
788 * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT)
791 EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off
); /* mov rax, qword ptr [rbp - tcc_ptr_off] */
792 EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT
); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */
794 offset
= ctx
->tail_call_direct_label
- (prog
+ 2 - start
);
795 EMIT2(X86_JAE
, offset
); /* jae out */
797 poke
->tailcall_bypass
= ip
+ (prog
- start
);
798 poke
->adj_off
= X86_TAIL_CALL_OFFSET
;
799 poke
->tailcall_target
= ip
+ ctx
->tail_call_direct_label
- X86_PATCH_SIZE
;
800 poke
->bypass_addr
= (u8
*)poke
->tailcall_target
+ X86_PATCH_SIZE
;
802 emit_jump(&prog
, (u8
*)poke
->tailcall_target
+ X86_PATCH_SIZE
,
803 poke
->tailcall_bypass
);
805 /* Inc tail_call_cnt if the slot is populated. */
806 EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */
808 if (bpf_prog
->aux
->exception_boundary
) {
809 pop_callee_regs(&prog
, all_callee_regs_used
);
812 pop_callee_regs(&prog
, callee_regs_used
);
813 if (bpf_arena_get_kern_vm_start(bpf_prog
->aux
->arena
))
817 /* Pop tail_call_cnt_ptr. */
818 EMIT1(0x58); /* pop rax */
819 /* Pop tail_call_cnt, if it's main prog.
820 * Pop tail_call_cnt_ptr, if it's subprog.
822 EMIT1(0x58); /* pop rax */
824 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth
, 8));
826 emit_nops(&prog
, X86_PATCH_SIZE
);
829 ctx
->tail_call_direct_label
= prog
- start
;
834 static void bpf_tail_call_direct_fixup(struct bpf_prog
*prog
)
836 struct bpf_jit_poke_descriptor
*poke
;
837 struct bpf_array
*array
;
838 struct bpf_prog
*target
;
841 for (i
= 0; i
< prog
->aux
->size_poke_tab
; i
++) {
842 poke
= &prog
->aux
->poke_tab
[i
];
843 if (poke
->aux
&& poke
->aux
!= prog
->aux
)
846 WARN_ON_ONCE(READ_ONCE(poke
->tailcall_target_stable
));
848 if (poke
->reason
!= BPF_POKE_REASON_TAIL_CALL
)
851 array
= container_of(poke
->tail_call
.map
, struct bpf_array
, map
);
852 mutex_lock(&array
->aux
->poke_mutex
);
853 target
= array
->ptrs
[poke
->tail_call
.key
];
855 ret
= __bpf_arch_text_poke(poke
->tailcall_target
,
857 (u8
*)target
->bpf_func
+
860 ret
= __bpf_arch_text_poke(poke
->tailcall_bypass
,
862 (u8
*)poke
->tailcall_target
+
863 X86_PATCH_SIZE
, NULL
);
866 WRITE_ONCE(poke
->tailcall_target_stable
, true);
867 mutex_unlock(&array
->aux
->poke_mutex
);
871 static void emit_mov_imm32(u8
**pprog
, bool sign_propagate
,
872 u32 dst_reg
, const u32 imm32
)
878 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
879 * (which zero-extends imm32) to save 2 bytes.
881 if (sign_propagate
&& (s32
)imm32
< 0) {
882 /* 'mov %rax, imm32' sign extends imm32 */
883 b1
= add_1mod(0x48, dst_reg
);
886 EMIT3_off32(b1
, b2
, add_1reg(b3
, dst_reg
), imm32
);
891 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
895 if (is_ereg(dst_reg
))
896 EMIT1(add_2mod(0x40, dst_reg
, dst_reg
));
899 EMIT2(b2
, add_2reg(b3
, dst_reg
, dst_reg
));
903 /* mov %eax, imm32 */
904 if (is_ereg(dst_reg
))
905 EMIT1(add_1mod(0x40, dst_reg
));
906 EMIT1_off32(add_1reg(0xB8, dst_reg
), imm32
);
911 static void emit_mov_imm64(u8
**pprog
, u32 dst_reg
,
912 const u32 imm32_hi
, const u32 imm32_lo
)
914 u64 imm64
= ((u64
)imm32_hi
<< 32) | (u32
)imm32_lo
;
917 if (is_uimm32(imm64
)) {
919 * For emitting plain u32, where sign bit must not be
920 * propagated LLVM tends to load imm64 over mov32
921 * directly, so save couple of bytes by just doing
922 * 'mov %eax, imm32' instead.
924 emit_mov_imm32(&prog
, false, dst_reg
, imm32_lo
);
925 } else if (is_simm32(imm64
)) {
926 emit_mov_imm32(&prog
, true, dst_reg
, imm32_lo
);
928 /* movabsq rax, imm64 */
929 EMIT2(add_1mod(0x48, dst_reg
), add_1reg(0xB8, dst_reg
));
937 static void emit_mov_reg(u8
**pprog
, bool is64
, u32 dst_reg
, u32 src_reg
)
943 EMIT_mov(dst_reg
, src_reg
);
946 if (is_ereg(dst_reg
) || is_ereg(src_reg
))
947 EMIT1(add_2mod(0x40, dst_reg
, src_reg
));
948 EMIT2(0x89, add_2reg(0xC0, dst_reg
, src_reg
));
954 static void emit_movsx_reg(u8
**pprog
, int num_bits
, bool is64
, u32 dst_reg
,
960 /* movs[b,w,l]q dst, src */
962 EMIT4(add_2mod(0x48, src_reg
, dst_reg
), 0x0f, 0xbe,
963 add_2reg(0xC0, src_reg
, dst_reg
));
964 else if (num_bits
== 16)
965 EMIT4(add_2mod(0x48, src_reg
, dst_reg
), 0x0f, 0xbf,
966 add_2reg(0xC0, src_reg
, dst_reg
));
967 else if (num_bits
== 32)
968 EMIT3(add_2mod(0x48, src_reg
, dst_reg
), 0x63,
969 add_2reg(0xC0, src_reg
, dst_reg
));
971 /* movs[b,w]l dst, src */
973 EMIT4(add_2mod(0x40, src_reg
, dst_reg
), 0x0f, 0xbe,
974 add_2reg(0xC0, src_reg
, dst_reg
));
975 } else if (num_bits
== 16) {
976 if (is_ereg(dst_reg
) || is_ereg(src_reg
))
977 EMIT1(add_2mod(0x40, src_reg
, dst_reg
));
978 EMIT3(add_2mod(0x0f, src_reg
, dst_reg
), 0xbf,
979 add_2reg(0xC0, src_reg
, dst_reg
));
986 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
987 static void emit_insn_suffix(u8
**pprog
, u32 ptr_reg
, u32 val_reg
, int off
)
992 /* 1-byte signed displacement.
994 * If off == 0 we could skip this and save one extra byte, but
995 * special case of x86 R13 which always needs an offset is not
998 EMIT2(add_2reg(0x40, ptr_reg
, val_reg
), off
);
1000 /* 4-byte signed displacement */
1001 EMIT1_off32(add_2reg(0x80, ptr_reg
, val_reg
), off
);
1006 static void emit_insn_suffix_SIB(u8
**pprog
, u32 ptr_reg
, u32 val_reg
, u32 index_reg
, int off
)
1011 EMIT3(add_2reg(0x44, BPF_REG_0
, val_reg
), add_2reg(0, ptr_reg
, index_reg
) /* SIB */, off
);
1013 EMIT2_off32(add_2reg(0x84, BPF_REG_0
, val_reg
), add_2reg(0, ptr_reg
, index_reg
) /* SIB */, off
);
1019 * Emit a REX byte if it will be necessary to address these registers
1021 static void maybe_emit_mod(u8
**pprog
, u32 dst_reg
, u32 src_reg
, bool is64
)
1026 EMIT1(add_2mod(0x48, dst_reg
, src_reg
));
1027 else if (is_ereg(dst_reg
) || is_ereg(src_reg
))
1028 EMIT1(add_2mod(0x40, dst_reg
, src_reg
));
1033 * Similar version of maybe_emit_mod() for a single register
1035 static void maybe_emit_1mod(u8
**pprog
, u32 reg
, bool is64
)
1040 EMIT1(add_1mod(0x48, reg
));
1041 else if (is_ereg(reg
))
1042 EMIT1(add_1mod(0x40, reg
));
1046 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1047 static void emit_ldx(u8
**pprog
, u32 size
, u32 dst_reg
, u32 src_reg
, int off
)
1053 /* Emit 'movzx rax, byte ptr [rax + off]' */
1054 EMIT3(add_2mod(0x48, src_reg
, dst_reg
), 0x0F, 0xB6);
1057 /* Emit 'movzx rax, word ptr [rax + off]' */
1058 EMIT3(add_2mod(0x48, src_reg
, dst_reg
), 0x0F, 0xB7);
1061 /* Emit 'mov eax, dword ptr [rax+0x14]' */
1062 if (is_ereg(dst_reg
) || is_ereg(src_reg
))
1063 EMIT2(add_2mod(0x40, src_reg
, dst_reg
), 0x8B);
1068 /* Emit 'mov rax, qword ptr [rax+0x14]' */
1069 EMIT2(add_2mod(0x48, src_reg
, dst_reg
), 0x8B);
1072 emit_insn_suffix(&prog
, src_reg
, dst_reg
, off
);
1076 /* LDSX: dst_reg = *(s8*)(src_reg + off) */
1077 static void emit_ldsx(u8
**pprog
, u32 size
, u32 dst_reg
, u32 src_reg
, int off
)
1083 /* Emit 'movsx rax, byte ptr [rax + off]' */
1084 EMIT3(add_2mod(0x48, src_reg
, dst_reg
), 0x0F, 0xBE);
1087 /* Emit 'movsx rax, word ptr [rax + off]' */
1088 EMIT3(add_2mod(0x48, src_reg
, dst_reg
), 0x0F, 0xBF);
1091 /* Emit 'movsx rax, dword ptr [rax+0x14]' */
1092 EMIT2(add_2mod(0x48, src_reg
, dst_reg
), 0x63);
1095 emit_insn_suffix(&prog
, src_reg
, dst_reg
, off
);
1099 static void emit_ldx_index(u8
**pprog
, u32 size
, u32 dst_reg
, u32 src_reg
, u32 index_reg
, int off
)
1105 /* movzx rax, byte ptr [rax + r12 + off] */
1106 EMIT3(add_3mod(0x40, src_reg
, dst_reg
, index_reg
), 0x0F, 0xB6);
1109 /* movzx rax, word ptr [rax + r12 + off] */
1110 EMIT3(add_3mod(0x40, src_reg
, dst_reg
, index_reg
), 0x0F, 0xB7);
1113 /* mov eax, dword ptr [rax + r12 + off] */
1114 EMIT2(add_3mod(0x40, src_reg
, dst_reg
, index_reg
), 0x8B);
1117 /* mov rax, qword ptr [rax + r12 + off] */
1118 EMIT2(add_3mod(0x48, src_reg
, dst_reg
, index_reg
), 0x8B);
1121 emit_insn_suffix_SIB(&prog
, src_reg
, dst_reg
, index_reg
, off
);
1125 static void emit_ldx_r12(u8
**pprog
, u32 size
, u32 dst_reg
, u32 src_reg
, int off
)
1127 emit_ldx_index(pprog
, size
, dst_reg
, src_reg
, X86_REG_R12
, off
);
1130 /* STX: *(u8*)(dst_reg + off) = src_reg */
1131 static void emit_stx(u8
**pprog
, u32 size
, u32 dst_reg
, u32 src_reg
, int off
)
1137 /* Emit 'mov byte ptr [rax + off], al' */
1138 if (is_ereg(dst_reg
) || is_ereg_8l(src_reg
))
1139 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
1140 EMIT2(add_2mod(0x40, dst_reg
, src_reg
), 0x88);
1145 if (is_ereg(dst_reg
) || is_ereg(src_reg
))
1146 EMIT3(0x66, add_2mod(0x40, dst_reg
, src_reg
), 0x89);
1151 if (is_ereg(dst_reg
) || is_ereg(src_reg
))
1152 EMIT2(add_2mod(0x40, dst_reg
, src_reg
), 0x89);
1157 EMIT2(add_2mod(0x48, dst_reg
, src_reg
), 0x89);
1160 emit_insn_suffix(&prog
, dst_reg
, src_reg
, off
);
1164 /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */
1165 static void emit_stx_index(u8
**pprog
, u32 size
, u32 dst_reg
, u32 src_reg
, u32 index_reg
, int off
)
1171 /* mov byte ptr [rax + r12 + off], al */
1172 EMIT2(add_3mod(0x40, dst_reg
, src_reg
, index_reg
), 0x88);
1175 /* mov word ptr [rax + r12 + off], ax */
1176 EMIT3(0x66, add_3mod(0x40, dst_reg
, src_reg
, index_reg
), 0x89);
1179 /* mov dword ptr [rax + r12 + 1], eax */
1180 EMIT2(add_3mod(0x40, dst_reg
, src_reg
, index_reg
), 0x89);
1183 /* mov qword ptr [rax + r12 + 1], rax */
1184 EMIT2(add_3mod(0x48, dst_reg
, src_reg
, index_reg
), 0x89);
1187 emit_insn_suffix_SIB(&prog
, dst_reg
, src_reg
, index_reg
, off
);
1191 static void emit_stx_r12(u8
**pprog
, u32 size
, u32 dst_reg
, u32 src_reg
, int off
)
1193 emit_stx_index(pprog
, size
, dst_reg
, src_reg
, X86_REG_R12
, off
);
1196 /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */
1197 static void emit_st_index(u8
**pprog
, u32 size
, u32 dst_reg
, u32 index_reg
, int off
, int imm
)
1203 /* mov byte ptr [rax + r12 + off], imm8 */
1204 EMIT2(add_3mod(0x40, dst_reg
, 0, index_reg
), 0xC6);
1207 /* mov word ptr [rax + r12 + off], imm16 */
1208 EMIT3(0x66, add_3mod(0x40, dst_reg
, 0, index_reg
), 0xC7);
1211 /* mov dword ptr [rax + r12 + 1], imm32 */
1212 EMIT2(add_3mod(0x40, dst_reg
, 0, index_reg
), 0xC7);
1215 /* mov qword ptr [rax + r12 + 1], imm32 */
1216 EMIT2(add_3mod(0x48, dst_reg
, 0, index_reg
), 0xC7);
1219 emit_insn_suffix_SIB(&prog
, dst_reg
, 0, index_reg
, off
);
1220 EMIT(imm
, bpf_size_to_x86_bytes(size
));
1224 static void emit_st_r12(u8
**pprog
, u32 size
, u32 dst_reg
, int off
, int imm
)
1226 emit_st_index(pprog
, size
, dst_reg
, X86_REG_R12
, off
, imm
);
1229 static int emit_atomic(u8
**pprog
, u8 atomic_op
,
1230 u32 dst_reg
, u32 src_reg
, s16 off
, u8 bpf_size
)
1234 EMIT1(0xF0); /* lock prefix */
1236 maybe_emit_mod(&prog
, dst_reg
, src_reg
, bpf_size
== BPF_DW
);
1239 switch (atomic_op
) {
1244 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
1245 EMIT1(simple_alu_opcodes
[atomic_op
]);
1247 case BPF_ADD
| BPF_FETCH
:
1248 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
1252 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
1256 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
1260 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op
);
1264 emit_insn_suffix(&prog
, dst_reg
, src_reg
, off
);
1270 static int emit_atomic_index(u8
**pprog
, u8 atomic_op
, u32 size
,
1271 u32 dst_reg
, u32 src_reg
, u32 index_reg
, int off
)
1275 EMIT1(0xF0); /* lock prefix */
1278 EMIT1(add_3mod(0x40, dst_reg
, src_reg
, index_reg
));
1281 EMIT1(add_3mod(0x48, dst_reg
, src_reg
, index_reg
));
1284 pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n");
1289 switch (atomic_op
) {
1294 /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */
1295 EMIT1(simple_alu_opcodes
[atomic_op
]);
1297 case BPF_ADD
| BPF_FETCH
:
1298 /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */
1302 /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */
1306 /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */
1310 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op
);
1313 emit_insn_suffix_SIB(&prog
, dst_reg
, src_reg
, index_reg
, off
);
1318 #define DONT_CLEAR 1
1320 bool ex_handler_bpf(const struct exception_table_entry
*x
, struct pt_regs
*regs
)
1322 u32 reg
= x
->fixup
>> 8;
1324 /* jump over faulting load and clear dest register */
1325 if (reg
!= DONT_CLEAR
)
1326 *(unsigned long *)((void *)regs
+ reg
) = 0;
1327 regs
->ip
+= x
->fixup
& 0xff;
1331 static void detect_reg_usage(struct bpf_insn
*insn
, int insn_cnt
,
1336 for (i
= 1; i
<= insn_cnt
; i
++, insn
++) {
1337 if (insn
->dst_reg
== BPF_REG_6
|| insn
->src_reg
== BPF_REG_6
)
1338 regs_used
[0] = true;
1339 if (insn
->dst_reg
== BPF_REG_7
|| insn
->src_reg
== BPF_REG_7
)
1340 regs_used
[1] = true;
1341 if (insn
->dst_reg
== BPF_REG_8
|| insn
->src_reg
== BPF_REG_8
)
1342 regs_used
[2] = true;
1343 if (insn
->dst_reg
== BPF_REG_9
|| insn
->src_reg
== BPF_REG_9
)
1344 regs_used
[3] = true;
/*
 * emit the 3-byte VEX prefix
 *
 * r: same as rex.r, extra bit for ModRM reg field
 * x: same as rex.x, extra bit for SIB index field
 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
 * m: opcode map select, encoding escape bytes e.g. 0x0f38
 * w: same as rex.w (32 bit or 64 bit) or opcode specific
 * src_reg2: additional source reg (encoded as BPF reg)
 * l: vector length (128 bit or 256 bit) or reserved
 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
 */
1359 static void emit_3vex(u8
**pprog
, bool r
, bool x
, bool b
, u8 m
,
1360 bool w
, u8 src_reg2
, bool l
, u8 pp
)
1363 const u8 b0
= 0xc4; /* first byte of 3-byte VEX prefix */
1365 u8 vvvv
= reg2hex
[src_reg2
];
1367 /* reg2hex gives only the lower 3 bit of vvvv */
1368 if (is_ereg(src_reg2
))
1372 * 2nd byte of 3-byte VEX prefix
1373 * ~ means bit inverted encoding
1376 * +---+---+---+---+---+---+---+---+
1378 * +---+---+---+---+---+---+---+---+
1380 b1
= (!r
<< 7) | (!x
<< 6) | (!b
<< 5) | (m
& 0x1f);
1382 * 3rd byte of 3-byte VEX prefix
1385 * +---+---+---+---+---+---+---+---+
1386 * | W | ~vvvv | L | pp |
1387 * +---+---+---+---+---+---+---+---+
1389 b2
= (w
<< 7) | ((~vvvv
& 0xf) << 3) | (l
<< 2) | (pp
& 3);
1395 /* emit BMI2 shift instruction */
1396 static void emit_shiftx(u8
**pprog
, u32 dst_reg
, u8 src_reg
, bool is64
, u8 op
)
1399 bool r
= is_ereg(dst_reg
);
1400 u8 m
= 2; /* escape code 0f38 */
1402 emit_3vex(&prog
, r
, false, r
, m
, is64
, src_reg
, false, op
);
1403 EMIT2(0xf7, add_2reg(0xC0, dst_reg
, dst_reg
));
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))

#define __LOAD_TCC_PTR(off)			\
        EMIT3_off32(0x48, 0x8B, 0x85, off)
/* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */
#define LOAD_TAIL_CALL_CNT_PTR(stack)		\
        __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
1415 static int do_jit(struct bpf_prog
*bpf_prog
, int *addrs
, u8
*image
, u8
*rw_image
,
1416 int oldproglen
, struct jit_context
*ctx
, bool jmp_padding
)
1418 bool tail_call_reachable
= bpf_prog
->aux
->tail_call_reachable
;
1419 struct bpf_insn
*insn
= bpf_prog
->insnsi
;
1420 bool callee_regs_used
[4] = {};
1421 int insn_cnt
= bpf_prog
->len
;
1422 bool seen_exit
= false;
1423 u8 temp
[BPF_MAX_INSN_SIZE
+ BPF_INSN_SAFETY
];
1424 u64 arena_vm_start
, user_vm_start
;
1426 int ilen
, proglen
= 0;
1430 arena_vm_start
= bpf_arena_get_kern_vm_start(bpf_prog
->aux
->arena
);
1431 user_vm_start
= bpf_arena_get_user_vm_start(bpf_prog
->aux
->arena
);
1433 detect_reg_usage(insn
, insn_cnt
, callee_regs_used
);
1435 emit_prologue(&prog
, bpf_prog
->aux
->stack_depth
,
1436 bpf_prog_was_classic(bpf_prog
), tail_call_reachable
,
1437 bpf_is_subprog(bpf_prog
), bpf_prog
->aux
->exception_cb
);
1438 /* Exception callback will clobber callee regs for its own use, and
1439 * restore the original callee regs from main prog's stack frame.
1441 if (bpf_prog
->aux
->exception_boundary
) {
1442 /* We also need to save r12, which is not mapped to any BPF
1443 * register, as we throw after entry into the kernel, which may
1447 push_callee_regs(&prog
, all_callee_regs_used
);
1451 push_callee_regs(&prog
, callee_regs_used
);
1454 emit_mov_imm64(&prog
, X86_REG_R12
,
1455 arena_vm_start
>> 32, (u32
) arena_vm_start
);
1459 memcpy(rw_image
+ proglen
, temp
, ilen
);
1464 for (i
= 1; i
<= insn_cnt
; i
++, insn
++) {
1465 const s32 imm32
= insn
->imm
;
1466 u32 dst_reg
= insn
->dst_reg
;
1467 u32 src_reg
= insn
->src_reg
;
1476 switch (insn
->code
) {
1478 case BPF_ALU
| BPF_ADD
| BPF_X
:
1479 case BPF_ALU
| BPF_SUB
| BPF_X
:
1480 case BPF_ALU
| BPF_AND
| BPF_X
:
1481 case BPF_ALU
| BPF_OR
| BPF_X
:
1482 case BPF_ALU
| BPF_XOR
| BPF_X
:
1483 case BPF_ALU64
| BPF_ADD
| BPF_X
:
1484 case BPF_ALU64
| BPF_SUB
| BPF_X
:
1485 case BPF_ALU64
| BPF_AND
| BPF_X
:
1486 case BPF_ALU64
| BPF_OR
| BPF_X
:
1487 case BPF_ALU64
| BPF_XOR
| BPF_X
:
1488 maybe_emit_mod(&prog
, dst_reg
, src_reg
,
1489 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1490 b2
= simple_alu_opcodes
[BPF_OP(insn
->code
)];
1491 EMIT2(b2
, add_2reg(0xC0, dst_reg
, src_reg
));
1494 case BPF_ALU64
| BPF_MOV
| BPF_X
:
1495 if (insn_is_cast_user(insn
)) {
1496 if (dst_reg
!= src_reg
)
1498 emit_mov_reg(&prog
, false, dst_reg
, src_reg
);
1499 /* shl dst_reg, 32 */
1500 maybe_emit_1mod(&prog
, dst_reg
, true);
1501 EMIT3(0xC1, add_1reg(0xE0, dst_reg
), 32);
1503 /* or dst_reg, user_vm_start */
1504 maybe_emit_1mod(&prog
, dst_reg
, true);
1505 if (is_axreg(dst_reg
))
1506 EMIT1_off32(0x0D, user_vm_start
>> 32);
1508 EMIT2_off32(0x81, add_1reg(0xC8, dst_reg
), user_vm_start
>> 32);
1510 /* rol dst_reg, 32 */
1511 maybe_emit_1mod(&prog
, dst_reg
, true);
1512 EMIT3(0xC1, add_1reg(0xC0, dst_reg
), 32);
1515 EMIT3(0x4D, 0x31, 0xDB);
1517 /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */
1518 maybe_emit_mod(&prog
, dst_reg
, dst_reg
, false);
1519 EMIT2(0x85, add_2reg(0xC0, dst_reg
, dst_reg
));
1521 /* cmove r11, dst_reg; if so, set dst_reg to zero */
1522 /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */
1523 maybe_emit_mod(&prog
, AUX_REG
, dst_reg
, true);
1524 EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG
, dst_reg
));
1526 } else if (insn_is_mov_percpu_addr(insn
)) {
1527 /* mov <dst>, <src> (if necessary) */
1528 EMIT_mov(dst_reg
, src_reg
);
1530 /* add <dst>, gs:[<off>] */
1531 EMIT2(0x65, add_1mod(0x48, dst_reg
));
1532 EMIT3(0x03, add_2reg(0x04, 0, dst_reg
), 0x25);
1533 EMIT((u32
)(unsigned long)&this_cpu_off
, 4);
1538 case BPF_ALU
| BPF_MOV
| BPF_X
:
1541 BPF_CLASS(insn
->code
) == BPF_ALU64
,
1544 emit_movsx_reg(&prog
, insn
->off
,
1545 BPF_CLASS(insn
->code
) == BPF_ALU64
,
1550 case BPF_ALU
| BPF_NEG
:
1551 case BPF_ALU64
| BPF_NEG
:
1552 maybe_emit_1mod(&prog
, dst_reg
,
1553 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1554 EMIT2(0xF7, add_1reg(0xD8, dst_reg
));
1557 case BPF_ALU
| BPF_ADD
| BPF_K
:
1558 case BPF_ALU
| BPF_SUB
| BPF_K
:
1559 case BPF_ALU
| BPF_AND
| BPF_K
:
1560 case BPF_ALU
| BPF_OR
| BPF_K
:
1561 case BPF_ALU
| BPF_XOR
| BPF_K
:
1562 case BPF_ALU64
| BPF_ADD
| BPF_K
:
1563 case BPF_ALU64
| BPF_SUB
| BPF_K
:
1564 case BPF_ALU64
| BPF_AND
| BPF_K
:
1565 case BPF_ALU64
| BPF_OR
| BPF_K
:
1566 case BPF_ALU64
| BPF_XOR
| BPF_K
:
1567 maybe_emit_1mod(&prog
, dst_reg
,
1568 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1571 * b3 holds 'normal' opcode, b2 short form only valid
1572 * in case dst is eax/rax.
1574 switch (BPF_OP(insn
->code
)) {
1598 EMIT3(0x83, add_1reg(b3
, dst_reg
), imm32
);
1599 else if (is_axreg(dst_reg
))
1600 EMIT1_off32(b2
, imm32
);
1602 EMIT2_off32(0x81, add_1reg(b3
, dst_reg
), imm32
);
1605 case BPF_ALU64
| BPF_MOV
| BPF_K
:
1606 case BPF_ALU
| BPF_MOV
| BPF_K
:
1607 emit_mov_imm32(&prog
, BPF_CLASS(insn
->code
) == BPF_ALU64
,
1611 case BPF_LD
| BPF_IMM
| BPF_DW
:
1612 emit_mov_imm64(&prog
, dst_reg
, insn
[1].imm
, insn
[0].imm
);
1617 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1618 case BPF_ALU
| BPF_MOD
| BPF_X
:
1619 case BPF_ALU
| BPF_DIV
| BPF_X
:
1620 case BPF_ALU
| BPF_MOD
| BPF_K
:
1621 case BPF_ALU
| BPF_DIV
| BPF_K
:
1622 case BPF_ALU64
| BPF_MOD
| BPF_X
:
1623 case BPF_ALU64
| BPF_DIV
| BPF_X
:
1624 case BPF_ALU64
| BPF_MOD
| BPF_K
:
1625 case BPF_ALU64
| BPF_DIV
| BPF_K
: {
1626 bool is64
= BPF_CLASS(insn
->code
) == BPF_ALU64
;
1628 if (dst_reg
!= BPF_REG_0
)
1629 EMIT1(0x50); /* push rax */
1630 if (dst_reg
!= BPF_REG_3
)
1631 EMIT1(0x52); /* push rdx */
1633 if (BPF_SRC(insn
->code
) == BPF_X
) {
1634 if (src_reg
== BPF_REG_0
||
1635 src_reg
== BPF_REG_3
) {
1636 /* mov r11, src_reg */
1637 EMIT_mov(AUX_REG
, src_reg
);
1641 /* mov r11, imm32 */
1642 EMIT3_off32(0x49, 0xC7, 0xC3, imm32
);
1646 if (dst_reg
!= BPF_REG_0
)
1647 /* mov rax, dst_reg */
1648 emit_mov_reg(&prog
, is64
, BPF_REG_0
, dst_reg
);
1650 if (insn
->off
== 0) {
1653 * equivalent to 'xor rdx, rdx', but one byte less
1658 maybe_emit_1mod(&prog
, src_reg
, is64
);
1659 EMIT2(0xF7, add_1reg(0xF0, src_reg
));
1661 if (BPF_CLASS(insn
->code
) == BPF_ALU
)
1662 EMIT1(0x99); /* cdq */
1664 EMIT2(0x48, 0x99); /* cqo */
1667 maybe_emit_1mod(&prog
, src_reg
, is64
);
1668 EMIT2(0xF7, add_1reg(0xF8, src_reg
));
1671 if (BPF_OP(insn
->code
) == BPF_MOD
&&
1672 dst_reg
!= BPF_REG_3
)
1673 /* mov dst_reg, rdx */
1674 emit_mov_reg(&prog
, is64
, dst_reg
, BPF_REG_3
);
1675 else if (BPF_OP(insn
->code
) == BPF_DIV
&&
1676 dst_reg
!= BPF_REG_0
)
1677 /* mov dst_reg, rax */
1678 emit_mov_reg(&prog
, is64
, dst_reg
, BPF_REG_0
);
1680 if (dst_reg
!= BPF_REG_3
)
1681 EMIT1(0x5A); /* pop rdx */
1682 if (dst_reg
!= BPF_REG_0
)
1683 EMIT1(0x58); /* pop rax */
1687 case BPF_ALU
| BPF_MUL
| BPF_K
:
1688 case BPF_ALU64
| BPF_MUL
| BPF_K
:
1689 maybe_emit_mod(&prog
, dst_reg
, dst_reg
,
1690 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1693 /* imul dst_reg, dst_reg, imm8 */
1694 EMIT3(0x6B, add_2reg(0xC0, dst_reg
, dst_reg
),
1697 /* imul dst_reg, dst_reg, imm32 */
1699 add_2reg(0xC0, dst_reg
, dst_reg
),
1703 case BPF_ALU
| BPF_MUL
| BPF_X
:
1704 case BPF_ALU64
| BPF_MUL
| BPF_X
:
1705 maybe_emit_mod(&prog
, src_reg
, dst_reg
,
1706 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1708 /* imul dst_reg, src_reg */
1709 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg
, dst_reg
));
1713 case BPF_ALU
| BPF_LSH
| BPF_K
:
1714 case BPF_ALU
| BPF_RSH
| BPF_K
:
1715 case BPF_ALU
| BPF_ARSH
| BPF_K
:
1716 case BPF_ALU64
| BPF_LSH
| BPF_K
:
1717 case BPF_ALU64
| BPF_RSH
| BPF_K
:
1718 case BPF_ALU64
| BPF_ARSH
| BPF_K
:
1719 maybe_emit_1mod(&prog
, dst_reg
,
1720 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1722 b3
= simple_alu_opcodes
[BPF_OP(insn
->code
)];
1724 EMIT2(0xD1, add_1reg(b3
, dst_reg
));
1726 EMIT3(0xC1, add_1reg(b3
, dst_reg
), imm32
);
1729 case BPF_ALU
| BPF_LSH
| BPF_X
:
1730 case BPF_ALU
| BPF_RSH
| BPF_X
:
1731 case BPF_ALU
| BPF_ARSH
| BPF_X
:
1732 case BPF_ALU64
| BPF_LSH
| BPF_X
:
1733 case BPF_ALU64
| BPF_RSH
| BPF_X
:
1734 case BPF_ALU64
| BPF_ARSH
| BPF_X
:
1735 /* BMI2 shifts aren't better when shift count is already in rcx */
1736 if (boot_cpu_has(X86_FEATURE_BMI2
) && src_reg
!= BPF_REG_4
) {
1737 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1738 bool w
= (BPF_CLASS(insn
->code
) == BPF_ALU64
);
1741 switch (BPF_OP(insn
->code
)) {
1743 op
= 1; /* prefix 0x66 */
1746 op
= 3; /* prefix 0xf2 */
1749 op
= 2; /* prefix 0xf3 */
1753 emit_shiftx(&prog
, dst_reg
, src_reg
, w
, op
);
1758 if (src_reg
!= BPF_REG_4
) { /* common case */
1759 /* Check for bad case when dst_reg == rcx */
1760 if (dst_reg
== BPF_REG_4
) {
1761 /* mov r11, dst_reg */
1762 EMIT_mov(AUX_REG
, dst_reg
);
1765 EMIT1(0x51); /* push rcx */
1767 /* mov rcx, src_reg */
1768 EMIT_mov(BPF_REG_4
, src_reg
);
1771 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1772 maybe_emit_1mod(&prog
, dst_reg
,
1773 BPF_CLASS(insn
->code
) == BPF_ALU64
);
1775 b3
= simple_alu_opcodes
[BPF_OP(insn
->code
)];
1776 EMIT2(0xD3, add_1reg(b3
, dst_reg
));
1778 if (src_reg
!= BPF_REG_4
) {
1779 if (insn
->dst_reg
== BPF_REG_4
)
1780 /* mov dst_reg, r11 */
1781 EMIT_mov(insn
->dst_reg
, AUX_REG
);
1783 EMIT1(0x59); /* pop rcx */
1788 case BPF_ALU
| BPF_END
| BPF_FROM_BE
:
1789 case BPF_ALU64
| BPF_END
| BPF_FROM_LE
:
1792 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1794 if (is_ereg(dst_reg
))
1796 EMIT3(0xC1, add_1reg(0xC8, dst_reg
), 8);
1798 /* Emit 'movzwl eax, ax' */
1799 if (is_ereg(dst_reg
))
1800 EMIT3(0x45, 0x0F, 0xB7);
1803 EMIT1(add_2reg(0xC0, dst_reg
, dst_reg
));
1806 /* Emit 'bswap eax' to swap lower 4 bytes */
1807 if (is_ereg(dst_reg
))
1811 EMIT1(add_1reg(0xC8, dst_reg
));
1814 /* Emit 'bswap rax' to swap 8 bytes */
1815 EMIT3(add_1mod(0x48, dst_reg
), 0x0F,
1816 add_1reg(0xC8, dst_reg
));
1821 case BPF_ALU
| BPF_END
| BPF_FROM_LE
:
1825 * Emit 'movzwl eax, ax' to zero extend 16-bit
1828 if (is_ereg(dst_reg
))
1829 EMIT3(0x45, 0x0F, 0xB7);
1832 EMIT1(add_2reg(0xC0, dst_reg
, dst_reg
));
1835 /* Emit 'mov eax, eax' to clear upper 32-bits */
1836 if (is_ereg(dst_reg
))
1838 EMIT2(0x89, add_2reg(0xC0, dst_reg
, dst_reg
));
1846 /* speculation barrier */
1847 case BPF_ST
| BPF_NOSPEC
:
1851 /* ST: *(u8*)(dst_reg + off) = imm */
1852 case BPF_ST
| BPF_MEM
| BPF_B
:
1853 if (is_ereg(dst_reg
))
1858 case BPF_ST
| BPF_MEM
| BPF_H
:
1859 if (is_ereg(dst_reg
))
1860 EMIT3(0x66, 0x41, 0xC7);
1864 case BPF_ST
| BPF_MEM
| BPF_W
:
1865 if (is_ereg(dst_reg
))
1870 case BPF_ST
| BPF_MEM
| BPF_DW
:
1871 EMIT2(add_1mod(0x48, dst_reg
), 0xC7);
1873 st
: if (is_imm8(insn
->off
))
1874 EMIT2(add_1reg(0x40, dst_reg
), insn
->off
);
1876 EMIT1_off32(add_1reg(0x80, dst_reg
), insn
->off
);
1878 EMIT(imm32
, bpf_size_to_x86_bytes(BPF_SIZE(insn
->code
)));
1881 /* STX: *(u8*)(dst_reg + off) = src_reg */
1882 case BPF_STX
| BPF_MEM
| BPF_B
:
1883 case BPF_STX
| BPF_MEM
| BPF_H
:
1884 case BPF_STX
| BPF_MEM
| BPF_W
:
1885 case BPF_STX
| BPF_MEM
| BPF_DW
:
1886 emit_stx(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn
->off
);
1889 case BPF_ST
| BPF_PROBE_MEM32
| BPF_B
:
1890 case BPF_ST
| BPF_PROBE_MEM32
| BPF_H
:
1891 case BPF_ST
| BPF_PROBE_MEM32
| BPF_W
:
1892 case BPF_ST
| BPF_PROBE_MEM32
| BPF_DW
:
1893 start_of_ldx
= prog
;
1894 emit_st_r12(&prog
, BPF_SIZE(insn
->code
), dst_reg
, insn
->off
, insn
->imm
);
1895 goto populate_extable
;
1897 /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */
1898 case BPF_LDX
| BPF_PROBE_MEM32
| BPF_B
:
1899 case BPF_LDX
| BPF_PROBE_MEM32
| BPF_H
:
1900 case BPF_LDX
| BPF_PROBE_MEM32
| BPF_W
:
1901 case BPF_LDX
| BPF_PROBE_MEM32
| BPF_DW
:
1902 case BPF_STX
| BPF_PROBE_MEM32
| BPF_B
:
1903 case BPF_STX
| BPF_PROBE_MEM32
| BPF_H
:
1904 case BPF_STX
| BPF_PROBE_MEM32
| BPF_W
:
1905 case BPF_STX
| BPF_PROBE_MEM32
| BPF_DW
:
1906 start_of_ldx
= prog
;
1907 if (BPF_CLASS(insn
->code
) == BPF_LDX
)
1908 emit_ldx_r12(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn
->off
);
1910 emit_stx_r12(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn
->off
);
1913 struct exception_table_entry
*ex
;
1914 u8
*_insn
= image
+ proglen
+ (start_of_ldx
- temp
);
1917 if (!bpf_prog
->aux
->extable
)
1920 if (excnt
>= bpf_prog
->aux
->num_exentries
) {
1921 pr_err("mem32 extable bug\n");
1924 ex
= &bpf_prog
->aux
->extable
[excnt
++];
1926 delta
= _insn
- (u8
*)&ex
->insn
;
1927 /* switch ex to rw buffer for writes */
1928 ex
= (void *)rw_image
+ ((void *)ex
- (void *)image
);
1932 ex
->data
= EX_TYPE_BPF
;
1934 ex
->fixup
= (prog
- start_of_ldx
) |
1935 ((BPF_CLASS(insn
->code
) == BPF_LDX
? reg2pt_regs
[dst_reg
] : DONT_CLEAR
) << 8);
1939 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1940 case BPF_LDX
| BPF_MEM
| BPF_B
:
1941 case BPF_LDX
| BPF_PROBE_MEM
| BPF_B
:
1942 case BPF_LDX
| BPF_MEM
| BPF_H
:
1943 case BPF_LDX
| BPF_PROBE_MEM
| BPF_H
:
1944 case BPF_LDX
| BPF_MEM
| BPF_W
:
1945 case BPF_LDX
| BPF_PROBE_MEM
| BPF_W
:
1946 case BPF_LDX
| BPF_MEM
| BPF_DW
:
1947 case BPF_LDX
| BPF_PROBE_MEM
| BPF_DW
:
1948 /* LDXS: dst_reg = *(s8*)(src_reg + off) */
1949 case BPF_LDX
| BPF_MEMSX
| BPF_B
:
1950 case BPF_LDX
| BPF_MEMSX
| BPF_H
:
1951 case BPF_LDX
| BPF_MEMSX
| BPF_W
:
1952 case BPF_LDX
| BPF_PROBE_MEMSX
| BPF_B
:
1953 case BPF_LDX
| BPF_PROBE_MEMSX
| BPF_H
:
1954 case BPF_LDX
| BPF_PROBE_MEMSX
| BPF_W
:
1955 insn_off
= insn
->off
;
1957 if (BPF_MODE(insn
->code
) == BPF_PROBE_MEM
||
1958 BPF_MODE(insn
->code
) == BPF_PROBE_MEMSX
) {
1959 /* Conservatively check that src_reg + insn->off is a kernel address:
1960 * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
1962 * src_reg + insn->off < VSYSCALL_ADDR
1965 u64 limit
= TASK_SIZE_MAX
+ PAGE_SIZE
- VSYSCALL_ADDR
;
1968 /* movabsq r10, VSYSCALL_ADDR */
1969 emit_mov_imm64(&prog
, BPF_REG_AX
, (long)VSYSCALL_ADDR
>> 32,
1970 (u32
)(long)VSYSCALL_ADDR
);
1972 /* mov src_reg, r11 */
1973 EMIT_mov(AUX_REG
, src_reg
);
1976 /* add r11, insn->off */
1977 maybe_emit_1mod(&prog
, AUX_REG
, true);
1978 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG
), insn
->off
);
1982 maybe_emit_mod(&prog
, AUX_REG
, BPF_REG_AX
, true);
1983 EMIT2(0x29, add_2reg(0xC0, AUX_REG
, BPF_REG_AX
));
1985 /* movabsq r10, limit */
1986 emit_mov_imm64(&prog
, BPF_REG_AX
, (long)limit
>> 32,
1990 maybe_emit_mod(&prog
, AUX_REG
, BPF_REG_AX
, true);
1991 EMIT2(0x39, add_2reg(0xC0, AUX_REG
, BPF_REG_AX
));
1993 /* if unsigned '>', goto load */
1997 /* xor dst_reg, dst_reg */
1998 emit_mov_imm32(&prog
, false, dst_reg
, 0);
1999 /* jmp byte_after_ldx */
2002 /* populate jmp_offset for JAE above to jump to start_of_ldx */
2003 start_of_ldx
= prog
;
2004 end_of_jmp
[-1] = start_of_ldx
- end_of_jmp
;
2006 if (BPF_MODE(insn
->code
) == BPF_PROBE_MEMSX
||
2007 BPF_MODE(insn
->code
) == BPF_MEMSX
)
2008 emit_ldsx(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn_off
);
2010 emit_ldx(&prog
, BPF_SIZE(insn
->code
), dst_reg
, src_reg
, insn_off
);
2011 if (BPF_MODE(insn
->code
) == BPF_PROBE_MEM
||
2012 BPF_MODE(insn
->code
) == BPF_PROBE_MEMSX
) {
2013 struct exception_table_entry
*ex
;
2014 u8
*_insn
= image
+ proglen
+ (start_of_ldx
- temp
);
2017 /* populate jmp_offset for JMP above */
2018 start_of_ldx
[-1] = prog
- start_of_ldx
;
2020 if (!bpf_prog
->aux
->extable
)
2023 if (excnt
>= bpf_prog
->aux
->num_exentries
) {
2024 pr_err("ex gen bug\n");
2027 ex
= &bpf_prog
->aux
->extable
[excnt
++];
2029 delta
= _insn
- (u8
*)&ex
->insn
;
2030 if (!is_simm32(delta
)) {
2031 pr_err("extable->insn doesn't fit into 32-bit\n");
2034 /* switch ex to rw buffer for writes */
2035 ex
= (void *)rw_image
+ ((void *)ex
- (void *)image
);
2039 ex
->data
= EX_TYPE_BPF
;
2041 if (dst_reg
> BPF_REG_9
) {
2042 pr_err("verifier error\n");
2046 * Compute size of x86 insn and its target dest x86 register.
2047 * ex_handler_bpf() will use lower 8 bits to adjust
2048 * pt_regs->ip to jump over this x86 instruction
2049 * and upper bits to figure out which pt_regs to zero out.
2050 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
2051 * of 4 bytes will be ignored and rbx will be zero inited.
2053 ex
->fixup
= (prog
- start_of_ldx
) | (reg2pt_regs
[dst_reg
] << 8);
2057 case BPF_STX
| BPF_ATOMIC
| BPF_W
:
2058 case BPF_STX
| BPF_ATOMIC
| BPF_DW
:
2059 if (insn
->imm
== (BPF_AND
| BPF_FETCH
) ||
2060 insn
->imm
== (BPF_OR
| BPF_FETCH
) ||
2061 insn
->imm
== (BPF_XOR
| BPF_FETCH
)) {
2062 bool is64
= BPF_SIZE(insn
->code
) == BPF_DW
;
2063 u32 real_src_reg
= src_reg
;
2064 u32 real_dst_reg
= dst_reg
;
2068 * Can't be implemented with a single x86 insn.
2069 * Need to do a CMPXCHG loop.
2072 /* Will need RAX as a CMPXCHG operand so save R0 */
2073 emit_mov_reg(&prog
, true, BPF_REG_AX
, BPF_REG_0
);
2074 if (src_reg
== BPF_REG_0
)
2075 real_src_reg
= BPF_REG_AX
;
2076 if (dst_reg
== BPF_REG_0
)
2077 real_dst_reg
= BPF_REG_AX
;
2079 branch_target
= prog
;
2080 /* Load old value */
2081 emit_ldx(&prog
, BPF_SIZE(insn
->code
),
2082 BPF_REG_0
, real_dst_reg
, insn
->off
);
2084 * Perform the (commutative) operation locally,
2085 * put the result in the AUX_REG.
2087 emit_mov_reg(&prog
, is64
, AUX_REG
, BPF_REG_0
);
2088 maybe_emit_mod(&prog
, AUX_REG
, real_src_reg
, is64
);
2089 EMIT2(simple_alu_opcodes
[BPF_OP(insn
->imm
)],
2090 add_2reg(0xC0, AUX_REG
, real_src_reg
));
2091 /* Attempt to swap in new value */
2092 err
= emit_atomic(&prog
, BPF_CMPXCHG
,
2093 real_dst_reg
, AUX_REG
,
2095 BPF_SIZE(insn
->code
));
2099 * ZF tells us whether we won the race. If it's
2100 * cleared we need to try again.
2102 EMIT2(X86_JNE
, -(prog
- branch_target
) - 2);
2103 /* Return the pre-modification value */
2104 emit_mov_reg(&prog
, is64
, real_src_reg
, BPF_REG_0
);
2105 /* Restore R0 after clobbering RAX */
2106 emit_mov_reg(&prog
, true, BPF_REG_0
, BPF_REG_AX
);
2110 err
= emit_atomic(&prog
, insn
->imm
, dst_reg
, src_reg
,
2111 insn
->off
, BPF_SIZE(insn
->code
));
2116 case BPF_STX
| BPF_PROBE_ATOMIC
| BPF_W
:
2117 case BPF_STX
| BPF_PROBE_ATOMIC
| BPF_DW
:
2118 start_of_ldx
= prog
;
2119 err
= emit_atomic_index(&prog
, insn
->imm
, BPF_SIZE(insn
->code
),
2120 dst_reg
, src_reg
, X86_REG_R12
, insn
->off
);
2123 goto populate_extable
;
2126 case BPF_JMP
| BPF_CALL
: {
2127 u8
*ip
= image
+ addrs
[i
- 1];
2129 func
= (u8
*) __bpf_call_base
+ imm32
;
2130 if (tail_call_reachable
) {
2131 LOAD_TAIL_CALL_CNT_PTR(bpf_prog
->aux
->stack_depth
);
2136 ip
+= x86_call_depth_emit_accounting(&prog
, func
, ip
);
2137 if (emit_call(&prog
, func
, ip
))
2142 case BPF_JMP
| BPF_TAIL_CALL
:
2144 emit_bpf_tail_call_direct(bpf_prog
,
2145 &bpf_prog
->aux
->poke_tab
[imm32
- 1],
2146 &prog
, image
+ addrs
[i
- 1],
2148 bpf_prog
->aux
->stack_depth
,
2151 emit_bpf_tail_call_indirect(bpf_prog
,
2154 bpf_prog
->aux
->stack_depth
,
2155 image
+ addrs
[i
- 1],
2160 case BPF_JMP
| BPF_JEQ
| BPF_X
:
2161 case BPF_JMP
| BPF_JNE
| BPF_X
:
2162 case BPF_JMP
| BPF_JGT
| BPF_X
:
2163 case BPF_JMP
| BPF_JLT
| BPF_X
:
2164 case BPF_JMP
| BPF_JGE
| BPF_X
:
2165 case BPF_JMP
| BPF_JLE
| BPF_X
:
2166 case BPF_JMP
| BPF_JSGT
| BPF_X
:
2167 case BPF_JMP
| BPF_JSLT
| BPF_X
:
2168 case BPF_JMP
| BPF_JSGE
| BPF_X
:
2169 case BPF_JMP
| BPF_JSLE
| BPF_X
:
2170 case BPF_JMP32
| BPF_JEQ
| BPF_X
:
2171 case BPF_JMP32
| BPF_JNE
| BPF_X
:
2172 case BPF_JMP32
| BPF_JGT
| BPF_X
:
2173 case BPF_JMP32
| BPF_JLT
| BPF_X
:
2174 case BPF_JMP32
| BPF_JGE
| BPF_X
:
2175 case BPF_JMP32
| BPF_JLE
| BPF_X
:
2176 case BPF_JMP32
| BPF_JSGT
| BPF_X
:
2177 case BPF_JMP32
| BPF_JSLT
| BPF_X
:
2178 case BPF_JMP32
| BPF_JSGE
| BPF_X
:
2179 case BPF_JMP32
| BPF_JSLE
| BPF_X
:
2180 /* cmp dst_reg, src_reg */
2181 maybe_emit_mod(&prog
, dst_reg
, src_reg
,
2182 BPF_CLASS(insn
->code
) == BPF_JMP
);
2183 EMIT2(0x39, add_2reg(0xC0, dst_reg
, src_reg
));
2186 case BPF_JMP
| BPF_JSET
| BPF_X
:
2187 case BPF_JMP32
| BPF_JSET
| BPF_X
:
2188 /* test dst_reg, src_reg */
2189 maybe_emit_mod(&prog
, dst_reg
, src_reg
,
2190 BPF_CLASS(insn
->code
) == BPF_JMP
);
2191 EMIT2(0x85, add_2reg(0xC0, dst_reg
, src_reg
));
2194 case BPF_JMP
| BPF_JSET
| BPF_K
:
2195 case BPF_JMP32
| BPF_JSET
| BPF_K
:
2196 /* test dst_reg, imm32 */
2197 maybe_emit_1mod(&prog
, dst_reg
,
2198 BPF_CLASS(insn
->code
) == BPF_JMP
);
2199 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg
), imm32
);
2202 case BPF_JMP
| BPF_JEQ
| BPF_K
:
2203 case BPF_JMP
| BPF_JNE
| BPF_K
:
2204 case BPF_JMP
| BPF_JGT
| BPF_K
:
2205 case BPF_JMP
| BPF_JLT
| BPF_K
:
2206 case BPF_JMP
| BPF_JGE
| BPF_K
:
2207 case BPF_JMP
| BPF_JLE
| BPF_K
:
2208 case BPF_JMP
| BPF_JSGT
| BPF_K
:
2209 case BPF_JMP
| BPF_JSLT
| BPF_K
:
2210 case BPF_JMP
| BPF_JSGE
| BPF_K
:
2211 case BPF_JMP
| BPF_JSLE
| BPF_K
:
2212 case BPF_JMP32
| BPF_JEQ
| BPF_K
:
2213 case BPF_JMP32
| BPF_JNE
| BPF_K
:
2214 case BPF_JMP32
| BPF_JGT
| BPF_K
:
2215 case BPF_JMP32
| BPF_JLT
| BPF_K
:
2216 case BPF_JMP32
| BPF_JGE
| BPF_K
:
2217 case BPF_JMP32
| BPF_JLE
| BPF_K
:
2218 case BPF_JMP32
| BPF_JSGT
| BPF_K
:
2219 case BPF_JMP32
| BPF_JSLT
| BPF_K
:
2220 case BPF_JMP32
| BPF_JSGE
| BPF_K
:
2221 case BPF_JMP32
| BPF_JSLE
| BPF_K
:
2222 /* test dst_reg, dst_reg to save one extra byte */
2224 maybe_emit_mod(&prog
, dst_reg
, dst_reg
,
2225 BPF_CLASS(insn
->code
) == BPF_JMP
);
2226 EMIT2(0x85, add_2reg(0xC0, dst_reg
, dst_reg
));
2230 /* cmp dst_reg, imm8/32 */
2231 maybe_emit_1mod(&prog
, dst_reg
,
2232 BPF_CLASS(insn
->code
) == BPF_JMP
);
2235 EMIT3(0x83, add_1reg(0xF8, dst_reg
), imm32
);
2237 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg
), imm32
);
2239 emit_cond_jmp
: /* Convert BPF opcode to x86 */
2240 switch (BPF_OP(insn
->code
)) {
2249 /* GT is unsigned '>', JA in x86 */
2253 /* LT is unsigned '<', JB in x86 */
2257 /* GE is unsigned '>=', JAE in x86 */
2261 /* LE is unsigned '<=', JBE in x86 */
2265 /* Signed '>', GT in x86 */
2269 /* Signed '<', LT in x86 */
2273 /* Signed '>=', GE in x86 */
2277 /* Signed '<=', LE in x86 */
2280 default: /* to silence GCC warning */
2283 jmp_offset
= addrs
[i
+ insn
->off
] - addrs
[i
];
2284 if (is_imm8_jmp_offset(jmp_offset
)) {
2286 /* To keep the jmp_offset valid, the extra bytes are
2287 * padded before the jump insn, so we subtract the
2288 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
2290 * If the previous pass already emits an imm8
2291 * jmp_cond, then this BPF insn won't shrink, so
2294 * On the other hand, if the previous pass emits an
2295 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
2296 * keep the image from shrinking further.
2298 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
2299 * is 2 bytes, so the size difference is 4 bytes.
2301 nops
= INSN_SZ_DIFF
- 2;
2302 if (nops
!= 0 && nops
!= 4) {
2303 pr_err("unexpected jmp_cond padding: %d bytes\n",
2307 emit_nops(&prog
, nops
);
2309 EMIT2(jmp_cond
, jmp_offset
);
2310 } else if (is_simm32(jmp_offset
)) {
2311 EMIT2_off32(0x0F, jmp_cond
+ 0x10, jmp_offset
);
2313 pr_err("cond_jmp gen bug %llx\n", jmp_offset
);
2319 case BPF_JMP
| BPF_JA
:
2320 case BPF_JMP32
| BPF_JA
:
2321 if (BPF_CLASS(insn
->code
) == BPF_JMP
) {
2322 if (insn
->off
== -1)
2323 /* -1 jmp instructions will always jump
2324 * backwards two bytes. Explicitly handling
2325 * this case avoids wasting too many passes
2326 * when there are long sequences of replaced
2331 jmp_offset
= addrs
[i
+ insn
->off
] - addrs
[i
];
2333 if (insn
->imm
== -1)
2336 jmp_offset
= addrs
[i
+ insn
->imm
] - addrs
[i
];
2341 * If jmp_padding is enabled, the extra nops will
2342 * be inserted. Otherwise, optimize out nop jumps.
2345 /* There are 3 possible conditions.
2346 * (1) This BPF_JA is already optimized out in
2347 * the previous run, so there is no need
2348 * to pad any extra byte (0 byte).
2349 * (2) The previous pass emits an imm8 jmp,
2350 * so we pad 2 bytes to match the previous
2352 * (3) Similarly, the previous pass emits an
2353 * imm32 jmp, and 5 bytes is padded.
2355 nops
= INSN_SZ_DIFF
;
2356 if (nops
!= 0 && nops
!= 2 && nops
!= 5) {
2357 pr_err("unexpected nop jump padding: %d bytes\n",
2361 emit_nops(&prog
, nops
);
emit_jmp:
			if (is_imm8_jmp_offset(jmp_offset)) {
				if (jmp_padding) {
					/* To avoid breaking jmp_offset, the extra bytes
					 * are padded before the actual jmp insn, so
					 * 2 bytes is subtracted from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp, there is nothing to pad (0 byte).
					 *
					 * If it emits an imm32 jmp (5 bytes) previously
					 * and now an imm8 jmp (2 bytes), then we pad
					 * (5 - 2 = 3) bytes to stop the image from
					 * shrinking further.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 3) {
						pr_err("unexpected jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, INSN_SZ_DIFF - 2);
				}
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;
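			/*
			 * The unconditional jump uses the same two x86 forms:
			 * 0xEB with a rel8 for short jumps and 0xE9 with a rel32
			 * otherwise. A self-directed BPF "goto -1" always lands
			 * exactly two bytes back (the size of the short form),
			 * which is why the special case above can hard-code
			 * jmp_offset = -2 instead of waiting for addrs[] to
			 * converge.
			 */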
		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			if (bpf_prog->aux->exception_boundary)
				pop_callee_regs(&prog, all_callee_regs_used);
			else
				pop_callee_regs(&prog, callee_regs_used);
			EMIT1(0xC9);         /* leave */
			emit_return(&prog, image + addrs[i - 1] + (prog - temp));
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}
		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			/*
			 * When populating the image, assert that:
			 *
			 *  i) We do not write beyond the allocated space, and
			 * ii) addrs[i] did not change from the prior run, in order
			 *     to validate assumptions made for computing branch
			 *     displacements.
			 */
			if (unlikely(proglen + ilen > oldproglen ||
				     proglen + ilen != addrs[i])) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(rw_image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}
static void clean_stack_garbage(const struct btf_func_model *m,
				u8 **pprog, int nr_stack_slots,
				int stack_size)
{
	int arg_size, off;
	u8 *prog;

	/* Generally speaking, the compiler will pass the arguments
	 * on-stack with "push" instruction, which will take 8-byte
	 * on the stack. In this case, there won't be garbage values
	 * while we copy the arguments from origin stack frame to current
	 * stack frame.
	 *
	 * However, sometimes the compiler will only allocate 4-byte on
	 * the stack for the arguments. For now, this case will only
	 * happen if there is only one argument on-stack and its size
	 * not more than 4 byte. In this case, there will be garbage
	 * values on the upper 4-byte where we store the argument on
	 * current stack frame.
	 *
	 * arguments on origin stack:
	 *
	 * stack_arg_1(4-byte) xxx(4-byte)
	 *
	 * what we copy:
	 *
	 * stack_arg_1(8-byte): stack_arg_1(origin) xxx
	 *
	 * and the xxx is the garbage values which we should clean here.
	 */
	if (nr_stack_slots != 1)
		return;

	/* the size of the last argument */
	arg_size = m->arg_size[m->nr_args - 1];
	if (arg_size <= 4) {
		off = -(stack_size - 4);
		prog = *pprog;
		/* mov DWORD PTR [rbp + off], 0 */
		if (!is_imm8(off))
			EMIT2_off32(0xC7, 0x85, off);
		else
			EMIT3(0xC7, 0x45, off);
		EMIT(0, 4);
		*pprog = prog;
	}
}
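/*
 * Concrete illustration (hypothetical caller, chosen only to show the case
 * handled above): a traced function whose single on-stack argument is a u32
 * occupies one 4-byte slot in the caller's frame. The 8-byte copy performed
 * by save_args() drags four garbage bytes from the caller's frame along with
 * it, and the DWORD store emitted here zeroes that upper half of the slot.
 */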
/* get the count of the regs that are used to pass arguments */
static int get_nr_used_regs(const struct btf_func_model *m)
{
	int i, arg_regs, nr_used_regs = 0;

	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
		arg_regs = (m->arg_size[i] + 7) / 8;
		if (nr_used_regs + arg_regs <= 6)
			nr_used_regs += arg_regs;

		if (nr_used_regs >= 6)
			break;
	}

	return nr_used_regs;
}
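/*
 * Example (hypothetical btf_func_model, for illustration only): argument
 * sizes {8, 8, 16, 8, 8} consume 1 + 1 + 2 + 1 + 1 = 6 register slots, so
 * get_nr_used_regs() returns 6 and any further argument would have to be
 * passed on the stack.
 */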
static void save_args(const struct btf_func_model *m, u8 **prog,
		      int stack_size, bool for_call_origin)
{
	int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0;
	int i, j;

	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
		arg_regs = (m->arg_size[i] + 7) / 8;

		/* According to the research of Yonghong, struct members
		 * should be all in register or all on the stack.
		 * Meanwhile, the compiler will pass the argument on regs
		 * if the remaining regs can hold the argument.
		 *
		 * Disorder of the args can happen. For example:
		 *
		 * struct foo_struct {
		 *     long a;
		 *     int b;
		 * };
		 * int foo(char, char, char, char, char, struct foo_struct,
		 *         char);
		 *
		 * the arg1-5,arg7 will be passed by regs, and arg6 will
		 * be passed by stack.
		 */
		if (nr_regs + arg_regs > 6) {
			/* copy function arguments from origin stack frame
			 * into current stack frame.
			 *
			 * The starting address of the arguments on-stack is:
			 *   rbp + 8(push rbp) +
			 *   8(return addr of origin call) +
			 *   8(return addr of the caller)
			 * which means: rbp + 24
			 */
			for (j = 0; j < arg_regs; j++) {
				emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP,
					 nr_stack_slots * 8 + 0x18);
				emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0,
					 -stack_size);

				if (!nr_stack_slots)
					first_off = stack_size;
				stack_size -= 8;
				nr_stack_slots++;
			}
		} else {
			/* Only copy the arguments on-stack to current
			 * 'stack_size' and ignore the regs, used to
			 * prepare the arguments on-stack for origin call.
			 */
			if (for_call_origin) {
				nr_regs += arg_regs;
				continue;
			}

			/* copy the arguments from regs into stack */
			for (j = 0; j < arg_regs; j++) {
				emit_stx(prog, BPF_DW, BPF_REG_FP,
					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
					 -stack_size);
				stack_size -= 8;
				nr_regs++;
			}
		}
	}

	clean_stack_garbage(m, prog, nr_stack_slots, first_off);
}
static void restore_regs(const struct btf_func_model *m, u8 **prog,
			 int stack_size)
{
	int i, j, arg_regs, nr_regs = 0;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 *
	 * The logic here is similar to what we do in save_args()
	 */
	for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) {
		arg_regs = (m->arg_size[i] + 7) / 8;
		if (nr_regs + arg_regs <= 6) {
			for (j = 0; j < arg_regs; j++) {
				emit_ldx(prog, BPF_DW,
					 nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs,
					 BPF_REG_FP,
					 -stack_size);
				stack_size -= 8;
				nr_regs++;
			}
		} else {
			stack_size -= 8 * arg_regs;
		}
	}
}
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_tramp_link *l, int stack_size,
			   int run_ctx_off, bool save_ret,
			   void *image, void *rw_image)
{
	u8 *prog = *pprog;
	u8 *jmp_insn;
	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
	struct bpf_prog *p = l->link.prog;
	u64 cookie = l->cookie;

	/* mov rdi, cookie */
	emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);

	/* Prepare struct bpf_tramp_run_ctx.
	 *
	 * bpf_tramp_run_ctx is already preserved by
	 * arch_prepare_bpf_trampoline().
	 *
	 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
	 */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
	if (!is_imm8(-run_ctx_off))
		EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off);
	else
		EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

	if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image)))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
	/* emit 2 nops that will be replaced with JE insn */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] */
	if (!is_imm8(-stack_size))
		EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size);
	else
		EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image)))
		return -EINVAL;

	/*
	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * of the previous call which is then passed on the stack to
	 * the next BPF program.
	 *
	 * BPF_TRAMP_FENTRY trampoline may need to return the return
	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
	 */
	if (save_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* replace 2 nops with JE insn, since jmp target is known */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	/* arg3: lea rdx, [rbp - run_ctx_off] */
	if (!is_imm8(-run_ctx_off))
		EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off);
	else
		EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
	if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image)))
		return -EINVAL;

	*pprog = prog;
	return 0;
}
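/*
 * The sequence emitted by invoke_bpf_prog() above is roughly the following
 * (sketch only; the interpreter and save_ret variations are omitted):
 *
 *	movabsq rdi, cookie
 *	mov     [rbp - run_ctx_off + ctx_cookie_off], rdi
 *	movabsq rdi, prog
 *	lea     rsi, [rbp - run_ctx_off]
 *	call    __bpf_prog_enter*        // returns start time, or 0 to skip
 *	mov     rbx, rax
 *	test    rax, rax
 *	je      skip                     // patched in over the two nops
 *	lea     rdi, [rbp - stack_size]
 *	call    prog->bpf_func
 * skip:
 *	movabsq rdi, prog
 *	mov     rsi, rbx
 *	lea     rdx, [rbp - run_ctx_off]
 *	call    __bpf_prog_exit*
 */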
static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}
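/*
 * Note on the displacement math in emit_cond_near_jump() above: the near
 * conditional jump occupies six bytes (two opcode bytes plus a rel32) and
 * the displacement is relative to the end of the instruction, hence
 * "func - (ip + 2 + 4)".
 */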
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
		      struct bpf_tramp_links *tl, int stack_size,
		      int run_ctx_off, bool save_ret,
		      void *image, void *rw_image)
{
	int i;
	u8 *prog = *pprog;

	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
				    run_ctx_off, save_ret, image, rw_image))
			return -EINVAL;
	}
	*pprog = prog;
	return 0;
}
static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
			      struct bpf_tramp_links *tl, int stack_size,
			      int run_ctx_off, u8 **branches,
			      void *image, void *rw_image)
{
	int i;
	u8 *prog = *pprog;

	/* The first fmod_ret program will receive a garbage return value.
	 * Set this to 0 to avoid confusing the program.
	 */
	emit_mov_imm32(&prog, false, BPF_REG_0, 0);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
	for (i = 0; i < tl->nr_links; i++) {
		if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
				    image, rw_image))
			return -EINVAL;

		/* mod_ret prog stored return value into [rbp - 8]. Emit:
		 * if (*(u64 *)(rbp - 8) !=  0)
		 *	goto do_fexit;
		 */
		/* cmp QWORD PTR [rbp - 0x8], 0x0 */
		EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);

		/* Save the location of the branch and Generate 6 nops
		 * (4 bytes for an offset and 2 bytes for the jump) These nops
		 * are replaced with a conditional jump once do_fexit (i.e. the
		 * start of the fexit invocation) is finalized.
		 */
		branches[i] = prog;
		emit_nops(&prog, 4 + 2);
	}

	*pprog = prog;
	return 0;
}
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
#define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack)			\
	__LOAD_TCC_PTR(-round_up(stack, 8) - 8)
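/*
 * Worked example (illustrative numbers only): with a trampoline stack_size
 * of 40 bytes, round_up(40, 8) is 40 and the macro loads the tail-call-count
 * pointer from [rbp - 48], i.e. the slot that was filled by the "push rax"
 * right after the trampoline frame was set up.
 */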
/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will be nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16                     // space for skb and dev
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi    // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]    // restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24                     // space for skb, dev, return value
 * push rbx                        // temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi   // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi   // save dev pointer to stack
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog  // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24]   // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16]   // restore dev pointer from stack
 * call eth_type_trans+5           // execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax    // save return value
 * call __bpf_prog_enter           // rcu_read_lock and preempt_disable
 * mov rbx, rax                    // remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]             // R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog   // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog  // unused if bpf stats are off
 * mov rsi, rbx                    // prog start time
 * call __bpf_prog_exit            // rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8]    // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image,
					 void *rw_image_end, void *image,
					 const struct btf_func_model *m, u32 flags,
					 struct bpf_tramp_links *tlinks,
					 void *func_addr)
{
	int i, ret, nr_regs = m->nr_args, stack_size = 0;
	int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	void *orig_call = func_addr;
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/*
	 * F_INDIRECT is only compatible with F_RET_FENTRY_RET, it is
	 * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG
	 * because @func_addr.
	 */
	WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) &&
		     (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET)));
	/* extra registers for struct arguments */
	for (i = 0; i < m->nr_args; i++) {
		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
			nr_regs += (m->arg_size[i] + 7) / 8 - 1;
	}

	/* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6
	 * are passed through regs, the remains are through stack.
	 */
	if (nr_regs > MAX_BPF_FUNC_ARGS)
		return -ENOTSUPP;

	/* Generated trampoline stack layout:
	 *
	 * RBP + 8         [ return address  ]
	 * RBP + 0         [ RBP             ]
	 *
	 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
	 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
	 *
	 *                 [ reg_argN        ]  always
	 *                 [ ...             ]
	 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
	 *
	 * RBP - nregs_off [ regs count      ]  always
	 *
	 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
	 *
	 * RBP - rbx_off   [ rbx value       ]  always
	 *
	 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
	 *
	 *                     [ stack_argN ]  BPF_TRAMP_F_CALL_ORIG
	 *                     [ ...        ]
	 *                     [ stack_arg2 ]
	 * RBP - arg_stack_off [ stack_arg1 ]
	 * RSP                 [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX
	 */
	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	stack_size += nr_regs * 8;
	regs_off = stack_size;

	/* regs count  */
	stack_size += 8;
	nregs_off = stack_size;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */

	ip_off = stack_size;

	stack_size += 8;
	rbx_off = stack_size;

	stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
	run_ctx_off = stack_size;

	if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) {
		/* the space that used to pass arguments on-stack */
		stack_size += (nr_regs - get_nr_used_regs(m)) * 8;
		/* make sure the stack pointer is 16-byte aligned if we
		 * need pass arguments on stack, which means
		 * [stack_size + 8(rbp) + 8(rip) + 8(origin rip)]
		 * should be 16-byte aligned. Following code depend on
		 * that stack_size is already 8-byte aligned.
		 */
		stack_size += (stack_size % 16) ? 0 : 8;
	}

	arg_stack_off = stack_size;
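	/*
	 * Worked example (illustrative only): for a traced function with two
	 * register arguments, save_ret set and no BPF_TRAMP_F_IP_ARG, the
	 * running stack_size gives regs_off = 24 (8 for the saved return
	 * value plus 16 for the two args), nregs_off = 32, ip_off = 32 (no
	 * extra slot), rbx_off = 40, and run_ctx_off = 40 plus the 8-byte
	 * rounded size of struct bpf_tramp_run_ctx.
	 */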
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		if (is_endbr(*(u32 *)orig_call))
			orig_call += ENDBR_INSN_SIZE;
		orig_call += X86_PATCH_SIZE;
	}

	prog = rw_image;

	if (flags & BPF_TRAMP_F_INDIRECT) {
		/*
		 * Indirect call for bpf_struct_ops
		 */
		emit_cfi(&prog, cfi_get_func_hash(func_addr));
	} else {
		/*
		 * Direct-call fentry stub, as such it needs accounting for the
		 * __fentry__ call.
		 */
		x86_call_depth_emit_accounting(&prog, NULL, image);
	}
	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	if (!is_imm8(stack_size)) {
		/* sub rsp, stack_size */
		EMIT3_off32(0x48, 0x81, 0xEC, stack_size);
	} else {
		/* sub rsp, stack_size */
		EMIT4(0x48, 0x83, 0xEC, stack_size);
	}
	if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
		EMIT1(0x50);		/* push rax */
	/* mov QWORD PTR [rbp - rbx_off], rbx */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);

	/* Store number of argument registers of the traced function:
	 *   mov rax, nr_regs
	 *   mov QWORD PTR [rbp - nregs_off], rax
	 */
	emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs);
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off);

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 * movabsq rax, func_addr
		 * mov QWORD PTR [rbp - ip_off], rax
		 */
		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

	save_args(m, &prog, regs_off, false);
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_rsb_call(&prog, __bpf_tramp_enter,
				  image + (prog - (u8 *)rw_image))) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_links) {
		if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
			return -EINVAL;
	}

	if (fmod_ret->nr_links) {
		branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
				       run_ctx_off, branches, image, rw_image)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, regs_off);
		save_args(m, &prog, arg_stack_off, true);

		if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
			/* Before calling the original function, load the
			 * tail_call_cnt_ptr from stack to rax.
			 */
			LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
		}

		if (flags & BPF_TRAMP_F_ORIG_STACK) {
			emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
			EMIT2(0xff, 0xd3); /* call *rbx */
		} else {
			/* call original function */
			if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) {
				ret = -EINVAL;
				goto cleanup;
			}
		}
		/* remember return value in a stack for bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = image + (prog - (u8 *)rw_image);
		emit_nops(&prog, X86_PATCH_SIZE);
	}
	if (fmod_ret->nr_links) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_links; i++) {
			emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image),
					    image + (branches[i] - (u8 *)rw_image), X86_JNE);
		}
	}

	if (fexit->nr_links) {
		if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
			       false, image, rw_image)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, regs_off);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = image + (prog - (u8 *)rw_image);
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) {
			ret = -EINVAL;
			goto cleanup;
		}
	} else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) {
		/* Before running the original function, load the
		 * tail_call_cnt_ptr from stack to rax.
		 */
		LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size);
	}

	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME) {
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	}
	emit_return(&prog, image + (prog - (u8 *)rw_image));
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY;

cleanup:
	kfree(branches);
	return ret;
}
void *arch_alloc_bpf_trampoline(unsigned int size)
{
	return bpf_prog_pack_alloc(size, jit_fill_hole);
}

void arch_free_bpf_trampoline(void *image, unsigned int size)
{
	bpf_prog_pack_free(image, size);
}

int arch_protect_bpf_trampoline(void *image, unsigned int size)
{
	return 0;
}
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr)
{
	void *rw_image, *tmp;
	int ret;
	u32 size = image_end - image;

	/* rw_image doesn't need to be in module memory range, so we can
	 * use kvmalloc.
	 */
	rw_image = kvmalloc(size, GFP_KERNEL);
	if (!rw_image)
		return -ENOMEM;

	ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m,
					    flags, tlinks, func_addr);
	if (ret > 0) {
		tmp = bpf_arch_text_copy(image, rw_image, size);
		if (IS_ERR(tmp))
			ret = PTR_ERR(tmp);
	}

	kvfree(rw_image);
	return ret;
}
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr)
{
	struct bpf_tramp_image im;
	void *image;
	int ret;

	/* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
	 * This will NOT cause fragmentation in direct map, as we do not
	 * call set_memory_*() on this buffer.
	 *
	 * We cannot use kvmalloc here, because we need image to be in
	 * module memory range.
	 */
	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return -ENOMEM;

	ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image,
					    m, flags, tlinks, func_addr);
	bpf_jit_free_exec(image);
	return ret;
}
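/*
 * Note: the size returned here is whatever __arch_prepare_bpf_trampoline()
 * computed, i.e. the generated length plus the BPF_INSN_SAFETY slack added
 * at the end of generation, so callers get a small safety margin on top of
 * the exact image size.
 */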
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs,
			       u8 *image, u8 *buf)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], image + (prog - buf),
					  X86_JE);
		if (err)
			return err;

		emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {			/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs, image, buf);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs, image, buf);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{
	u8 *prog = buf;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
}
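/*
 * Illustration (hypothetical program set): with, say, four JITed programs
 * the dispatcher sorts their addresses and emits a small binary search over
 * rdx (the target address) - one "cmp rdx, progs[pivot]; jg" per level and a
 * "cmp; je <prog>" per leaf - with each leaf falling back to an indirect
 * "jmp rdx" when nothing matched.
 */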
struct x64_jit_data {
	struct bpf_binary_header *rw_header;
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)
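/*
 * With these values the convergence loop below runs for at most MAX_PASSES
 * shrink passes (plus the final emission passes once an image buffer
 * exists), and turns jump padding on from pass PADDING_PASSES (= 15) onward,
 * trading a slightly larger image for guaranteed convergence on pathological
 * programs.
 */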
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *rw_header = NULL;
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *rw_image = NULL;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		rw_header = jit_data->rw_header;
		rw_image = (void *)rw_header + ((void *)image - (void *)header);
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before first pass, make a rough estimation of addrs[]
	 * each BPF instruction is translated to less than 64 bytes
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header) {
				bpf_arch_text_copy(&header->size, &rw_header->size,
						   sizeof(rw_header->size));
				bpf_jit_binary_pack_free(header, rw_header);
			}
			/* Fall back to interpreter mode */
			prog = orig_prog;
			if (extra_pass) {
				prog->bpf_func = NULL;
				prog->jited = 0;
				prog->jited_len = 0;
			}
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
							   &image, align, &rw_header, &rw_image,
							   jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, rw_image);
	if (image) {
		if (!prog->is_func || extra_pass) {
			/*
			 * bpf_jit_binary_pack_finalize fails in two scenarios:
			 *   1) header is not pointing to proper module memory;
			 *   2) the arch doesn't support bpf_arch_text_copy().
			 *
			 * Both cases are serious bugs and justify WARN_ON.
			 */
			if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) {
				/* header has been freed */
				header = NULL;
				goto out_image;
			}

			bpf_tail_call_direct_fixup(prog);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
			jit_data->rw_header = rw_header;
		}
		/*
		 * ctx.prog_offset is used when CFI preambles put code *before*
		 * the function. See emit_cfi(). For FineIBT specifically this code
		 * can also be executed and bpf_prog_kallsyms_add() will
		 * generate an additional symbol to cover this, hence also
		 * decrement proglen.
		 */
		prog->bpf_func = (void *)image + cfi_get_offset();
		prog->jited = 1;
		prog->jited_len = proglen - cfi_get_offset();
	} else {
		prog = orig_prog;
	}
	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}
bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
	if (text_poke_copy(dst, src, len) == NULL)
		return ERR_PTR(-EINVAL);
	return dst;
}

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}

bool bpf_jit_supports_percpu_insn(void)
{
	return true;
}
void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited) {
		struct x64_jit_data *jit_data = prog->aux->jit_data;
		struct bpf_binary_header *hdr;

		/*
		 * If we fail the final pass of JIT (from jit_subprogs),
		 * the program may not be finalized yet. Call finalize here
		 * before freeing it.
		 */
		if (jit_data) {
			bpf_jit_binary_pack_finalize(jit_data->header,
						     jit_data->rw_header);
			kvfree(jit_data->addrs);
			kfree(jit_data);
		}
		prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset();
		hdr = bpf_jit_binary_pack_hdr(prog);
		bpf_jit_binary_pack_free(hdr, NULL);
		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
	}

	bpf_prog_unlock_free(prog);
}
bool bpf_jit_supports_exceptions(void)
{
	/* We unwind through both kernel frames (starting from within bpf_throw
	 * call) and BPF frames. Therefore we require ORC unwinder to be enabled
	 * to walk kernel frames and reach BPF frames in the stack trace.
	 */
	return IS_ENABLED(CONFIG_UNWINDER_ORC);
}
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
{
#if defined(CONFIG_UNWINDER_ORC)
	struct unwind_state state;
	unsigned long addr;

	for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp))
			break;
	}
	return;
#endif
	WARN(1, "verification of programs using bpf_throw should have failed\n");
}
void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
			       struct bpf_prog *new, struct bpf_prog *old)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	int ret;

	old_bypass_addr = old ? NULL : poke->bypass_addr;
	old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
	new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

	/*
	 * On program loading or teardown, the program's kallsym entry
	 * might not be in place, so we use __bpf_arch_text_poke to skip
	 * the kallsyms check.
	 */
	if (new) {
		ret = __bpf_arch_text_poke(poke->tailcall_target,
					   BPF_MOD_JUMP,
					   old_addr, new_addr);
		BUG_ON(ret < 0);
		if (!old) {
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   poke->bypass_addr,
						   NULL);
			BUG_ON(ret < 0);
		}
	} else {
		ret = __bpf_arch_text_poke(poke->tailcall_bypass,
					   BPF_MOD_JUMP,
					   old_bypass_addr,
					   poke->bypass_addr);
		BUG_ON(ret < 0);
		/* let other CPUs finish the execution of program
		 * so that it will not possible to expose them
		 * to invalid nop, stack unwind, nop state
		 */
		if (!ret)
			synchronize_rcu();
		ret = __bpf_arch_text_poke(poke->tailcall_target,
					   BPF_MOD_JUMP,
					   new_addr, NULL);
		BUG_ON(ret < 0);
	}
}
bool bpf_jit_supports_arena(void)
{
	return true;
}

bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
{
	if (!in_arena)
		return true;
	switch (insn->code) {
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		if (insn->imm == (BPF_AND | BPF_FETCH) ||
		    insn->imm == (BPF_OR | BPF_FETCH) ||
		    insn->imm == (BPF_XOR | BPF_FETCH))
			return false;
	}
	return true;
}
bool bpf_jit_supports_ptr_xchg(void)
{
	return true;
}

/* x86-64 JIT emits its own code to filter user addresses so return 0 here */
u64 bpf_arch_uaddress_limit(void)