// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < JMP32_INSN_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is being optimized */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes at this address have
	 * been overwritten by the jump destination address and must be
	 * recovered from the op->optinsn.copied_insn buffer.
	 */
	if (copy_from_kernel_nofault(buf, (void *)addr,
		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

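	/*
	 * Two cases follow: if @addr is the probed address itself, byte 0 is
	 * the saved original opcode and the next DISP32_SIZE bytes come from
	 * copied_insn; if @addr points into the middle of the overwritten
	 * displacement, only the tail of copied_insn starting at
	 * offs = addr - kp->addr - 1 is copied back. Hypothetical example:
	 * with kp->addr == 0x1000 and addr == 0x1002, offs == 1, so
	 * copied_insn[1..3] land at buf[0..2].
	 */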
	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
	}

	return (unsigned long)buf;
}
static void synthesize_clac(kprobe_opcode_t *addr)
{
	/*
	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
	 * This isn't a fast path anyway.
	 */
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return;

	/* Replace the NOP3 with CLAC */
	addr[0] = 0x0f;
	addr[1] = 0x01;
	addr[2] = 0xca;
}
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;
	*addr++ = 0xbf;
#else
	*addr++ = 0xb8;
#endif
	*(unsigned long *)addr = val;
}
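/*
 * Out-of-line trampoline template. arch_prepare_optimized_kprobe() copies
 * the bytes between optprobe_template_entry and optprobe_template_end into
 * each detour buffer and then patches three sites in that copy: the CLAC
 * slot (optprobe_template_clac), the mov-immediate loading the
 * struct optimized_kprobe pointer as the first argument
 * (optprobe_template_val), and the relative call to optimized_callback()
 * (optprobe_template_call). The copied probe-site instructions and a jump
 * back to the original code are appended after the template.
 */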
104 ".pushsection .rodata\n"
105 "optprobe_template_func:\n"
106 ".global optprobe_template_entry\n"
107 "optprobe_template_entry:\n"
109 /* We don't bother saving the ss register */
112 ".global optprobe_template_clac\n"
113 "optprobe_template_clac:\n"
117 ".global optprobe_template_val\n"
118 "optprobe_template_val:\n"
121 ".global optprobe_template_call\n"
122 "optprobe_template_call:\n"
124 /* Move flags to rsp */
125 " movq 18*8(%rsp), %rdx\n"
126 " movq %rdx, 19*8(%rsp)\n"
128 /* Skip flags entry */
131 #else /* CONFIG_X86_32 */
134 ".global optprobe_template_clac\n"
135 "optprobe_template_clac:\n"
139 ".global optprobe_template_val\n"
140 "optprobe_template_val:\n"
142 ".global optprobe_template_call\n"
143 "optprobe_template_call:\n"
145 /* Move flags into esp */
146 " movl 14*4(%esp), %edx\n"
147 " movl %edx, 15*4(%esp)\n"
149 /* Skip flags entry */
153 ".global optprobe_template_end\n"
154 "optprobe_template_end:\n"
void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
	((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
	((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
	((long)optprobe_template_end - (long)optprobe_template_entry)
/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		/* Save skipped registers */
		regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);
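/*
 * Copy at least JMP32_INSN_SIZE bytes worth of whole instructions from @src
 * into @dest (which will eventually run at @real), bailing out if any of
 * them cannot be decoded or is not boostable, or if the copied range
 * overlaps text reserved by ftrace, alternatives, jump labels or static
 * calls. Returns the number of bytes copied or a negative error code.
 */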
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
	struct insn insn;
	int len = 0, ret;

	while (len < JMP32_INSN_SIZE) {
		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}

	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1) ||
	    static_call_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}
/* Check whether insn is indirect jump */
static int __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
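/*
 * Note on the check above: for opcode 0xff the ModRM reg field selects the
 * operation, with /4 being a near indirect jump and /5 a far indirect jump;
 * masking the field with 6 and comparing against 4 accepts both encodings.
 */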
/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
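/*
 * The target above is the byte following the instruction plus its relative
 * immediate. For example, a two-byte short jump encoded as "EB 10" at
 * address 0x1000 has next_byte == 0x1002 and immediate 0x10, giving a
 * target of 0x1012.
 */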
static int insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to an x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with an
	 * older gcc may still emit indirect jumps, so this check is added on
	 * top of the indirect-jump check rather than replacing it.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}
static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
{
	unsigned char ops;

	for (; addr < eaddr; addr++) {
		if (get_kernel_nofault(ops, (void *)addr) < 0 ||
		    ops != INT3_INSN_OPCODE)
			return false;
	}

	return true;
}
/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling and register setup.
	 */
	if (((paddr >= (unsigned long)__entry_text_start) &&
	     (paddr <  (unsigned long)__entry_text_end)))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < JMP32_INSN_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;

		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;

		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/*
		 * If an unknown breakpoint is detected, it could be a padding
		 * INT3 between functions. Check that all the rest of the
		 * bytes are also INT3.
		 */
		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
			return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;

		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that no instruction jumps into the target range */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
					 DISP32_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}
/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}
/* Check whether addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}
/* Free the optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	u8 *slot = op->optinsn.insn;

	if (slot) {
		int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

		/* Record the perf event before freeing the slot */
		if (dirty)
			perf_event_text_poke(slot, slot, len, NULL, 0);

		free_optinsn_slot(slot, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
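/*
 * Layout of a prepared detour buffer (op->optinsn.insn), as assembled by
 * arch_prepare_optimized_kprobe() below:
 *
 *   [0, TMPL_END_IDX)              register save/restore template with the
 *                                  patched CLAC, arg1 load and call to
 *                                  optimized_callback()
 *   [TMPL_END_IDX, +optinsn.size)  copy of the probed instructions
 *   [..., +JMP32_INSN_SIZE)        JMP.d32 back to the original code
 */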
/*
 * Copy the target instructions that will be replaced by the jump.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf = NULL, *slot;
	int ret, len;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	op->optinsn.insn = slot = get_optinsn_slot();
	if (!slot) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Verify that the address gap is within the 2GB range, because the
	 * detour uses a relative jump.
	 */
	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
	if (abs(rel) > 0x7fffffff) {
		ret = -ERANGE;
		goto err;
	}

	/* Copy arch-dep-instance from template */
	memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
					  slot + TMPL_END_IDX);
	if (ret < 0)
		goto err;
	op->optinsn.size = ret;
	len = TMPL_END_IDX + op->optinsn.size;

	synthesize_clac(buf + TMPL_CLAC_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX,
			   slot + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + len, slot + len,
			   (u8 *)op->kp.addr + op->optinsn.size);
	len += JMP32_INSN_SIZE;

	/*
	 * Note that len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is
	 * also used in __arch_remove_optimized_kprobe().
	 */

	/* We have to use text_poke() for the instruction buffer because it is RO */
	perf_event_text_poke(slot, NULL, 0, buf, len);
	text_poke(slot, buf, len);

	ret = 0;
out:
	kfree(buf);
	return ret;

err:
	__arch_remove_optimized_kprobe(op, 0);
	goto out;
}
/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * The caller must hold kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buff[JMP32_INSN_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + JMP32_INSN_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Backup instructions which will be replaced by jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
		       DISP32_SIZE);

		insn_buff[0] = JMP32_INSN_OPCODE;
		*(s32 *)(&insn_buff[1]) = rel;
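		/*
		 * insn_buff is now { 0xe9, rel } - a 5-byte JMP.d32 whose
		 * little-endian 32-bit displacement is taken relative to the
		 * end of the jump, so it lands on the detour buffer.
		 */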
		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

		list_del_init(&op->list);
	}
}
/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
	u8 old[JMP32_INSN_SIZE];
	u8 *addr = op->kp.addr;

	memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
	memcpy(new + INT3_INSN_SIZE,
	       op->optinsn.copied_insn,
	       JMP32_INSN_SIZE - INT3_INSN_SIZE);

	text_poke(addr, new, INT3_INSN_SIZE);
	text_poke_sync();
	text_poke(addr + INT3_INSN_SIZE,
		  new + INT3_INSN_SIZE,
		  JMP32_INSN_SIZE - INT3_INSN_SIZE);
	text_poke_sync();

	perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}
/*
 * Recover the original instructions and breakpoints from relative jumps.
 * The caller must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}
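/*
 * Called from the INT3 handler for a probe that has already been optimized
 * (or is being unoptimized): instead of single-stepping, execution is
 * redirected into the detour buffer just past the register-save template,
 * so the copied instructions and the trailing jump take it back to the
 * original code.
 */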
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through the copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);