// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Kernel Probes Jump Optimization (Optprobes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/objtool.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/nospec-branch.h>

#include "common.h"

unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
        struct optimized_kprobe *op;
        struct kprobe *kp;
        long offs;
        int i;

        for (i = 0; i < JMP32_INSN_SIZE; i++) {
                kp = get_kprobe((void *)addr - i);
                /* This function only handles jump-optimized kprobe */
                if (kp && kprobe_optimized(kp)) {
                        op = container_of(kp, struct optimized_kprobe, kp);
                        /* If op is optimized or queued for unoptimizing */
                        if (list_empty(&op->list) || optprobe_queued_unopt(op))
                                goto found;
                }
        }

        return addr;
found:
        /*
         * If the kprobe is optimized, the original bytes at addr have been
         * overwritten by the jump destination address. In this case, the
         * original bytes must be recovered from the op->optinsn.copied_insn
         * buffer.
         */
        if (copy_from_kernel_nofault(buf, (void *)addr,
                MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                return 0UL;

        if (addr == (unsigned long)kp->addr) {
                /* Restore the first byte of the op-code */
                buf[0] = kp->opcode;
                memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
        } else {
                offs = addr - (unsigned long)kp->addr - 1;
                memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
        }

        return (unsigned long)buf;
}

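/*
 * Note: the out-of-line template below reserves a three-byte NOP at
 * optprobe_template_clac. On SMAP-capable CPUs, synthesize_clac()
 * rewrites that NOP to the three-byte CLAC instruction (0f 01 ca) so
 * that the C callback never runs with EFLAGS.AC set.
 */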
static void synthesize_clac(kprobe_opcode_t *addr)
{
        /*
         * Can't be static_cpu_has() due to how objtool treats this feature bit.
         * This isn't a fast path anyway.
         */
        if (!boot_cpu_has(X86_FEATURE_SMAP))
                return;

        /* Replace the NOP3 with CLAC */
        addr[0] = 0x0f;
        addr[1] = 0x01;
        addr[2] = 0xca;
}

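/*
 * Note on the encodings emitted below: on x86-64 this is
 * "movabs $val, %rdi" (48 bf <imm64>), on x86-32 "mov $val, %eax"
 * (b8 <imm32>). Either way the optimized_kprobe pointer lands in the
 * first-argument register for the optimized_callback() call.
 */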
/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
        *addr++ = 0x48;
        *addr++ = 0xbf;
#else
        *addr++ = 0xb8;
#endif
        *(unsigned long *)addr = val;
}

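/*
 * Out-of-line execution template, sketched:
 *
 *   optprobe_template_entry: fake pushes + register save build a pt_regs
 *   optprobe_template_clac:  NOP3, patched to CLAC by synthesize_clac()
 *   optprobe_template_val:   NOPs, patched to "mov $op, <arg1>"
 *   optprobe_template_call:  NOP5, patched to "call optimized_callback"
 *                            then registers and flags are restored
 *   optprobe_template_end:   the copied instructions and the jump back
 *                            to the probed function are appended here
 */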
105 ".pushsection .rodata\n"
106 "optprobe_template_func:\n"
107 ".global optprobe_template_entry\n"
108 "optprobe_template_entry:\n"
110 " pushq $" __stringify(__KERNEL_DS
) "\n"
111 /* Save the 'sp - 8', this will be fixed later. */
114 ".global optprobe_template_clac\n"
115 "optprobe_template_clac:\n"
119 ".global optprobe_template_val\n"
120 "optprobe_template_val:\n"
123 ".global optprobe_template_call\n"
124 "optprobe_template_call:\n"
126 /* Copy 'regs->flags' into 'regs->ss'. */
127 " movq 18*8(%rsp), %rdx\n"
128 " movq %rdx, 20*8(%rsp)\n"
130 /* Skip 'regs->flags' and 'regs->sp'. */
132 /* And pop flags register from 'regs->ss'. */
134 #else /* CONFIG_X86_32 */
136 /* Save the 'sp - 4', this will be fixed later. */
139 ".global optprobe_template_clac\n"
140 "optprobe_template_clac:\n"
144 ".global optprobe_template_val\n"
145 "optprobe_template_val:\n"
147 ".global optprobe_template_call\n"
148 "optprobe_template_call:\n"
150 /* Copy 'regs->flags' into 'regs->ss'. */
151 " movl 14*4(%esp), %edx\n"
152 " movl %edx, 16*4(%esp)\n"
154 /* Skip 'regs->flags' and 'regs->sp'. */
156 /* And pop flags register from 'regs->ss'. */
159 ".global optprobe_template_end\n"
160 "optprobe_template_end:\n"
void optprobe_template_func(void);
STACK_FRAME_NON_STANDARD(optprobe_template_func);

#define TMPL_CLAC_IDX \
        ((long)optprobe_template_clac - (long)optprobe_template_entry)
#define TMPL_MOVE_IDX \
        ((long)optprobe_template_val - (long)optprobe_template_entry)
#define TMPL_CALL_IDX \
        ((long)optprobe_template_call - (long)optprobe_template_entry)
#define TMPL_END_IDX \
        ((long)optprobe_template_end - (long)optprobe_template_entry)

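/*
 * Note: a populated instruction slot therefore consists of TMPL_END_IDX
 * template bytes, op->optinsn.size bytes of copied instructions, and a
 * final JMP32_INSN_SIZE jump back into the probed function.
 */
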
/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
        /* This is possible if op is under delayed unoptimizing */
        if (kprobe_disabled(&op->kp))
                return;

        preempt_disable();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
                /* Adjust stack pointer */
                regs->sp += sizeof(long);
                /* Save skipped registers */
                regs->cs = __KERNEL_CS;
#ifdef CONFIG_X86_32
                regs->gs = 0;
#endif
                regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
                regs->orig_ax = ~0UL;

                __this_cpu_write(current_kprobe, &op->kp);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }
        preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

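/*
 * Copy whole instructions until at least JMP32_INSN_SIZE bytes are
 * covered: the relative jump planted at the probe point overwrites that
 * many bytes, so execution must resume from the out-of-line copy on an
 * instruction boundary.
 */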
static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
{
        struct insn insn;
        int len = 0, ret;

        while (len < JMP32_INSN_SIZE) {
                ret = __copy_instruction(dest + len, src + len, real + len, &insn);
                if (!ret || !can_boost(&insn, src + len))
                        return -EINVAL;
                len += ret;
        }

        /* Check whether the address range is reserved */
        if (ftrace_text_reserved(src, src + len - 1) ||
            alternatives_text_reserved(src, src + len - 1) ||
            jump_label_text_reserved(src, src + len - 1) ||
            static_call_text_reserved(src, src + len - 1))
                return -EBUSY;

        return len;
}

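/*
 * Note: opcode 0xff with ModRM reg field 4 is "jmp r/m" and reg field 5
 * is "jmp far m"; (reg & 6) == 4 matches exactly those two. 0xea is the
 * direct far jump.
 */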
/* Check whether insn is indirect jump */
static int insn_is_indirect_jump(struct insn *insn)
{
        return ((insn->opcode.bytes[0] == 0xff &&
                (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
                insn->opcode.bytes[0] == 0xea); /* Segment based jump */
}

/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
        unsigned long target = 0;

        switch (insn->opcode.bytes[0]) {
        case 0xe0:      /* loopne */
        case 0xe1:      /* loope */
        case 0xe2:      /* loop */
        case 0xe3:      /* jcxz */
        case 0xe9:      /* near relative jump */
        case 0xeb:      /* short relative jump */
                break;
        case 0x0f:
                if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
                        break;
                return 0;
        default:
                if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
                        break;
                return 0;
        }
        target = (unsigned long)insn->next_byte + insn->immediate.value;

        return (start <= target && target <= start + len);
}

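/*
 * Optimizing a probe replaces the INT3 plus the following DISP32_SIZE
 * bytes with a single JMP.d32, so no other code path may branch into
 * those displacement bytes. can_optimize() decodes the whole function
 * to rule that out.
 */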
/* Decode the whole function to ensure no instructions jump into the target */
static int can_optimize(unsigned long paddr)
{
        unsigned long addr, size = 0, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];

        /* Lookup symbol including addr */
        if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
                return 0;

        /*
         * Do not optimize in the entry code due to the unstable
         * stack handling and registers setup.
         */
        if (((paddr >= (unsigned long)__entry_text_start) &&
             (paddr <  (unsigned long)__entry_text_end)))
                return 0;

        /* Check there is enough space for a relative jump. */
        if (size - offset < JMP32_INSN_SIZE)
                return 0;

        /* Decode instructions */
        addr = paddr - offset;
        while (addr < paddr - offset + size) { /* Decode until function end */
                unsigned long recovered_insn;
                int ret;

                if (search_exception_tables(addr))
                        /*
                         * Since some fixup code jumps into this function,
                         * we can't optimize a kprobe in this function.
                         */
                        return 0;
                recovered_insn = recover_probed_instruction(buf, addr);
                if (!recovered_insn)
                        return 0;

                ret = insn_decode_kernel(&insn, (void *)recovered_insn);
                if (ret < 0)
                        return 0;
#ifdef CONFIG_KGDB
                /*
                 * If there is a dynamically installed kgdb sw breakpoint,
                 * this function should not be probed.
                 */
                if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
                    kgdb_has_hit_break(addr))
                        return 0;
#endif
                /* Recover address */
                insn.kaddr = (void *)addr;
                insn.next_byte = (void *)(addr + insn.length);
                /*
                 * Check that no instructions jump into the target, directly
                 * or indirectly.
                 *
                 * The indirect case is present to handle code with jump
                 * tables. When the kernel uses retpolines, the check should in
                 * theory additionally look for jumps to indirect thunks.
                 * However, a kernel built with retpolines or IBT has jump
                 * tables disabled so the check can be skipped altogether.
                 */
                if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) &&
                    !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
                    insn_is_indirect_jump(&insn))
                        return 0;
                if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
                                         DISP32_SIZE))
                        return 0;
                addr += insn.length;
        }

        return 1;
}

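/*
 * Note: a kprobe armed anywhere else inside the copied region would
 * plant its own INT3 in the middle of the JMP.d32, so any such probe
 * must be disarmed before this one can be optimized.
 */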
/* Check optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        int i;
        struct kprobe *p;

        for (i = 1; i < op->optinsn.size; i++) {
                p = get_kprobe(op->kp.addr + i);
                if (p && !kprobe_disarmed(p))
                        return -EEXIST;
        }

        return 0;
}

/* Check the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
                                 kprobe_opcode_t *addr)
{
        return (op->kp.addr <= addr &&
                op->kp.addr + op->optinsn.size > addr);
}

/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
        u8 *slot = op->optinsn.insn;
        if (slot) {
                int len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE;

                /* Record the perf event before freeing the slot */
                if (dirty)
                        perf_event_text_poke(slot, slot, len, NULL, 0);

                free_optinsn_slot(slot, dirty);
                op->optinsn.insn = NULL;
                op->optinsn.size = 0;
        }
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        __arch_remove_optimized_kprobe(op, 1);
}

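/*
 * Note: the perf_event_text_poke() calls pair up over a slot's
 * lifetime: arch_prepare_optimized_kprobe() reports the slot being
 * populated and __arch_remove_optimized_kprobe() reports it being
 * emptied, keeping perf's view of kernel text coherent.
 */
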
/*
 * Copy replacing target instructions
 * Target instructions MUST be relocatable (checked inside)
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
                                  struct kprobe *__unused)
{
        u8 *buf = NULL, *slot;
        int ret, len;
        long rel;

        if (!can_optimize((unsigned long)op->kp.addr))
                return -EILSEQ;

        buf = kzalloc(MAX_OPTINSN_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        op->optinsn.insn = slot = get_optinsn_slot();
        if (!slot) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Verify if the address gap is in 2GB range, because this uses
         * a relative jump.
         */
        rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
        if (abs(rel) > 0x7fffffff) {
                ret = -ERANGE;
                goto err;
        }

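        /*
         * Note: JMP.d32 takes a signed 32-bit displacement relative to
         * the end of the instruction, hence rel = slot - (addr +
         * JMP32_INSN_SIZE) and the +/-2GB check above.
         */
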
        /* Copy arch-dep-instance from template */
        memcpy(buf, optprobe_template_entry, TMPL_END_IDX);

        /* Copy instructions into the out-of-line buffer */
        ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr,
                                          slot + TMPL_END_IDX);
        if (ret < 0)
                goto err;
        op->optinsn.size = ret;
        len = TMPL_END_IDX + op->optinsn.size;

        synthesize_clac(buf + TMPL_CLAC_IDX);

        /* Set probe information */
        synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

        /* Set probe function call */
        synthesize_relcall(buf + TMPL_CALL_IDX,
                           slot + TMPL_CALL_IDX, optimized_callback);

        /* Set returning jmp instruction at the tail of out-of-line buffer */
        synthesize_reljump(buf + len, slot + len,
                           (u8 *)op->kp.addr + op->optinsn.size);
        len += JMP32_INSN_SIZE;

        /*
         * Note: len = TMPL_END_IDX + op->optinsn.size + JMP32_INSN_SIZE is
         * also used in __arch_remove_optimized_kprobe().
         */

        /* We have to use text_poke() for instruction buffer because it is RO */
        perf_event_text_poke(slot, NULL, 0, buf, len);
        text_poke(slot, buf, len);

        ret = 0;
out:
        kfree(buf);
        return ret;

err:
        __arch_remove_optimized_kprobe(op, 0);
        goto out;
}

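/*
 * After optimization the probed address roughly looks like:
 *
 *      op->kp.addr: e9 <rel32>    jmp.d32 op->optinsn.insn
 *
 * where the four displacement bytes have replaced the original
 * instruction bytes saved in op->optinsn.copied_insn.
 */
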
/*
 * Replace breakpoints (INT3) with relative jumps (JMP.d32).
 * Caller must call with locking kprobe_mutex and text_mutex.
 *
 * The caller will have installed a regular kprobe and after that issued
 * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
 * the 4 bytes after the INT3 are unused and can now be overwritten.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
        struct optimized_kprobe *op, *tmp;
        u8 insn_buff[JMP32_INSN_SIZE];

        list_for_each_entry_safe(op, tmp, oplist, list) {
                s32 rel = (s32)((long)op->optinsn.insn -
                        ((long)op->kp.addr + JMP32_INSN_SIZE));

                WARN_ON(kprobe_disabled(&op->kp));

                /* Backup instructions which will be replaced by jump address */
                memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
                       DISP32_SIZE);

                insn_buff[0] = JMP32_INSN_OPCODE;
                *(s32 *)(&insn_buff[1]) = rel;

                text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);

                list_del_init(&op->list);
        }
}

/*
 * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
 *
 * After that, we can restore the 4 bytes after the INT3 to undo what
 * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
 * unused once the INT3 lands.
 */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        u8 new[JMP32_INSN_SIZE] = { INT3_INSN_OPCODE, };
        u8 old[JMP32_INSN_SIZE];
        u8 *addr = op->kp.addr;

        memcpy(old, op->kp.addr, JMP32_INSN_SIZE);
        memcpy(new + INT3_INSN_SIZE,
               op->optinsn.copied_insn,
               JMP32_INSN_SIZE - INT3_INSN_SIZE);

        text_poke(addr, new, INT3_INSN_SIZE);
        text_poke_sync();
        text_poke(addr + INT3_INSN_SIZE,
                  new + INT3_INSN_SIZE,
                  JMP32_INSN_SIZE - INT3_INSN_SIZE);
        text_poke_sync();

        perf_event_text_poke(op->kp.addr, old, JMP32_INSN_SIZE, new, JMP32_INSN_SIZE);
}

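/*
 * Note: the two text_poke()/text_poke_sync() steps above undo
 * arch_optimize_kprobes() in reverse order: first the JMP opcode byte
 * becomes INT3 (so the probe keeps firing), and only once all CPUs are
 * known to observe that INT3 are the displacement bytes restored.
 */
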
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must call with locking kprobe_mutex.
 */
void arch_unoptimize_kprobes(struct list_head *oplist,
                             struct list_head *done_list)
{
        struct optimized_kprobe *op, *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}

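/*
 * Note: entering the slot at TMPL_END_IDX skips the pt_regs setup and
 * the optimized_callback() invocation, executing only the copied
 * instructions and the jump back; this is the detour path used after a
 * handler has already run via the INT3.
 */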
int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
        struct optimized_kprobe *op;

        if (p->flags & KPROBE_FLAG_OPTIMIZED) {
                /* This kprobe is really able to run optimized path. */
                op = container_of(p, struct optimized_kprobe, kp);
                /* Detour through copied instructions */
                regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
                if (!reenter)
                        reset_current_kprobe();
                return 1;
        }
        return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);