/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/sched/debug.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jump, Opcode Extension
 * Groups, and some special opcodes can not be boosted.
 * This is non-const and volatile to keep gcc from statically
 * optimizing it out, as variable_test_bit makes gcc think only
 * *(unsigned long*) is used.
 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
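/*
 * Illustrative note (not part of the original file): each u32 element
 * packs two W() rows, the first shifted to bits 0-15 and the second to
 * bits 16-31 by the (row % 32) shift, which is why the rows alternate
 * between '|' and ','. can_boost() below then simply does
 * test_bit(second_opcode_byte, twobyte_is_boostable): e.g. the two-byte
 * opcode 0f 44 (cmovz) tests bit 0x44, which the all-ones W(0x40, ...)
 * row sets, so the cmov family is considered boostable.
 */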
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function only switches the current task,
			      but doesn't switch the kernel stack.*/
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
static nokprobe_inline void
__synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __packed *insn;

	insn = (struct __arch_relative_insn *)dest;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}
/* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
void synthesize_reljump(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_reljump);
/* Insert a call instruction at address 'from', which calls address 'to'.*/
void synthesize_relcall(void *dest, void *from, void *to)
{
	__synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
}
NOKPROBE_SYMBOL(synthesize_relcall);
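/*
 * Worked example (illustrative, not part of the original file): both
 * helpers emit a 5-byte instruction -- one opcode byte followed by a
 * signed 32-bit displacement measured from the end of the instruction.
 * Synthesizing a jump that will run at from = 0xffffffff81000010 and
 * target to = 0xffffffff81000000 gives
 *
 *	raddr = to - (from + 5) = -0x15
 *
 * so 'dest' receives the bytes e9 eb ff ff ff (0xe9 being the relative
 * jmp opcode), which the CPU decodes back to the intended target when
 * it executes them at 'from'.
 */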
/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif
	return insn;
}
NOKPROBE_SYMBOL(skip_prefixes);
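/*
 * Illustrative example (not part of the original file): for
 * "mov %rbx,%rax" (48 89 d8) on x86-64, the while loop finds no legacy
 * prefix, the inat_is_rex_prefix() check consumes the REX.W byte 0x48,
 * and the returned pointer lands on the opcode byte 0x89. A legacy
 * prefix such as 0x66 (operand-size override) would have been consumed
 * by the loop first.
 */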
/*
 * Returns non-zero if INSN is boostable.
 * RIP relative instructions are adjusted at copying time in 64-bit mode.
 */
int can_boost(struct insn *insn, void *addr)
{
	kprobe_opcode_t opcode;

	if (search_exception_tables((unsigned long)addr))
		return 0;	/* Page fault may occur on this address. */

	/* 2nd-byte opcode */
	if (insn->opcode.nbytes == 2)
		return test_bit(insn->opcode.bytes[1],
				(unsigned long *)twobyte_is_boostable);

	if (insn->opcode.nbytes != 1)
		return 0;

	/* Can't boost Address-size override prefix */
	if (unlikely(inat_is_address_size_prefix(insn->attr)))
		return 0;

	opcode = insn->opcode.bytes[0];

	switch (opcode & 0xf0) {
	case 0x60:
		/* can't boost "bound" */
		return (opcode != 0x62);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0x90:
		return opcode != 0x9a;	/* can't boost call far */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
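/*
 * Illustrative example (not part of the original file): a probe on
 * "push %rbp" (0x55) reaches the default case and is reported
 * boostable, so its copy can run directly from the insn slot followed
 * by a synthesized reljump. A probe on "jz" (0x74) hits the 0x70 case
 * and returns 0: the taken branch would escape the slot, so such
 * probes must fall back to single-stepping.
 */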
static unsigned long
__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;
	unsigned long faddr;

	kp = get_kprobe((void *)addr);
	faddr = ftrace_location(addr);
	/*
	 * Addresses inside the ftrace location are refused by
	 * arch_check_ftrace_location(). Something went terribly wrong
	 * if such an address is checked here.
	 */
	if (WARN_ON(faddr && faddr != addr))
		return 0UL;
	/*
	 * Use the current code if it is not modified by Kprobe
	 * and it cannot be modified by ftrace.
	 */
	if (!kp && !faddr)
		return addr;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction can not be single-stepped
	 * at a different place, so __copy_instruction() tweaks the
	 * displacement of that instruction. In that case, we can't
	 * recover the instruction from kp->ainsn.insn.
	 *
	 * On the other hand, for a normal Kprobe, kp->opcode has a copy
	 * of the first byte of the probed instruction, which is overwritten
	 * by int3. Since the instruction at kp->addr is not modified by
	 * kprobes except for that first byte, we can recover the original
	 * instruction from it and kp->opcode.
	 *
	 * In case of Kprobes using ftrace, we do not have a copy of
	 * the original instruction. In fact, the ftrace location might
	 * be modified at any time and could even be in an inconsistent
	 * state. Fortunately, we know that the original code there is the
	 * ideal 5-byte NOP.
	 */
	if (probe_kernel_read(buf, (void *)addr,
			      MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (faddr)
		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
	else
		buf[0] = kp->opcode;
	return (unsigned long)buf;
}
/*
 * Recover the probed instruction at addr for further analysis.
 * Caller must lock kprobes by kprobe_mutex, or disable preemption,
 * to prevent the kprobes it references from being released.
 * Returns zero if the instruction can not be recovered (or access failed).
 */
unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	unsigned long __addr;

	__addr = __recover_optprobed_insn(buf, addr);
	if (__addr != addr)
		return __addr;

	return __recover_probed_insn(buf, addr);
}
/* Check if paddr is at an instruction boundary */
static int can_probe(unsigned long paddr)
{
	unsigned long addr, __addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 * Also, jump optimization will change the breakpoint to
		 * relative-jump. Since the relative-jump itself is
		 * normally used, we just go through if there is no kprobe.
		 */
		__addr = recover_probed_instruction(buf, addr);
		if (!__addr)
			return 0;
		kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
		insn_get_length(&insn);

		/*
		 * Another debugging subsystem might insert this breakpoint.
		 * In that case, we can't recover it.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		addr += insn.length;
	}

	return (addr == paddr);
}
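/*
 * Illustrative example (not part of the original file): decoding from
 * the symbol start is what enforces the boundary check. If a function
 * begins with a 4-byte instruction and a probe is requested at
 * symbol+2, the walk steps from symbol+0 straight to symbol+4 and
 * never lands on symbol+2, so (addr == paddr) is false and the probe
 * is rejected.
 */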
/*
 * Returns non-zero if opcode modifies the interrupt flag.
 */
static int is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}
/*
 * Copy an instruction, recovering it if it was modified by kprobes, and
 * adjust the displacement if the instruction uses the %rip-relative
 * addressing mode. Note that since @real will be the final place of the
 * copied instruction, the displacement must be adjusted by @real, not @dest.
 * This returns the length of the copied instruction, or 0 on error.
 */
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	unsigned long recovered_insn =
		recover_probed_instruction(buf, (unsigned long)src);

	if (!recovered_insn || !insn)
		return 0;

	/* This can access kernel text if given address is not recovered */
	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
		return 0;

	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
	insn_get_length(insn);

	/* Another subsystem puts a breakpoint, failed to recover */
	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;

#ifdef CONFIG_X86_64
	/* Only x86_64 has RIP relative instructions */
	if (insn_rip_relative(insn)) {
		s64 newdisp;
		u8 *disp;
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode. Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run. The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn->displacement.value
			  - (u8 *) real;
		if ((s64) (s32) newdisp != newdisp) {
			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
				src, real, insn->displacement.value);
			return 0;
		}
		disp = (u8 *) dest + insn_offset_displacement(insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn->length;
}
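/*
 * Worked example (illustrative, not part of the original file): suppose
 * a "mov 0x1000(%rip),%rax" sits at src = 0xffffffff81000000 and its
 * copy will run at real = 0xffffffffa0000000. Because original and copy
 * have the same length, the instruction-length term cancels and keeping
 * the referenced target fixed requires
 *
 *	newdisp = src + disp - real
 *	        = 0xffffffff81001000 - 0xffffffffa0000000
 *	        = -0x1efff000
 *
 * which still fits in an s32, so that value is patched into the copy's
 * displacement field. (Addresses are made up.)
 */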
/* Prepare reljump right after instruction to boost */
static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
			 struct insn *insn)
{
	int len = insn->length;

	if (can_boost(insn, p->addr) &&
	    MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
		/*
		 * These instructions can be executed directly if they
		 * jump back to the correct address.
		 */
		synthesize_reljump(buf + len, p->ainsn.insn + len,
				   p->addr + insn->length);
		len += RELATIVEJUMP_SIZE;
		p->ainsn.boostable = true;
	} else {
		p->ainsn.boostable = false;
	}

	return len;
}
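/*
 * Illustrative slot layout (not part of the original file) for a
 * boosted 3-byte probed instruction, once arch_copy_kprobe() below has
 * written buf back to p->ainsn.insn:
 *
 *	slot[0..2]  copy of the instruction (displacement already fixed)
 *	slot[3..7]  e9 xx xx xx xx -- reljump back to p->addr + 3
 *
 * Execution resumed in this slot thus flows straight back into the
 * original code without a second trap.
 */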
/* Make the page read-only when allocating it */
void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (page)
		set_memory_ro((unsigned long)page & PAGE_MASK, 1);

	return page;
}

/* Restore the page to RW mode before releasing it */
void free_insn_page(void *page)
{
	set_memory_nx((unsigned long)page & PAGE_MASK, 1);
	set_memory_rw((unsigned long)page & PAGE_MASK, 1);
	module_memfree(page);
}
static int arch_copy_kprobe(struct kprobe *p)
{
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];
	int len;

	/* Copy an instruction, recovering it if another optprobe modified it. */
	len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
	if (!len)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
	 * but it doesn't affect the boostable check.
	 */
	len = prepare_boost(buf, p, &insn);

	/* Check whether the instruction modifies the Interrupt Flag or not */
	p->ainsn.if_modifier = is_IF_modifier(buf);

	/* Also, displacement change doesn't affect the first byte */
	p->opcode = buf[0];

	/* OK, write back the instruction(s) into the ROX insn buffer */
	text_poke(p->ainsn.insn, buf, len);

	return 0;
}
int arch_prepare_kprobe(struct kprobe *p)
{
	int ret;

	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	ret = arch_copy_kprobe(p);
	if (ret) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}

	return ret;
}
void arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
		p->ainsn.insn = NULL;
	}
}
static nokprobe_inline void
save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static nokprobe_inline void
restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}
static nokprobe_inline void
set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
		   struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (p->ainsn.if_modifier)
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
static nokprobe_inline void clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static nokprobe_inline void restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
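/*
 * Illustrative sketch (not part of the original file): at function
 * entry, right after the "call", the stack top holds the caller's
 * return address. arch_prepare_kretprobe() swaps it:
 *
 *	before:  [rsp] -> 0xffffffff811234aa   (real return site)
 *	after:   [rsp] -> &kretprobe_trampoline
 *
 * and stashes the real address in ri->ret_addr, so trampoline_handler()
 * can both run the user handler and restore the original control flow.
 * (The address above is made up.)
 */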
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use single
		 * stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			  struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SS:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
NOKPROBE_SYMBOL(reenter_kprobe);
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_int3_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			if (!skip_singlestep(p, regs, kcb))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_int3_handler);
/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the handler of the
 * kretprobe.
 */
asm(
	".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
#ifdef CONFIG_X86_64
	/* We don't bother saving the ss register */
	"	pushq %rsp\n"
	"	pushfq\n"
	SAVE_REGS_STRING
	"	movq %rsp, %rdi\n"
	"	call trampoline_handler\n"
	/* Replace saved sp with true return address. */
	"	movq %rax, 152(%rsp)\n"
	RESTORE_REGS_STRING
	"	popfq\n"
#else
	"	pushf\n"
	SAVE_REGS_STRING
	"	movl %esp, %eax\n"
	"	call trampoline_handler\n"
	/* Move flags to cs */
	"	movl 56(%esp), %edx\n"
	"	movl %edx, 52(%esp)\n"
	/* Replace saved flags with true return address. */
	"	movl %eax, 56(%esp)\n"
	RESTORE_REGS_STRING
	"	popf\n"
#endif
	"	ret\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(kretprobe_trampoline);
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
/*
 * Called from kretprobe_trampoline
 */
__visible __used void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry(ri, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}
NOKPROBE_SYMBOL(trampoline_handler);
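/*
 * Illustrative scenario (not part of the original file): two kretprobes
 * registered on the same function push two instances at entry. The
 * second instance saves the stack slot after the first has already
 * replaced it with the trampoline, so the per-task list at return looks
 * like (head first):
 *
 *	ri B: ret_addr = &kretprobe_trampoline
 *	ri A: ret_addr = <real return site>
 *
 * The first loop above walks until it finds the non-trampoline address,
 * which is why only the chronologically first instance carries the real
 * return address.
 */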
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = true;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = true;
			goto no_change;
		}
	default:
		break;
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
NOKPROBE_SYMBOL(resume_execution);
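/*
 * Worked example (illustrative, not part of the original file):
 * single-stepping a copied "call" pushes a return address pointing just
 * past the copy, not past the original. With orig_ip =
 * 0xffffffff81000000 and copy_ip = 0xffffffffa0000000, a pushed return
 * address of copy_ip + 5 becomes
 *
 *	*tos = orig_ip + ((copy_ip + 5) - copy_ip) = orig_ip + 5
 *
 * i.e. the address right after the original call site, exactly what a
 * non-probed call would have pushed. (Addresses are made up.)
 */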
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
int kprobe_debug_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_debug_handler);
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
		/* This must happen on single-stepping */
		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
			kcb->kprobe_status != KPROBE_REENTER);
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		/*
		 * Trap flag (TF) has been set here because this fault
		 * happened where the single stepping will be done.
		 * So clear it by resetting the current kprobe:
		 */
		regs->flags &= ~X86_EFLAGS_TF;

		/*
		 * If the TF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs, trapnr))
			return 1;

		/*
		 * fixup routine could not handle it,
		 * Let do_page_fault() fix it.
		 */
	}

	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	if (val == DIE_GPF) {
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
	}

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
	 * a raw stack chunk with redzones:
	 */
	__memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return() which skips the normal return
	 * path of the function, and this messes up the accounting of
	 * the function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
void jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* Unpoison stack redzones in the frames we are going to jump over. */
	kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
NOKPROBE_SYMBOL(jprobe_return);
NOKPROBE_SYMBOL(jprobe_return_end);
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_regs(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_regs(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		__memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(longjmp_break_handler);
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return (addr >= (unsigned long)__kprobes_text_start &&
		addr < (unsigned long)__kprobes_text_end) ||
	       (addr >= (unsigned long)__entry_text_start &&
		addr < (unsigned long)__entry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}