/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality.
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
#ifdef CONFIG_X86_64
#define stack_addr(regs) ((unsigned long *)regs->sp)
#else
/*
 * "&regs->sp" looks wrong, but it's correct for x86_32.  x86_32 CPUs
 * don't save the ss and esp registers if the CPU is already in kernel
 * mode when it traps.  So for kprobes, regs->sp and regs->ss are not
 * the [nonexistent] saved stack pointer and ss register, but rather
 * the top 8 bytes of the pre-int3 stack.  So &regs->sp happens to
 * point to the top of the pre-int3 stack.
 */
#define stack_addr(regs) ((unsigned long *)&regs->sp)
#endif
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
/*
 * Undefined/reserved opcodes, conditional jump, Opcode Extension
 * Groups, and some special opcodes cannot be boosted.
 */
static const u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
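
/*
 * Illustrative helper (a sketch added for exposition, not part of the
 * original file): each W() row packs sixteen per-opcode flags into bit
 * positions (row % 32) .. (row % 32) + 15 of one u32, so a 256-entry
 * boolean table fits in 256/32 words.  A lookup such as
 * test_bit(opcode, (unsigned long *)table) is therefore equivalent to:
 */
static inline int __maybe_unused opcode_flag(const u32 *table, u8 opcode)
{
	/* word opcode/32, bit opcode%32 */
	return (table[opcode >> 5] >> (opcode & 0x1f)) & 1;
}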
static const u32 onebyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
	W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
	W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
	W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
	W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
	W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
	W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
	W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
	W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
	W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
	W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
	W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
static const u32 twobyte_has_modrm[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      -----------------------------------------------         */
	W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
	W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
	W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
	W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
	W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
	W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
	W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
	W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
	W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
	W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
	W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* ff */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};

#undef W
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};

const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
static void __kprobes set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		s32 raddr;
	} __attribute__((packed)) *jop;

	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
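
/*
 * Worked example (illustrative, addresses assumed): RELATIVEJUMP_INSTRUCTION
 * is the 0xe9 (jmp rel32) opcode, a 5-byte instruction whose displacement is
 * measured from the end of the instruction -- hence the "+ 5" above.  A jump
 * written at 0x1000 that targets 0x1080 encodes as e9 7b 00 00 00, since
 * 0x1080 - (0x1000 + 5) = 0x7b.
 */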
/*
 * Check for the REX prefix which can only exist on X86_64.
 * X86_32 always returns 0.
 */
static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
{
#ifdef CONFIG_X86_64
	if ((*insn & 0xf0) == 0x40)
		return 1;
#endif
	return 0;
}
/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software-interruptions */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	/*
	 * On X86_64, 0x40-0x4f are REX prefixes, so we need to look
	 * at the next byte instead -- but of course not recurse infinitely.
	 */
	if (is_REX_prefix(insn))
		return is_IF_modifier(++insn);

	return 0;
}
/*
 * Adjust the displacement if the instruction uses the %rip-relative
 * addressing mode.
 * If it does, return the address of the 32-bit displacement word.
 * If not, return null.
 * Only applicable to 64-bit x86.
 */
static void __kprobes fix_riprel(struct kprobe *p)
{
#ifdef CONFIG_X86_64
	u8 *insn = p->ainsn.insn;
	s64 disp;
	int need_modrm;

	/* Skip legacy instruction prefixes.  */
	while (*insn == 0x66 || *insn == 0x67 || *insn == 0x2e ||
	       *insn == 0x3e || *insn == 0x26 || *insn == 0x64 ||
	       *insn == 0x65 || *insn == 0x36 || *insn == 0xf0 ||
	       *insn == 0xf2 || *insn == 0xf3)
		++insn;

	/* Skip REX instruction prefix.  */
	if (is_REX_prefix(insn))
		++insn;

	if (*insn == 0x0f) {
		/* Two-byte opcode.  */
		++insn;
		need_modrm = test_bit(*insn,
				      (unsigned long *)twobyte_has_modrm);
	} else {
		/* One-byte opcode.  */
		need_modrm = test_bit(*insn,
				      (unsigned long *)onebyte_has_modrm);
	}

	if (need_modrm) {
		u8 modrm = *++insn;
		if ((modrm & 0xc7) == 0x05) {
			/* %rip+disp32 addressing mode */
			/* Displacement follows ModRM byte.  */
			++insn;
			/*
			 * The copied instruction uses the %rip-relative
			 * addressing mode.  Adjust the displacement for the
			 * difference between the original location of this
			 * instruction and the location of the copy that will
			 * actually be run.  The tricky bit here is making sure
			 * that the sign extension happens correctly in this
			 * calculation, since we need a signed 32-bit result to
			 * be sign-extended to 64 bits when it's added to the
			 * %rip value and yield the same 64-bit result that the
			 * sign-extension of the original signed 32-bit
			 * displacement would have given.
			 */
			disp = (u8 *) p->addr + *((s32 *) insn) -
			       (u8 *) p->ainsn.insn;
			BUG_ON((s64) (s32) disp != disp); /* Sanity check.  */
			*(s32 *)insn = (s32) disp;
		}
	}
#endif
}
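
/*
 * Worked example (illustrative, addresses assumed): if the probed
 * instruction at p->addr = 0xffffffff81345678 carried disp32 = 0x100 and
 * its copy lives at p->ainsn.insn = 0xffffffff81234567, the adjusted
 * displacement is 0x100 + (p->addr - p->ainsn.insn) = 0x100 + 0x111111
 * = 0x111211, so %rip-relative addressing from the copied slot still
 * resolves to the target the original instruction referenced.
 */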
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	fix_riprel(p);
	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(0);
}
static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(current->thread.debugctlmsr);
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
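
/*
 * Usage sketch (illustrative, not part of this file): a module that wants
 * the return value of a function registers a kretprobe; the handler then
 * runs via the trampoline mechanism below.  "do_fork" and the handler
 * names are assumed example names.
 */
#if 0	/* example only */
static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* On x86, the return value is in %ax when the function returns. */
	printk(KERN_INFO "%s returned %ld\n", ri->rp->kp.symbol_name,
	       regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= my_ret_handler,
	.kp.symbol_name	= "do_fork",
	.maxactive	= 20,	/* probe up to 20 concurrent instances */
};
/* register_kretprobe(&my_kretprobe) in module init, unregister on exit. */
#endif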
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
}
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler.  We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
#ifdef CONFIG_X86_64
		/* TODO: Provide re-entrancy from post_kprobes_handler() and
		 * avoid exception stack corruption while single-stepping on
		 * the instruction of the new probe.
		 */
		arch_disarm_kprobe(p);
		regs->ip = (unsigned long)p->addr;
		reset_current_kprobe();
		preempt_enable_no_resched();
		break;
#endif
	case KPROBE_HIT_ACTIVE:
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kprobes_inc_nmissed_count(p);
		prepare_singlestep(p, regs);
		kcb->kprobe_status = KPROBE_REENTER;
		break;
	case KPROBE_HIT_SS:
		if (p == kprobe_running()) {
			regs->flags &= ~X86_EFLAGS_TF;
			regs->flags |= kcb->kprobe_saved_flags;
			return 0;
		} else {
			/* A probe has been hit in the codepath leading up
			 * to, or just after, single-stepping of a probed
			 * instruction.  This entire codepath should strictly
			 * reside in .kprobes.text section.  Raise a warning
			 * to highlight this peculiar case.
			 */
		}
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it.  Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address.  In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		return 1;
	}

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.  We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb);
			return 1;
		}
	} else if (kprobe_running()) {
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs, kcb);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
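
/*
 * Usage sketch (illustrative, not part of this file): the int3/debug flow
 * above is what runs when a module registers an ordinary kprobe like the
 * one below; "do_fork" and the handler names are assumed examples.
 */
#if 0	/* example only */
static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "pre-handler: ip = %lx, flags = %lx\n",
	       regs->ip, regs->flags);
	return 0;	/* 0: proceed to single-step the copied insn */
}

static void my_post_handler(struct kprobe *p, struct pt_regs *regs,
			    unsigned long flags)
{
	printk(KERN_INFO "post-handler: flags = %lx\n", regs->flags);
}

static struct kprobe my_kprobe = {
	.symbol_name	= "do_fork",
	.pre_handler	= my_pre_handler,
	.post_handler	= my_post_handler,
};
/* register_kprobe(&my_kprobe) in module init, unregister on exit. */
#endif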
/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			/*
			 * Skip cs, ip, orig_ax.
			 * trampoline_handler() will plug in these values
			 */
			"	subq $24, %rsp\n"
			/* (pushq of the general-purpose registers elided) */
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			/* (popq of the general-purpose registers elided) */
			/* Skip orig_ax, ip, cs */
			"	addq $24, %rsp\n"
			"	popfq\n"
#else
			"	pushf\n"
			/*
			 * Skip cs, ip, orig_ax and gs.
			 * trampoline_handler() will plug in these values
			 */
			"	subl $16, %esp\n"
			/* (pushl of the segment and GP registers elided) */
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			/* (popl of the general-purpose registers elided) */
			/* Skip ds, es, fs, gs, orig_ax and ip */
			"	addl $24, %esp\n"
			"	popf\n"
#endif
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* skip the REX prefix */
	if (is_REX_prefix(insn))
		insn++;

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct.  And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->ip,
				   (void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
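
/*
 * Worked example (illustrative, addresses assumed) for fixup 2) above:
 * if the copied call at copy_ip = 0xa000 pushed the return address
 * 0xa005, and the original call sat at orig_ip = 0x9000, the fixed-up
 * return address is 0x9000 + (0xa005 - 0xa000) = 0x9005, i.e. the
 * address following the original instruction.
 */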
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		regs->flags |= kcb->kprobe_old_flags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault.  This could happen
		 * if the handler tries to access user space by
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
		      "       xchg  %%rbx,%%rsp	\n"
#else
		      "       xchgl %%ebx,%%esp	\n"
#endif
		      "       int3			\n"
		      "       .globl jprobe_return_end\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_sp):"memory");
}
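
/*
 * Usage sketch (illustrative, not part of this file): a jprobe handler
 * mirrors the probed function's prototype, inspects the arguments, and
 * must finish with jprobe_return() so the flow above can restore the
 * saved context.  "do_fork" and its prototype are assumed examples.
 */
#if 0	/* example only */
static long my_jdo_fork(unsigned long clone_flags, unsigned long stack_start,
			struct pt_regs *regs, unsigned long stack_size,
			int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* never returns normally */
	return 0;
}

static struct jprobe my_jprobe = {
	.entry		= my_jdo_fork,
	.kp.symbol_name	= "do_fork",
};
/* register_jprobe(&my_jprobe) in module init, unregister on exit. */
#endif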
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != kcb->jprobe_saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), kcb->jprobe_saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
		       kcb->jprobes_stack,
		       MIN_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
int __init arch_init_kprobes(void)
{
	return 0;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}