/*
 * Kernel probes (kprobes) for SuperH
 *
 * Copyright (C) 2007 Chris Smith <chris.smith@st.com>
 * Copyright (C) 2006 Lineo Solutions, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
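
/*
 * This file implements the SuperH side of the generic kprobes core: probe
 * preparation and arm/disarm, a breakpoint-based single-step scheme, the
 * kretprobe trampoline, and the jprobe setjmp/longjmp handlers.
 */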

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
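
/*
 * Single-stepping over a probed instruction is emulated with further
 * breakpoints (see prepare_singlestep()): these per-CPU slots track the
 * original probe point and the opcode(s) temporarily replaced at the one
 * or two possible next instructions.
 */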
static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);

#define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x)	(((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x)	(((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x)	(((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x)	(((x) & 0xF0FF) == 0x0003)

#define OPCODE_BF_S(x)	(((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x)	(((x) & 0xFF00) == 0x8d00)

#define OPCODE_BF(x)	(((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x)	(((x) & 0xFF00) == 0x8900)

#define OPCODE_RTS(x)	(((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x)	(((x) & 0xFFFF) == 0x002b)
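
/*
 * Example: "jmp @r3" encodes as 0x432b, with the register number in bits
 * 8-11.  OPCODE_JMP() masks that field away before comparing, and
 * prepare_singlestep() then reads the branch target from regs->regs[3].
 */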

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t opcode = *(kprobe_opcode_t *)(p->addr);

	if (OPCODE_RTE(opcode))
		return -EFAULT;	/* Bad breakpoint */

	p->opcode = opcode;

	return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}
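
/*
 * Arming a probe overwrites the probed instruction with the breakpoint
 * opcode (a trapa, see BREAKPOINT_INSTRUCTION) and flushes the icache
 * range so the new opcode is really fetched; disarming writes the saved
 * opcode back.
 */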
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (*p->addr == BREAKPOINT_INSTRUCTION)
		return 1;

	return 0;
}

/*
 * If an illegal slot instruction exception occurs for an address
 * containing a kprobe, remove the probe.
 *
 * Returns 0 if the exception was handled successfully, 1 otherwise.
 */
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);

	if (p != NULL) {
		printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
		       (unsigned int)pc + 2);
		unregister_kprobe(p);
		return 0;
	}

	return 1;
}
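
/*
 * Removal must also tear down any temporary single-step breakpoints that
 * are still planted in the "next instruction" slots on this CPU.
 */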
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	struct kprobe *saved = this_cpu_ptr(&saved_next_opcode);

	if (saved->addr) {
		arch_disarm_kprobe(p);
		arch_disarm_kprobe(saved);

		saved->addr = NULL;
		saved->opcode = 0;

		saved = this_cpu_ptr(&saved_next_opcode2);
		if (saved->addr) {
			arch_disarm_kprobe(saved);

			saved->addr = NULL;
			saved->opcode = 0;
		}
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Singlestep is implemented by disabling the current kprobe and setting one
 * on the next instruction, following branches. Two probes are set if the
 * branch is conditional.
 */
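
/*
 * Note that SH branch instructions compute PC-relative targets from the
 * branch's own address plus 4 (the PC value the pipeline exposes), which
 * is why the target calculations below all start from regs->pc + 4.
 */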
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	__this_cpu_write(saved_current_opcode.addr, (kprobe_opcode_t *)regs->pc);

	if (p != NULL) {
		struct kprobe *op1, *op2;

		arch_disarm_kprobe(p);

		op1 = this_cpu_ptr(&saved_next_opcode);
		op2 = this_cpu_ptr(&saved_next_opcode2);

		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
			unsigned long disp = (p->opcode & 0x0FFF);
			op1->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);

		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			op1->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 +
						 regs->regs[reg_nr]);

		} else if (OPCODE_RTS(p->opcode)) {
			op1->addr = (kprobe_opcode_t *) regs->pr;

		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
			unsigned long disp = (p->opcode & 0x00FF);
			/* case 1: branch not taken */
			op1->addr = p->addr + 1;
			/* case 2: branch taken */
			op2->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			op2->opcode = *(op2->addr);
			arch_arm_kprobe(op2);

		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
			unsigned long disp = (p->opcode & 0x00FF);
			/* case 1: branch not taken */
			op1->addr = p->addr + 2;
			/* case 2: branch taken */
			op2->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			op2->opcode = *(op2->addr);
			arch_arm_kprobe(op2);

		} else {
			/* not a branch: step to the next instruction */
			op1->addr = p->addr + 1;
		}

		op1->opcode = *(op1->addr);
		arch_arm_kprobe(op1);
	}
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->pr;

	/* Replace the return addr with trampoline addr */
	regs->pr = (unsigned long)kretprobe_trampoline;
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (regs->pc);

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		/* Not one of ours: let kernel handle it */
		if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile (".globl kretprobe_trampoline\n"
		      "kretprobe_trampoline:\n\t"
		      "nop\n");
}

/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have a
	 * return probe installed on them, and/or more than one return probe
	 * was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	regs->pc = orig_ret_address;
	kretprobe_hash_unlock(current, &flags);

	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return orig_ret_address;
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	kprobe_opcode_t *addr = NULL;
	struct kprobe *p = NULL;

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	p = this_cpu_ptr(&saved_next_opcode);
	if (p->addr) {
		arch_disarm_kprobe(p);
		p->addr = NULL;
		p->opcode = 0;

		addr = __this_cpu_read(saved_current_opcode.addr);
		__this_cpu_write(saved_current_opcode.addr, NULL);

		p = get_kprobe(addr);
		arch_arm_kprobe(p);

		p = this_cpu_ptr(&saved_next_opcode2);
		if (p->addr) {
			arch_disarm_kprobe(p);
			p->addr = NULL;
			p->opcode = 0;
		}
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the pc back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->pc = (unsigned long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			regs->pc = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct kprobe *p = NULL;
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (args->regs->pc);
	if (val == DIE_TRAP) {
		if (!kprobe_running()) {
			if (kprobe_handler(args->regs)) {
				ret = NOTIFY_STOP;
			} else {
				/* Not a kprobe trap */
				ret = NOTIFY_DONE;
			}
		} else {
			p = get_kprobe(addr);
			if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
			    (kcb->kprobe_status == KPROBE_REENTER)) {
				if (post_kprobe_handler(args->regs))
					ret = NOTIFY_STOP;
			} else {
				if (kprobe_handler(args->regs)) {
					ret = NOTIFY_STOP;
				} else {
					p = __this_cpu_read(current_kprobe);
					if (p->break_handler &&
					    p->break_handler(p, args->regs))
						ret = NOTIFY_STOP;
				}
			}
		}
	}

	return ret;
}
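
/*
 * jprobe support: setjmp_pre_handler() saves the register file and enough
 * of the stack to survive the jprobe handler, then points pc at jp->entry.
 * The handler finishes in jprobe_return(), whose trapa leads to
 * longjmp_break_handler(), which undoes the detour.
 */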
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_r15 = regs->regs[15];
	addr = kcb->jprobe_saved_r15;

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = kcb->jprobe_saved_r15;
	u8 *addr = (u8 *)regs->pc;

	if ((addr >= (u8 *)jprobe_return) &&
	    (addr <= (u8 *)jprobe_return_end)) {
		*regs = kcb->jprobe_saved_regs;

		memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));

		kcb->kprobe_status = KPROBE_HIT_SS;
		preempt_enable_no_resched();
		return 1;
	}

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}