/*
 *  Kernel Probes (KProbes)
 *  arch/mips/kernel/kprobes.c
 *
 *  Copyright 2006 Sony Corp.
 *  Copyright 2010 Cavium Networks
 *
 *  Some portions copied from the powerpc version.
 *
 *   Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/inst.h>
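
/*
 * Two distinct BREAK encodings are used below: breakpoint_insn replaces
 * the probed instruction when a kprobe is armed, while breakpoint2_insn
 * is planted after the copied instruction in the single-step-out-of-line
 * (SSOL) slot so that execution traps back into the kprobes core once
 * the step has completed.
 */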
static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};
static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
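
/*
 * Return non-zero if @insn is a jump or branch instruction, i.e. one
 * that is architecturally followed by a delay slot.  Such instructions
 * need special handling because the delay-slot instruction must be
 * executed (or skipped) together with the branch itself.
 */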
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
	switch (insn.i_format.opcode) {

		/*
		 * This group contains:
		 * jr and jalr, which are in r_format format.
		 */
	case spec_op:
		switch (insn.r_format.func) {
		case jr_op:
		case jalr_op:
			break;
		default:
			goto insn_ok;
		}

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:

		/*
		 * These are unconditional and in j_format.
		 */
	case jal_op:
	case j_op:

		/*
		 * These are conditional and in i_format.
		 */
	case beq_op:
	case beql_op:
	case bne_op:
	case bnel_op:
	case blez_op:
	case blezl_op:
	case bgtz_op:
	case bgtzl_op:

		/*
		 * These are the FPA/cp1 branch instructions.
		 */
	case cop1_op:

#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case lwc2_op: /* This is bbit0 on Octeon */
	case ldc2_op: /* This is bbit032 on Octeon */
	case swc2_op: /* This is bbit1 on Octeon */
	case sdc2_op: /* This is bbit132 on Octeon */
#endif
		return 1;
	default:
		break;
	}
insn_ok:
	return 0;
}
/*
 * insn_has_ll_or_sc() checks whether the instruction is an ll or sc
 * one.  Putting a breakpoint on top of an atomic ll/sc pair is a bad
 * idea, so we need to prevent it and refuse kprobe insertion for such
 * instructions.  We cannot do much about a breakpoint in the middle of
 * an ll/sc pair; it is up to the user to avoid those places.
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
	int ret = 0;

	switch (insn.i_format.opcode) {
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if ((probe_kernel_read(&prev_insn, p->addr - 1,
				sizeof(mips_instruction)) == 0) &&
				insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delayslot are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 *
	 * On MIPS, if the instruction at the probed address is a
	 * branch instruction, we need to execute the instruction in
	 * the Branch Delay slot (BD) at the time of the probe hit.
	 * As MIPS also doesn't have single-step support, the BD
	 * instruction cannot be executed in-line; it is executed on
	 * the SSOL slot using a normal breakpoint instruction in the
	 * next slot.  So, read the instruction and save it for later
	 * execution.
	 */
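	/*
	 * Resulting SSOL slot layout, for illustration:
	 *
	 *   ainsn.insn[0]: original insn (or the BD-slot insn if the
	 *                  probed insn is a branch)
	 *   ainsn.insn[1]: break BRK_KPROBE_SSTEPBP (breakpoint2_insn)
	 */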
	if (insn_has_delayslot(insn))
		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
	else
		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	free_insn_slot(p->ainsn.insn, 0);
}
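
/*
 * The save/restore pair below supports reentrancy: if a second probe
 * is hit while the first is still being handled (e.g. from within a
 * user handler), the first probe's per-CPU state is stashed in
 * kcb->prev_kprobe and restored once the nested probe has been
 * single-stepped.
 */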
static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}
static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}
static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}
/**
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during probe hit.
 * The result of the evaluation is the updated epc.  The instruction in
 * the delay slot is actually single-stepped (using a normal breakpoint)
 * on the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use,
 * in case we need to execute the delay-slot instruction.  The latter
 * will be false for a NOP instruction in the delay slot and for the
 * branch-likely instructions when the branch is taken, and for those
 * cases we set the SKIP_DELAYSLOT flag in the kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
					struct kprobe_ctlblk *kcb)
{
	union mips_instruction insn = p->opcode;
	long epc;
	int ret = 0;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	if (p->ainsn.insn->word == 0)
		kcb->flags |= SKIP_DELAYSLOT;
	else
		kcb->flags &= ~SKIP_DELAYSLOT;

	ret = __compute_return_epc_for_insn(regs, insn);
	if (ret < 0)
		return ret;

	if (ret == BRANCH_LIKELY_TAKEN)
		kcb->flags |= SKIP_DELAYSLOT;

	kcb->target_epc = regs->cp0_epc;

	return 0;

unaligned:
	pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
	force_sig(SIGBUS, current);
	return -EFAULT;
}
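
/*
 * Illustration: probing a taken "beq a0, a1, target".  The evaluation
 * above advances regs->cp0_epc to "target" and records it in
 * kcb->target_epc; the instruction from the delay slot (already copied
 * into the SSOL slot by arch_prepare_kprobe()) is then single-stepped,
 * after which resume_execution() redirects to kcb->target_epc.
 */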
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	int ret = 0;

	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else if (insn_has_delayslot(p->opcode)) {
		ret = evaluate_branch_instruction(p, regs, kcb);
		if (ret < 0) {
			pr_notice("Kprobes: Error in evaluating branch\n");
			return;
		}
	}
	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "break 0"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.  In the case of branch instructions, the target
 * epc is restored.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	if (insn_has_delayslot(p->opcode))
		regs->cp0_epc = kcb->target_epc;
	else {
		unsigned long orig_epc = kcb->kprobe_saved_epc;
		regs->cp0_epc = orig_epc + 4;
	}
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs, kcb);
			kcb->kprobe_status = KPROBE_REENTER;
			if (kcb->flags & SKIP_DELAYSLOT) {
				resume_execution(p, regs, kcb);
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
			}
			return 1;
		} else {
			if (addr->word != breakpoint_insn.word) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit it, so no
				 * further handling of this interrupt is
				 * appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs, kcb);
	if (kcb->flags & SKIP_DELAYSLOT) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (p->post_handler)
			p->post_handler(p, regs, 0);
		resume_execution(p, regs, kcb);
		preempt_enable_no_resched();
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
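
/*
 * For reference, the status values used above form a small state
 * machine: KPROBE_HIT_ACTIVE (breakpoint taken, pre-handler run),
 * KPROBE_HIT_SS (copied instruction being single-stepped on the SSOL
 * slot), KPROBE_HIT_SSDONE (step finished, post-handler may run) and
 * KPROBE_REENTER (nested probe hit while another one was being
 * handled).
 */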
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->regs[29];

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->cp0_epc = (unsigned long)(jp->entry);

	return 1;
}
/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	/* Assembler quirk necessitates this '0,code' business. */
	asm volatile(
		"break 0,%0\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n"
		: : "n" (BRK_KPROBE_BP) : "memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->cp0_epc >= (unsigned long)jprobe_return &&
	    regs->cp0_epc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}
	return 0;
}
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void kretprobe_trampoline(void);
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)kretprobe_trampoline;
}
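
/*
 * Usage sketch (illustrative, not part of this file): with the hook
 * above in place, a kretprobe registered on some function fires its
 * handler when that function returns through the trampoline.  The
 * handler and symbol names here are assumptions for the example only.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("returned, v0=%lu\n", regs->regs[2]);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= my_ret_handler,
 *	};
 *
 *	register_kretprobe(&my_rp);
 */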
/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a
	 * given task either because multiple functions in the call path
	 * have return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
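
/*
 * Usage sketch (illustrative, not part of this file): a minimal kprobe
 * exercising this port.  "my_pre" and the probed symbol name are
 * assumptions for the example only.
 *
 *	static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at epc=%lx\n", regs->cp0_epc);
 *		return 0;	// let the single-step machinery run
 *	}
 *
 *	static struct kprobe my_probe = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	register_kprobe(&my_probe);	// arms via arch_arm_kprobe()
 *	...
 *	unregister_kprobe(&my_probe);	// disarms via arch_disarm_kprobe()
 */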