/*
 * arch/tile/kernel/kprobes.c
 * Kprobes on TILE-Gx
 *
 * Some portions copied from the MIPS version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

#include <arch/opcode.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
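
/*
 * breakpoint_insn is the bundle patched over the probed instruction;
 * breakpoint2_insn is the variant stored at ainsn.insn[1], whose
 * DIE_SSTEPBP bit lets the trap handler distinguish "hit the probe"
 * from "finished single-stepping the copied bundle".
 */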
/*
 * Check whether instruction is branch or jump, or if executing it
 * has different results depending on where it is executed (e.g. lnk).
 */
static int __kprobes insn_has_control(kprobe_opcode_t insn)
{
	if (get_Mode(insn) != 0) {	/* Y-format bundle */
		if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
		    get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
			return 0;

		switch (get_UnaryOpcodeExtension_Y1(insn)) {
		case JALRP_UNARY_OPCODE_Y1:
		case JALR_UNARY_OPCODE_Y1:
		case JRP_UNARY_OPCODE_Y1:
		case JR_UNARY_OPCODE_Y1:
		case LNK_UNARY_OPCODE_Y1:
			return 1;
		default:
			return 0;
		}
	}

	switch (get_Opcode_X1(insn)) {
	case BRANCH_OPCODE_X1:	/* branch instructions */
	case JUMP_OPCODE_X1:	/* jump instructions: j and jal */
		return 1;

	case RRR_0_OPCODE_X1:	/* other jump instructions */
		if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
			return 0;
		switch (get_UnaryOpcodeExtension_X1(insn)) {
		case JALRP_UNARY_OPCODE_X1:
		case JALR_UNARY_OPCODE_X1:
		case JRP_UNARY_OPCODE_X1:
		case JR_UNARY_OPCODE_X1:
		case LNK_UNARY_OPCODE_X1:
			return 1;
		default:
			return 0;
		}
	default:
		return 0;
	}
}
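
/*
 * Rationale for the cases above: these are all pc-relative or
 * pc-producing operations (lnk materializes the address of the next
 * bundle), so executing a copy of one from the out-of-line slot would
 * branch or link relative to the slot's address, not the original pc.
 */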
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr;

	if (addr & (sizeof(kprobe_opcode_t) - 1))
		return -EINVAL;

	if (insn_has_control(*p->addr)) {
		pr_notice("Kprobes for control instructions are not supported\n");
		return -EINVAL;
	}

	/* insn: must be on special executable page on tile. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 */
	memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

	return 0;
}
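
/*
 * Illustrative sketch (not built): how a client exercises the arch
 * hooks above. register_kprobe() ends up calling arch_prepare_kprobe()
 * and arch_arm_kprobe(); the target symbol is an example only.
 */
#if 0
static int example_pre(struct kprobe *kp, struct pt_regs *regs)
{
	pr_info("probe hit at %p\n", kp->addr);
	return 0;	/* fall through to single-step of the copied bundle */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* example target only */
	.pre_handler	= example_pre,
};

/* register_kprobe(&example_kp); ... unregister_kprobe(&example_kp); */
#endif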
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
		sizeof(breakpoint_insn)))
		pr_err("%s: failed to enable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(p);
}
void __kprobes arch_disarm_kprobe(struct kprobe *kp)
{
	unsigned long addr_wr;

	/* Operate on writable kernel text mapping. */
	addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;

	if (probe_kernel_write((void *)addr_wr, &kp->opcode,
		sizeof(kp->opcode)))
		pr_err("%s: failed to disable kprobe\n", __func__);

	smp_wmb();
	flush_insn_slot(kp);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_pc = regs->pc;
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* Single step inline if the instruction is a break. */
	if (p->opcode == breakpoint_insn ||
	    p->opcode == breakpoint2_insn)
		regs->pc = (unsigned long)p->addr;
	else
		regs->pc = (unsigned long)&p->ainsn.insn[0];
}
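
/*
 * Together with arch_prepare_kprobe(), the out-of-line slot is laid
 * out as { original bundle, breakpoint2_insn }: stepping slot[0]
 * immediately traps on slot[1], which raises DIE_SSTEPBP and routes
 * control to post_kprobe_handler() below.
 */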
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)regs->pc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing. */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn[0] == breakpoint_insn) {
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != breakpoint_insn) {
				/*
				 * The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate.
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != breakpoint_insn) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it. */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* Handler has already set things up, so skip ss setup. */
		return 1;
	}

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the breakpoint. To avoid the
 * SMP problems that can occur when we temporarily put back the
 * original opcode to single-step, we single-stepped a copy of the
 * instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	unsigned long orig_pc = kcb->kprobe_saved_pc;

	regs->pc = orig_pc + 8;
}
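
/*
 * TILE-Gx bundles are a fixed 8 bytes wide, and arch_prepare_kprobe()
 * rejects control-flow bundles via insn_has_control(), so advancing
 * the saved pc by one bundle is always the correct resume address.
 */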
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		resume_execution(cur, regs, kcb);
		reset_current_kprobe();
		preempt_enable_no_resched();
	}

	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id(). */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}

	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = regs->sp;

	memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
	       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}
/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
	asm volatile(
		"bpt\n\t"
		".globl jprobe_return_end\n"
		"jprobe_return_end:\n");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (regs->pc >= (unsigned long)jprobe_return &&
	    regs->pc <= (unsigned long)jprobe_return_end) {
		*regs = kcb->jprobe_saved_regs;
		memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
		       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
		preempt_enable_no_resched();

		return 1;
	}

	return 0;
}
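
/*
 * Illustrative sketch (not built): a jprobe client. setjmp_pre_handler()
 * above redirects the probed function into the entry below;
 * jprobe_return() then traps back into longjmp_break_handler(), which
 * restores the saved registers and stack. Target name is an example only.
 */
#if 0
static long example_jentry(long arg)
{
	pr_info("entered with arg %ld\n", arg);
	jprobe_return();	/* mandatory; never returns normally */
	return 0;		/* unreachable */
}

static struct jprobe example_jp = {
	.entry		= example_jentry,
	.kp.symbol_name	= "some_target_function",	/* example only */
};

/* register_jprobe(&example_jp); ... unregister_jprobe(&example_jp); */
#endif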
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		: : : "memory");
}

void kretprobe_trampoline(void);
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->lr;

	/* Replace the return addr with trampoline addr */
	regs->lr = (unsigned long)kretprobe_trampoline;
}
/*
 * Called when the probe at kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the first instance's ret_addr will point to the
	 *	 real return address, and all the rest will point to
	 *	 kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
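
/*
 * Illustrative sketch (not built): a kretprobe client. The lr hijack in
 * arch_prepare_kretprobe() routes the probed function's return through
 * kretprobe_trampoline, whose probe invokes the handler above. The
 * target symbol is an example only.
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	pr_info("returned to %p\n", ri->ret_addr);
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret_handler,
	.kp.symbol_name	= "some_target_function",	/* example only */
	.maxactive	= 16,	/* concurrent instances to pre-allocate */
};

/* register_kretprobe(&example_rp); ... unregister_kretprobe(&example_rp); */
#endif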
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}