/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for ppc64.
 */
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
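/*
 * Validate and prepare a probe point: the probed address must be
 * word-aligned and must not be an mtmsrd/rfid instruction (those cannot
 * safely be single-stepped out of line).  The original instruction is
 * then copied into an executable instruction slot for later stepping.
 */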
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
		printk("Cannot register a kprobe on rfid or mtmsrd\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64 */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret)
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	return ret;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			(unsigned long) p->addr + sizeof(kprobe_opcode_t));
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	free_insn_slot(p->ainsn.insn);
}
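/*
 * Arrange for the next exception to be a single-step trap: set MSR_SE and
 * point the NIP either back at the original address (trap variants are
 * stepped inline) or at the out-of-line copy in the instruction slot.
 */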
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = *p->ainsn.insn;

	regs->msr |= MSR_SE;

	/* single step inline if it is a trap variant */
	if (is_trap(insn))
		regs->nip = (unsigned long)p->addr;
	else
		regs->nip = (unsigned long)p->ainsn.insn;
}
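/*
 * save_previous_kprobe()/restore_previous_kprobe() stack the per-CPU probe
 * state so that a second probe hit from within a handler (reentrancy) can
 * be single-stepped and then unwound back to the original probe.
 */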
static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}
static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}
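/*
 * Hook a function return: record the real return address (the link
 * register) in a kretprobe_instance, then redirect LR to the trampoline
 * so the return fires trampoline_probe_handler().
 */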
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
				struct pt_regs *regs)
{
	struct kretprobe_instance *ri;

	if ((ri = get_free_rp_inst(rp)) != NULL) {
		ri->rp = rp;
		ri->task = current;
		ri->ret_addr = (kprobe_opcode_t *)regs->link;

		/* Replace the return addr with trampoline addr */
		regs->link = (unsigned long)kretprobe_trampoline;
		add_rp_inst(ri);
	} else {
		rp->nmissed++;
	}
}
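/*
 * Entry point from the breakpoint (trap) exception.  Looks up the kprobe
 * registered at regs->nip, deals with reentrant hits and with trap
 * instructions that do not belong to kprobes, runs the pre_handler and
 * then sets up single-stepping of the displaced instruction.
 */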
static inline int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				regs->msr &= ~MSR_SE;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, then it belongs not to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
void kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}
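/*
 * Runs from the single-step exception once the displaced instruction has
 * executed: invoke the post_handler, fix up the NIP via resume_execution()
 * and restore the saved MSR, unwinding reentrant probe state if needed.
 */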
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SE)
		return 0;

	return 1;
}
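/*
 * A fault was taken while a kprobe was active (in a handler or while
 * single-stepping).  Give the kprobe's fault_handler a chance to fix it
 * up; otherwise back out of the single-step state and let the kernel's
 * normal fault handling take over.
 */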
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs);
		regs->msr &= ~MSR_SE;
		regs->msr |= kcb->kprobe_saved_msr;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
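/*
 * Jprobe support: setjmp_pre_handler() saves the register state and
 * redirects execution to the user's jprobe handler.  On ppc64 the jprobe
 * entry is a function descriptor, so both the entry point and the TOC
 * pointer (r2) must be loaded from it.
 */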
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);

	return 1;
}
void __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}
void __kprobes jprobe_return_end(void)
{
};
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs.
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}
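/*
 * The probe registered at kretprobe_trampoline itself: every kretprobe'd
 * function "returns" through the trampoline, and trampoline_probe_handler()
 * recovers the real return address.
 */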
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
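/*
 * Illustrative only, not part of this file: a client of this code
 * registers a probe roughly as sketched below (some_kernel_function and
 * kp_pre_handler() are hypothetical names supplied by the caller).
 *
 *	static struct kprobe kp = {
 *		.addr = (kprobe_opcode_t *) some_kernel_function,
 *		.pre_handler = kp_pre_handler,
 *	};
 *
 *	register_kprobe(&kp);
 *
 * register_kprobe() ends up invoking arch_prepare_kprobe() and
 * arch_arm_kprobe() above to validate the address and patch in
 * BREAKPOINT_INSTRUCTION.
 */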