/*
 * arch/arc/kernel/kprobes.c
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>
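
/*
 * Largest chunk of stack a jprobe saves/restores: at most MAX_STACK_SIZE
 * bytes, clamped so the copy never runs past the top of the current
 * kernel stack.
 */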
#define MIN_STACK_SIZE(addr)	min((unsigned long)MAX_STACK_SIZE, \
		(unsigned long)current_thread_info() + THREAD_SIZE - (addr))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Attempt to probe at unaligned address */
	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Address should not be in exception handling code */

	p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
	p->opcode = *p->addr;

	return 0;
}
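
/*
 * Arming plants an UNIMP_S instruction at the probed address, which
 * raises an instruction error exception when hit (routed to us as
 * DIE_IERR below); disarming puts the saved opcode back. Both must flush
 * the I-cache so the core refetches the patched text.
 */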
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = UNIMP_S_INSTRUCTION;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	arch_disarm_kprobe(p);

	/* Can we remove the kprobe in the middle of kprobe handling? */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
				       struct pt_regs *regs)
{
	/* Remove the trap instructions inserted for single step and
	 * restore the original instructions
	 */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}

	return;
}
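
/*
 * ARC single-steps a probed instruction "in place": the original opcode
 * is restored at the probe site and TRAP_S 2 is planted at the next
 * sequential PC (and, if the instruction can branch, at the branch
 * target too). Whichever trap fires re-enters the kprobe machinery via
 * DIE_TRAP, where resume_execution() removes the traps and the probe is
 * re-armed.
 */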
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long next_pc;
	unsigned long tgt_if_br = 0;
	int is_branch;
	unsigned long bta;

	/* Copy the opcode back to the kprobe location and execute the
	 * instruction. Because of this we will not be able to get into the
	 * same kprobe until this kprobe is done
	 */
	*(p->addr) = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));

	/* Now we insert the trap at the next location after this instruction to
	 * single step. If it is a branch we insert the trap at possible branch
	 * targets
	 */
	bta = regs->bta;

	if (regs->status32 & 0x40) {
		/* We are in a delay slot with the branch taken */

		next_pc = bta & ~0x01;

		if (!p->ainsn.is_short) {
			if (bta & 0x01)
				regs->blink += 2;
			else {
				/* Branch not taken */
				next_pc += 2;

				/* next pc is taken from bta after executing the
				 * delay slot instruction
				 */
				regs->bta += 2;
			}
		}

		is_branch = 0;
	} else
		is_branch =
		    disasm_next_pc((unsigned long)p->addr, regs,
			(struct callee_regs *) current->thread.callee_reg,
			&next_pc, &tgt_if_br);

	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

	flush_icache_range((unsigned long)p->ainsn.t1_addr,
			   (unsigned long)p->ainsn.t1_addr +
			   sizeof(kprobe_opcode_t));

	if (is_branch) {
		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));
	}
}
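
/*
 * Entry point for the instruction error exception (DIE_IERR) raised by
 * an armed probe's UNIMP_S. Returns 1 when the event was consumed by
 * kprobes, 0 to let normal exception handling continue.
 */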
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)addr);

	if (p) {
		/*
		 * We have re-entered the kprobe_handler: another kprobe was
		 * hit while within the handler. Save the original kprobe and
		 * single step on the instruction of the new probe without
		 * calling any user handlers, to avoid recursive kprobes.
		 */
		if (kprobe_running()) {
			save_previous_kprobe(kcb);
			set_current_kprobe(p);
			kprobes_inc_nmissed_count(p);
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}

		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/* If we have no pre-handler or it returned 0, we continue with
		 * normal processing. If we have a pre-handler and it returned
		 * non-zero (which is expected from setjmp_pre_handler for a
		 * jprobe), we return without single stepping and leave that to
		 * the break-handler, which is invoked by a kprobe from
		 * jprobe_return()
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
		}

		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
			return 1;
		}
	}

	/* no_kprobe: */
	preempt_enable_no_resched();
	return 0;
}

static int __kprobes arc_post_kprobe_handler(unsigned long addr,
					     struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, addr, regs);

	/* Rearm the kprobe */
	arch_arm_kprobe(cur);

	/*
	 * When we return from the trap instruction we go to the next
	 * instruction. We restored the actual instruction in
	 * resume_execution(), so we want to return to the same address and
	 * execute it.
	 */
	regs->ret = addr;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

/*
 * Fault can be for the instruction being single stepped or for the
 * pre/post handlers in the module.
 * This is applicable for applications like user probes, where we have the
 * probe in user space and the handlers in the kernel.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused the fault. We reset the current kprobe and let the
		 * exception handler treat it as a regular exception. In our
		 * case it doesn't matter because the system will be halted.
		 */
		resume_execution(cur, (unsigned long)cur->addr, regs);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		preempt_enable_no_resched();
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We are here because the instructions in the pre/post handler
		 * caused the fault.
		 */

		/* We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned zero,
		 * try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;

	default:
		break;
	}
	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	unsigned long addr = args->err;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_IERR:
		if (arc_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	case DIE_TRAP:
		if (arc_post_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	default:
		break;
	}

	return ret;
}
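
/*
 * jprobe support: setjmp_pre_handler() saves the register file plus the
 * top of the kernel stack and redirects regs->ret to the user's jprobe
 * entry. That handler runs on the live stack frame and must end in
 * jprobe_return(), whose unimp_s re-enters arc_kprobe_handler() so the
 * break_handler (longjmp_break_handler()) can restore the saved
 * registers and stack.
 */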
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr = regs->sp;

	kcb->jprobe_saved_regs = *regs;
	memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
	regs->ret = (unsigned long)(jp->entry);

	return 1;
}

void __kprobes jprobe_return(void)
{
	__asm__ __volatile__("unimp_s");
	return;
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr;

	*regs = kcb->jprobe_saved_regs;
	sp_addr = regs->sp;
	memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
	preempt_enable_no_resched();

	return 1;
}
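
/*
 * kretprobe support: arch_prepare_kretprobe() swaps the return address
 * in blink for kretprobe_trampoline, so the probed function "returns"
 * into the trampoline. The ordinary kprobe registered on the trampoline
 * (trampoline_p below) then fires, and trampoline_probe_handler() runs
 * the user handler and restores the real return address.
 */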
static void __used kretprobe_trampoline_holder(void)
{
	__asm__ __volatile__(".global kretprobe_trampoline\n"
			     "kretprobe_trampoline:\n"
			     "nop\n");
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->blink;

	/* Replace the return addr with trampoline addr */
	regs->blink = (unsigned long)&kretprobe_trampoline;
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task, either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->ret = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	/* By returning a non-zero value, we tell the kprobe handler that
	 * we don't want the post_handler to run
	 */
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	/* Registering the trampoline code for the kret probe */
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;

	return 0;
}

void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
	notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}
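
/*
 * Illustrative sketch (not part of this file): a client module would
 * plant a probe along these lines, with "some_target_func" standing in
 * for a real kernel symbol:
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "some_target_func",
 *	};
 *	...
 *	ret = register_kprobe(&kp);
 *
 * register_kprobe() ends up in arch_prepare_kprobe()/arch_arm_kprobe()
 * above; hitting the probed address then flows through
 * kprobe_exceptions_notify() -> arc_kprobe_handler().
 */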