/*
 * linux/arch/m32r/kernel/ptrace.c
 *
 * Copyright (C) 2002  Hirokazu Takata, Takeo Takahashi
 * Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 *
 * Original x86 implementation:
 *	By Ross Biro 1/23/92
 *	edited by Linus Torvalds
 *
 * Some code taken from sh version:
 *	Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Some code taken from arm version:
 *	Copyright (C) 2000 Russell King
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/string.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>

/*
 * Get the address of the live pt_regs for the specified task.
 * These are saved onto the top kernel stack when the process
 * is not running.
 *
 * Note: if a user thread is execve'd from kernel space, the
 * kernel stack will not be empty on entry to the kernel, so
 * ptracing these tasks will fail.
 */
static inline struct pt_regs *
get_user_regs(struct task_struct *task)
{
	return (struct pt_regs *)
		((unsigned long)task->thread_info + THREAD_SIZE
		 - sizeof(struct pt_regs));
}

/*
 * This routine will get a word off of the process kernel stack.
 */
static inline unsigned long int
get_stack_long(struct task_struct *task, int offset)
{
	unsigned long *stack;

	stack = (unsigned long *)get_user_regs(task);

	return stack[offset];
}

/*
 * This routine will put a word on the process kernel stack.
 */
static inline int
put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
	unsigned long *stack;

	stack = (unsigned long *)get_user_regs(task);
	stack[offset] = data;

	return 0;
}

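/*
 * reg_offset[] maps a 4-bit general-purpose register number decoded
 * from an instruction (r0-r15, with r13 = fp, r14 = lr, r15 = spu)
 * to the corresponding word offset within struct pt_regs.  It is used
 * by the branch-target computation for single stepping below.
 */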
static int reg_offset[] = {
	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};

/*
 * Read the word at offset "off" in the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *data)
{
	unsigned long tmp;
#ifndef NO_FPU
	struct user * dummy = NULL;
#endif

	if ((off & 3) || (off < 0) || (off > sizeof(struct user) - 3))
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
		__asm__ __volatile__ (
			"mvfc	%0, cr5 \n\t"
			: "=r" (tmp)
		);
		break;
	case PT_CBR: {
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			tmp = ((psw >> 8) & 1);
		}
		break;
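	/*
	 * PT_PSW is presented to user space as a packed word: bits 7:0
	 * hold bits 15:8 of the saved PSW (the active status byte, whose
	 * lowest bit here is the condition bit), and bits 15:8 hold the
	 * low byte of the backup PSW (BBPSW).
	 */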
	case PT_PSW: {
			unsigned long psw, bbpsw;
			psw = get_stack_long(tsk, PT_PSW);
			bbpsw = get_stack_long(tsk, PT_BBPSW);
			tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
		}
		break;
	case PT_PC:
		tmp = get_stack_long(tsk, PT_BPC);
		break;
	case PT_BPC:
		off = PT_BBPC;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			tmp = get_stack_long(tsk, off);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			if (!tsk->used_math) {
				if (off == (long)(&dummy->fpu.fpscr >> 2))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)(&tsk->thread.fpu >> 2))
					[off - (long)&dummy->fpu];
		} else if (off == (long)(&dummy->u_fpvalid >> 2))
			tmp = tsk->used_math;
#endif /* not NO_FPU */
		else
			tmp = 0;
	}

	return put_user(tmp, data);
}

static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long data)
{
	int ret = -EIO;
#ifndef NO_FPU
	struct user * dummy = NULL;
#endif

	if ((off & 3) || off < 0 ||
	    off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
	case PT_BPC:
	case PT_SPI:
		/* We don't allow modifications of evb, bpc and spi. */
		ret = 0;
		break;
	case PT_PSW:
	case PT_CBR: {
			/* We allow modifying only cbr in psw. */
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			psw = (psw & ~0x100) | ((data & 1) << 8);
			ret = put_stack_long(tsk, PT_PSW, psw);
		}
		break;
	case PT_PC:
		off = PT_BPC;
		data &= ~1;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			ret = put_stack_long(tsk, off, data);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			tsk->used_math = 1;
			((long *)&tsk->thread.fpu)
				[off - (long)&dummy->fpu] = data;
			ret = 0;
		} else if (off == (long)(&dummy->u_fpvalid >> 2)) {
			tsk->used_math = data ? 1 : 0;
			ret = 0;
		}
#endif /* not NO_FPU */
		break;
	}

	return ret;
}

/*
 * Get all user integer registers.
 */
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs *regs = get_user_regs(tsk);

	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}

/*
 * Set all user integer registers.
 */
static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs newregs;
	int ret;

	ret = -EFAULT;
	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
		struct pt_regs *regs = get_user_regs(tsk);
		*regs = newregs;
		ret = 0;
	}

	return ret;
}

static inline int
check_condition_bit(struct task_struct *child)
{
	return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
}

static int
check_condition_src(unsigned long op, unsigned long regno1,
		    unsigned long regno2, struct task_struct *child)
{
	unsigned long reg1, reg2;

	reg2 = get_stack_long(child, reg_offset[regno2]);

	switch (op) {
	case 0x0: /* BEQ */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 == reg2;
	case 0x1: /* BNE */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 != reg2;
	case 0x8: /* BEQZ */
		return reg2 == 0;
	case 0x9: /* BNEZ */
		return reg2 != 0;
	case 0xa: /* BLTZ */
		return (int)reg2 < 0;
	case 0xb: /* BGEZ */
		return (int)reg2 >= 0;
	case 0xc: /* BLEZ */
		return (int)reg2 <= 0;
	case 0xd: /* BGTZ */
		return (int)reg2 > 0;
	default:
		/* never reached */
		return 0;
	}
}

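/*
 * M32R instruction words are 32 bits wide.  A word whose MSB is set is
 * a single 32-bit instruction; otherwise it holds two 16-bit
 * instructions, a "left" slot (upper halfword) and a "right" slot
 * (lower halfword).  Bit 15 of the word marks the pair as executed in
 * parallel, in which case the PC advances by 4 rather than 2.  The
 * helpers below decode the instruction at "pc" just far enough to
 * predict the next PC for software single stepping.
 */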
static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op, op2, op3;
	unsigned long disp;
	unsigned long regno;
	int parallel = 0;

	if (insn & 0x00008000)
		parallel = 1;
	if (pc & 3)
		insn &= 0x7fff;	/* right slot */
	else
		insn >>= 16;	/* left slot */

	op = (insn >> 12) & 0xf;
	op2 = (insn >> 8) & 0xf;
	op3 = (insn >> 4) & 0xf;

	if (op == 0x7) {
		switch (op2) {
		case 0xd: /* BNC */
		case 0x9: /* BNCL */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8: /* BCL */
		case 0xc: /* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe: /* BL */
		case 0xf: /* BRA */
			disp = (long)(insn << 24) >> 22;
			*next_pc = (pc & ~0x3) + disp;
			return;
			break;
		}
	} else if (op == 0x1) {
		switch (op2) {
		case 0x0:
			if (op3 == 0xf) {	/* TRAP */
#if 1
				/* pass through */
#else
				/* kernel space is not allowed as next_pc */
				unsigned long evb;
				unsigned long trapno;
				trapno = insn & 0xf;
				__asm__ __volatile__ (
					"mvfc %0, cr5\n"
					: "=r" (evb)
				);
				*next_pc = evb + (trapno << 2);
				return;
#endif
			} else if (op3 == 0xd) {	/* RTE */
				*next_pc = get_stack_long(child, PT_BPC);
				return;
			}
			break;
		case 0xc: /* JC */
			if (op3 == 0xc && check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xd: /* JNC */
			if (op3 == 0xc && !check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xe: /* JL */
		case 0xf: /* JMP */
			if (op3 == 0xc) {	/* JMP */
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		}
	}
	if (parallel)
		*next_pc = pc + 4;
	else
		*next_pc = pc + 2;
}

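/*
 * For 32-bit instructions the branch displacement is decoded by the
 * shift pair "(long)(insn << 8) >> 6": the low 24 bits are moved up to
 * the sign position, sign-extended back down, and multiplied by 4,
 * since displacements are counted in 32-bit words relative to the
 * word-aligned PC.  The 16-bit relative forms use the analogous
 * "(long)(insn << 16) >> 14".
 */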
static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) {	/* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0xd: /* BNC */
		case 0x9: /* BNCL */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8: /* BCL */
		case 0xc: /* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe: /* BL */
		case 0xf: /* BRA */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) {	/* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0: /* BEQ */
		case 0x1: /* BNE */
		case 0x8: /* BEQZ */
		case 0x9: /* BNEZ */
		case 0xa: /* BLTZ */
		case 0xb: /* BGEZ */
		case 0xc: /* BLEZ */
		case 0xd: /* BGTZ */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	*next_pc = pc + 4;
}

static inline void
compute_next_pc(unsigned long insn, unsigned long pc,
		unsigned long *next_pc, struct task_struct *child)
{
	if (insn & 0x80000000)
		compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
	else
		compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
}

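/*
 * Software single step: the predicted next PC is patched with a TRAP1
 * instruction (16-bit opcode 0x10f1) and the original instruction is
 * remembered in the thread's debug_trap record.  If the target is the
 * right slot of a word, the left slot is preserved; if it is a 32-bit
 * instruction or a parallel pair, the whole word becomes TRAP1 || NOP
 * (0x10f17000); otherwise TRAP1 goes into the left slot and the
 * original right-slot instruction is kept.  The saved instruction is
 * written back when the trap fires or when tracing is torn down.
 */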
static int
register_debug_trap(struct task_struct *child, unsigned long next_pc,
		    unsigned long next_insn, unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	unsigned long addr = next_pc & ~3;

	if (p->nr_trap != 0) {
		printk("kernel BUG at %s %d: p->nr_trap = %d\n",
		       __FILE__, __LINE__, p->nr_trap);
		return -1;
	}
	p->addr = addr;
	p->insn = next_insn;
	p->nr_trap++;
	if (next_pc & 3) {
		*code = (next_insn & 0xffff0000) | 0x10f1;
		/* xxx --> TRAP1 */
	} else {
		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
			*code = 0x10f17000;
			/* TRAP1 --> NOP */
		} else {
			*code = (next_insn & 0xffff) | 0x10f10000;
			/* TRAP1 --> xxx */
		}
	}
	return 0;
}

int withdraw_debug_trap_for_signal(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int nr_trap = p->nr_trap;

	if (nr_trap) {
		access_process_vm(child, p->addr, &p->insn, sizeof(p->insn), 1);
		p->nr_trap = 0;
		p->addr = 0;
		p->insn = 0;
	}
	return nr_trap;
}

static int
unregister_debug_trap(struct task_struct *child, unsigned long addr,
		      unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;

	if (p->nr_trap != 1 || p->addr != addr) {
		/* The trap may have been requested by the debugger itself.
		 * ptrace should do nothing in this case.
		 */
		return 0;
	}

	*code = p->insn;
	p->insn = 0;
	p->addr = 0;
	p->nr_trap--;
	return 1;
}

static void
unregister_all_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;

	if (p->nr_trap) {
		access_process_vm(child, p->addr, &p->insn, sizeof(p->insn), 1);
		p->addr = 0;
		p->insn = 0;
		p->nr_trap = 0;
	}
}

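/*
 * After the traced child's text has been modified (a trap embedded or
 * removed, or a PTRACE_POKETEXT), the caches must be invalidated so the
 * CPU does not keep executing a stale copy of the old instruction.
 */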
static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)

	_flush_cache_copyback_all();

#else	/* ! CONFIG_CHIP_M32700 */

	/* Invalidate cache */
	__asm__ __volatile__ (
		"ldi	r0, #-1				\n\t"
		"ldi	r1, #0				\n\t"
		"stb	r1, @r0	; cache off		\n\t"
		";					\n\t"
		"ldi	r0, #-2				\n\t"
		"ldi	r1, #1				\n\t"
		"stb	r1, @r0	; cache invalidate	\n\t"
		".fillinsn				\n"
		"0:					\n\t"
		"ldb	r1, @r0	; invalidate check	\n\t"
		"bnez	r1, 0b				\n\t"
		";					\n\t"
		"ldi	r0, #-1				\n\t"
		"ldi	r1, #1				\n\t"
		"stb	r1, @r0	; cache on		\n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif	/* CONFIG_CHIP_M32700 */
}

/* Embed a debug trap (TRAP1) code */
static int
embed_debug_trap(struct task_struct *child, unsigned long next_pc)
{
	unsigned long next_insn, code;
	unsigned long addr = next_pc & ~3;

	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
	    != sizeof(next_insn)) {
		return -1; /* error */
	}

	/* Set a trap code. */
	if (register_debug_trap(child, next_pc, next_insn, &code)) {
		return -1; /* error */
	}
	if (access_process_vm(child, addr, &code, sizeof(code), 1)
	    != sizeof(code)) {
		return -1; /* error */
	}
	return 0; /* success */
}

void
embed_debug_trap_for_signal(struct task_struct *child)
{
	unsigned long next_pc;
	unsigned long pc, insn;
	int ret;

	pc = get_stack_long(child, PT_BPC);
	ret = access_process_vm(child, pc & ~3, &insn, sizeof(insn), 0);
	if (ret != sizeof(insn)) {
		printk("kernel BUG at %s %d: access_process_vm returns %d\n",
		       __FILE__, __LINE__, ret);
		return;
	}
	compute_next_pc(insn, pc, &next_pc, child);
	if (next_pc & 0x80000000) {
		printk("kernel BUG at %s %d: next_pc = 0x%08x\n",
		       __FILE__, __LINE__, (int)next_pc);
		return;
	}
	if (embed_debug_trap(child, next_pc)) {
		printk("kernel BUG at %s %d: embed_debug_trap error\n",
		       __FILE__, __LINE__);
		return;
	}
	invalidate_cache();
}

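/*
 * Called when an embedded TRAP1 fires: restore the saved instruction at
 * the trap address and rewind BPC by 2 (the size of the TRAP1 opcode),
 * so the child re-executes the original instruction on resume.
 */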
void
withdraw_debug_trap(struct pt_regs *regs)
{
	unsigned long addr;
	unsigned long code;

	addr = (regs->bpc - 2) & ~3;
	regs->bpc -= 2;
	if (unregister_debug_trap(current, addr, &code)) {
		access_process_vm(current, addr, &code, sizeof(code), 1);
		invalidate_cache();
	}
}

static void
init_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	p->nr_trap = 0;
	p->addr = 0;
	p->insn = 0;
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
}

static int
do_ptrace(long request, struct task_struct *child, long addr, long data)
{
	unsigned long tmp;
	int ret;

	switch (request) {
	/*
	 * read word at location "addr" in the child process.
	 */
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		if (ret == sizeof(tmp))
			ret = put_user(tmp, (unsigned long __user *)data);
		else
			ret = -EIO;
		break;

	/*
	 * read the word at location addr in the USER area.
	 */
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr,
				       (unsigned long __user *)data);
		break;

	/*
	 * write the word at location addr.
	 */
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = access_process_vm(child, addr, &data, sizeof(data), 1);
		if (ret == sizeof(data)) {
			ret = 0;
			if (request == PTRACE_POKETEXT) {
				invalidate_cache();
			}
		} else {
			ret = -EIO;
		}
		break;

	/*
	 * write the word at location addr in the USER area.
	 */
	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	/*
	 * continue/restart and stop at next (return from) syscall
	 */
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		wake_up_process(child);
		ret = 0;
		break;

	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL: {
		ret = 0;
		unregister_all_debug_traps(child);
		invalidate_cache();
		if (child->state == TASK_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		wake_up_process(child);
		break;
	}

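	/*
	 * PTRACE_SINGLESTEP is implemented entirely in software: fetch
	 * the instruction word at the child's BPC, predict the next PC,
	 * plant a TRAP1 there via embed_debug_trap(), then let the child
	 * run until it hits the trap.
	 */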
	/*
	 * execute single instruction.
	 */
	case PTRACE_SINGLESTEP: {
		unsigned long next_pc;
		unsigned long pc, insn;

		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			break;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		if ((child->ptrace & PT_DTRACE) == 0) {
			/* Spurious delayed TF traps may occur */
			child->ptrace |= PT_DTRACE;
		}

		/* Compute next pc.  */
		pc = get_stack_long(child, PT_BPC);

		if (access_process_vm(child, pc & ~3, &insn, sizeof(insn), 0)
		    != sizeof(insn))
			break;

		compute_next_pc(insn, pc, &next_pc, child);
		if (next_pc & 0x80000000)
			break;

		if (embed_debug_trap(child, next_pc))
			break;

		invalidate_cache();
		child->exit_code = data;

		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;
	}

	/*
	 * detach a process that was attached.
	 */
	case PTRACE_DETACH:
		ret = 0;
		ret = ptrace_detach(child, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, (void __user *)data);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, (void __user *)data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	int ret;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		if (ret == 0)
			init_debug_traps(child);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret == 0)
		ret = do_ptrace(request, child, addr, data);

out_tsk:
	put_task_struct(child);
out:
	unlock_kernel();

	return ret;
}

/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}