/*
 * linux/arch/m32r/kernel/ptrace.c
 *
 * Copyright (C) 2002  Hirokazu Takata, Takeo Takahashi
 * Copyright (C) 2004  Hirokazu Takata, Kei Sakamoto
 *
 * Original x86 implementation:
 *	edited by Linus Torvalds
 *
 * Some code taken from sh version:
 *	Copyright (C) 1999, 2000  Kaz Kojima & Niibe Yutaka
 * Some code taken from arm version:
 *	Copyright (C) 2000 Russell King
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/signal.h>

#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
/*
 * This routine will get a word off of the process kernel stack.
 */
static inline unsigned long int
get_stack_long(struct task_struct *task, int offset)
{
	unsigned long *stack;

	stack = (unsigned long *)task_pt_regs(task);

	return stack[offset];
}
/*
 * This routine will put a word on the process kernel stack.
 */
static inline int
put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
	unsigned long *stack;

	stack = (unsigned long *)task_pt_regs(task);
	stack[offset] = data;

	return 0;
}
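/*
 * Illustrative use (not from the original file): the PT_* constants from
 * <asm/ptrace.h> are word offsets into the saved pt_regs, so the traced
 * child's condition bit can be read and written with, for example,
 *
 *	psw = get_stack_long(child, PT_PSW);
 *	put_stack_long(child, PT_PSW, psw | 0x100);
 */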
static int reg_offset[] = {
	PT_R0,  PT_R1,  PT_R2,  PT_R3,  PT_R4,  PT_R5,  PT_R6,  PT_R7,
	PT_R8,  PT_R9,  PT_R10, PT_R11, PT_R12, PT_FP,  PT_LR,  PT_SPU,
};
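/*
 * reg_offset[] maps an m32r general-purpose register number, as encoded in
 * an instruction's register field, to the corresponding PT_* word offset in
 * pt_regs.  The branch/jump emulation below indexes it with that field, for
 * example:
 *
 *	regno = insn & 0xf;
 *	target = get_stack_long(child, reg_offset[regno]);
 */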
/*
 * Read the word at offset "off" in the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *data)
{
	unsigned long tmp;
#ifndef NO_FPU
	struct user *dummy = NULL;
#endif

	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
		__asm__ __volatile__ (
			"mvfc	%0, cr5 \n\t"
			: "=r" (tmp)
		);
		break;
	case PT_CBR: {
		unsigned long psw;

		psw = get_stack_long(tsk, PT_PSW);
		tmp = ((psw >> 8) & 1);
		break;
	}
	case PT_PSW: {
		unsigned long psw, bbpsw;

		psw = get_stack_long(tsk, PT_PSW);
		bbpsw = get_stack_long(tsk, PT_BBPSW);
		tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
		break;
	}
	case PT_PC:
		tmp = get_stack_long(tsk, PT_BPC);
		break;
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			tmp = get_stack_long(tsk, off);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			if (!tsk_used_math(tsk)) {
				if (off == (long)(&dummy->fpu.fpscr >> 2))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)(&tsk->thread.fpu >> 2))
					[off - (long)&dummy->fpu];
		} else if (off == (long)(&dummy->u_fpvalid >> 2))
			tmp = !!tsk_used_math(tsk);
#endif /* not NO_FPU */
		else
			tmp = 0;
		break;
	}

	return put_user(tmp, data);
}
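/*
 * Tracer-side sketch (an illustration, not part of this file): PTRACE_PEEKUSR
 * takes a byte offset into struct user, which must be word-aligned here, so
 * a debugger might read the saved frame pointer with
 *
 *	errno = 0;
 *	val = ptrace(PTRACE_PEEKUSR, pid, (void *)(PT_FP << 2), NULL);
 */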
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long data)
{
	int ret = -EIO;
#ifndef NO_FPU
	struct user *dummy = NULL;
#endif

	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;
	switch (off) {
	case PT_EVB:
	case PT_BPC:
	case PT_SPI:
		/* We don't allow modifying evb. */
		ret = 0;
		break;
	case PT_PSW:
	case PT_CBR: {
		/* Only the cbr bit in psw may be modified. */
		unsigned long psw;

		psw = get_stack_long(tsk, PT_PSW);
		psw = (psw & ~0x100) | ((data & 1) << 8);
		ret = put_stack_long(tsk, PT_PSW, psw);
		break;
	}
	case PT_PC:
		off = PT_BPC;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			ret = put_stack_long(tsk, off, data);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			set_stopped_child_used_math(tsk);
			((long *)&tsk->thread.fpu)
				[off - (long)&dummy->fpu] = data;
			ret = 0;
		} else if (off == (long)(&dummy->u_fpvalid >> 2)) {
			conditional_stopped_child_used_math(data, tsk);
			ret = 0;
		}
#endif /* not NO_FPU */
		break;
	}

	return ret;
}
/*
 * Get all user integer registers.
 */
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
}
/*
 * Set all user integer registers.
 */
static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs newregs;
	int ret;

	ret = -EFAULT;
	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
		struct pt_regs *regs = task_pt_regs(tsk);
		*regs = newregs;
		ret = 0;
	}

	return ret;
}
static inline int
check_condition_bit(struct task_struct *child)
{
	return (int)((get_stack_long(child, PT_PSW) >> 8) & 1);
}
static int
check_condition_src(unsigned long op, unsigned long regno1,
		    unsigned long regno2, struct task_struct *child)
{
	unsigned long reg1, reg2;

	reg2 = get_stack_long(child, reg_offset[regno2]);

	switch (op) {
	case 0x0: /* BEQ */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 == reg2;
	case 0x1: /* BNE */
		reg1 = get_stack_long(child, reg_offset[regno1]);
		return reg1 != reg2;
	case 0x8: /* BEQZ */
		return reg2 == 0;
	case 0x9: /* BNEZ */
		return reg2 != 0;
	case 0xa: /* BLTZ */
		return (int)reg2 < 0;
	case 0xb: /* BGEZ */
		return (int)reg2 >= 0;
	case 0xc: /* BLEZ */
		return (int)reg2 <= 0;
	case 0xd: /* BGTZ */
		return (int)reg2 > 0;
	default:
		/* never reached */
		return 0;
	}
}
static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op, op2, op3;
	unsigned long disp;
	unsigned long regno;
	int parallel = 0;

	if (insn & 0x00008000)
		parallel = 1;
	if (pc & 3)
		insn &= 0x7fff;	/* right slot */
	else
		insn >>= 16;	/* left slot */

	op = (insn >> 12) & 0xf;
	op2 = (insn >> 8) & 0xf;
	op3 = (insn >> 4) & 0xf;

	if (op == 0x7) { /* branch */
		switch (op2) {
		case 0x9: /* BNCL */
		case 0xd: /* BNC */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8: /* BCL */
		case 0xc: /* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe: /* BL */
		case 0xf: /* BRA */
			disp = (long)(insn << 24) >> 22;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0x1) {
		switch (op2) {
		case 0x0:
			if (op3 == 0xf) { /* TRAP */
#if 1
				/* pass through */
#else
				/* kernel space is not allowed as next_pc */
				unsigned long evb;
				unsigned long trapno;

				trapno = insn & 0xf;
				__asm__ __volatile__ (
					"mvfc %0, cr5\n"
					: "=r" (evb)
				);
				*next_pc = evb + (trapno << 2);
				return;
#endif
			} else if (op3 == 0xd) { /* RTE */
				*next_pc = get_stack_long(child, PT_BPC);
				return;
			}
			break;
		case 0xc: /* JC */
			if (op3 == 0xc && check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xd: /* JNC */
			if (op3 == 0xc && !check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xe: /* JL */
		case 0xf: /* JMP */
			if (op3 == 0xc) { /* JMP */
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		}
	}
	if (parallel)
		*next_pc = pc + 4;
	else
		*next_pc = pc + 2;
}
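/*
 * Displacement arithmetic used above, worked through: after slot selection
 * the low 8 bits of a 16-bit branch hold a signed word displacement, and
 * (long)(insn << 24) >> 22 sign-extends that field and scales it by 4.
 * For example, a field of 0xfe sign-extends to -2 words, i.e. -8 bytes
 * from (pc & ~0x3).
 */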
static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) { /* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0x9: /* BNCL */
		case 0xd: /* BNC */
			if (!check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8: /* BCL */
		case 0xc: /* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe: /* BL */
		case 0xf: /* BRA */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) { /* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0: /* BEQ */
		case 0x1: /* BNE */
		case 0x8: /* BEQZ */
		case 0x9: /* BNEZ */
		case 0xa: /* BLTZ */
		case 0xb: /* BGEZ */
		case 0xc: /* BLEZ */
		case 0xd: /* BGTZ */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	*next_pc = pc + 4;
}
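/*
 * The 32-bit forms follow the same pattern: (long)(insn << 8) >> 6
 * sign-extends the 24-bit displacement field and scales it by 4, and
 * (long)(insn << 16) >> 14 does the same for the 16-bit displacement of
 * the compare-and-branch instructions.
 */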
static void
compute_next_pc(unsigned long insn, unsigned long pc,
		unsigned long *next_pc, struct task_struct *child)
{
	if (insn & 0x80000000)
		compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
	else
		compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
}
static int
register_debug_trap(struct task_struct *child, unsigned long next_pc,
		    unsigned long next_insn, unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	unsigned long addr = next_pc & ~3;

	if (p->nr_trap == MAX_TRAPS) {
		printk("kernel BUG at %s %d: p->nr_trap = %d\n",
		       __FILE__, __LINE__, p->nr_trap);
		return -1;	/* error */
	}
	p->addr[p->nr_trap] = addr;
	p->insn[p->nr_trap] = next_insn;
	p->nr_trap++;
	if (next_pc & 3) {
		/* next_pc is in the right halfword slot */
		*code = (next_insn & 0xffff0000) | 0x10f1;
	} else {
		/* next_pc is in the left halfword slot */
		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
			/* 32-bit insn or parallel pair: replace the word */
			*code = 0x10f17000;
		} else {
			*code = (next_insn & 0xffff) | 0x10f10000;
		}
	}
	return 0;
}
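/*
 * Note on the constants above: in the 16-bit encoding decoded by
 * compute_next_pc_for_16bit_insn() (op = 0x1, op2 = 0x0, op3 = 0xf),
 * "trap #n" assembles to 0x10f0 | n, so 0x10f1 is TRAP1.  The breakpoint
 * opcode is placed in whichever halfword slot next_pc falls into:
 * 0xXXXX10f1 for the right slot, 0x10f1XXXX for the left slot.
 */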
static int
unregister_debug_trap(struct task_struct *child, unsigned long addr,
		      unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	/* Search debug trap entry. */
	for (i = 0; i < p->nr_trap; i++) {
		if (p->addr[i] == addr)
			break;
	}
	if (i >= p->nr_trap) {
		/* The trap may be requested from the debugger.
		 * ptrace should do nothing in this case.
		 */
		return 0;
	}

	/* Recover the original instruction code. */
	*code = p->insn[i];

	/* Shift debug trap entries. */
	while (i < p->nr_trap - 1) {
		p->insn[i] = p->insn[i + 1];
		p->addr[i] = p->addr[i + 1];
		i++;
	}
	p->nr_trap--;
	return 1;
}
static void
unregister_all_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	for (i = 0; i < p->nr_trap; i++)
		access_process_vm(child, p->addr[i], &p->insn[i],
				  sizeof(p->insn[i]), 1);
	p->nr_trap = 0;
}
static void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)

	_flush_cache_copyback_all();

#else	/* ! CONFIG_CHIP_M32700 */

	/* Invalidate cache */
	__asm__ __volatile__ (
		"stb	r1, @r0		; cache off		\n\t"
		"stb	r1, @r0		; cache invalidate	\n\t"
		"ldb	r1, @r0		; invalidate check	\n\t"
		"stb	r1, @r0		; cache on		\n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif	/* CONFIG_CHIP_M32700 */
}
/* Embed a debug trap (TRAP1) code */
static int
embed_debug_trap(struct task_struct *child, unsigned long next_pc)
{
	unsigned long next_insn, code;
	unsigned long addr = next_pc & ~3;

	if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
	    != sizeof(next_insn)) {
		return -1;	/* error */
	}

	/* Set a trap code. */
	if (register_debug_trap(child, next_pc, next_insn, &code)) {
		return -1;	/* error */
	}
	if (access_process_vm(child, addr, &code, sizeof(code), 1)
	    != sizeof(code)) {
		return -1;	/* error */
	}
	return 0;	/* success */
}
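/*
 * access_process_vm(..., 1) writes straight into the traced child's text,
 * so callers that plant or remove a trap follow up with invalidate_cache()
 * to keep the instruction cache coherent (see user_enable_single_step()
 * below and the PTRACE_POKETEXT case in arch_ptrace()).
 */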
void
withdraw_debug_trap(struct pt_regs *regs)
{
	unsigned long addr;
	unsigned long code;

	addr = (regs->bpc - 2) & ~3;
	regs->bpc -= 2;
	if (unregister_debug_trap(current, addr, &code)) {
		access_process_vm(current, addr, &code, sizeof(code), 1);
		invalidate_cache();
	}
}
void
init_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	p->nr_trap = 0;
	for (i = 0; i < MAX_TRAPS; i++) {
		p->addr[i] = 0;
		p->insn[i] = 0;
	}
}
void user_enable_single_step(struct task_struct *child)
{
	unsigned long next_pc;
	unsigned long pc, insn;

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	/* Compute next pc. */
	pc = get_stack_long(child, PT_BPC);

	if (access_process_vm(child, pc & ~3, &insn, sizeof(insn), 0)
	    != sizeof(insn))
		return;

	compute_next_pc(insn, pc, &next_pc, child);
	if (next_pc & 0x80000000)
		return;

	if (embed_debug_trap(child, next_pc))
		return;

	invalidate_cache();
}
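/*
 * Background note (not from this file): the generic ptrace code calls
 * user_enable_single_step()/user_disable_single_step() when the tracer
 * issues PTRACE_SINGLESTEP, so m32r implements single-stepping in software
 * by planting a TRAP1 at the computed next_pc rather than by setting a
 * hardware trace bit.
 */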
void user_disable_single_step(struct task_struct *child)
{
	unregister_all_debug_traps(child);
	invalidate_cache();
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do.. */
}
long
arch_ptrace(struct task_struct *child, long request,
	    unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	/*
	 * read the word at location "addr" in the child process.
	 */
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/*
	 * read the word at location addr in the USER area.
	 */
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	/*
	 * write the word at location addr.
	 */
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		if (ret == 0 && request == PTRACE_POKETEXT)
			invalidate_cache();
		break;

	/*
	 * write the word at location addr in the USER area.
	 */
	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
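/*
 * Tracer-side sketch (an illustration, not part of this file): with the
 * request layout above, a debugger fetches the whole register set through
 * the data argument, e.g.
 *
 *	struct pt_regs regs;
 *	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
 *	printf("stopped at bpc=%#lx\n", regs.bpc);
 */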
/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery. */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * This isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
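/*
 * Tracer-side view (illustration, not part of this file): with
 * PTRACE_O_TRACESYSGOOD set, the wait status for a syscall stop reports
 * SIGTRAP | 0x80, so a tracer can tell a syscall stop from an ordinary
 * SIGTRAP with something like
 *
 *	if (WSTOPSIG(status) == (SIGTRAP | 0x80))
 *		;	/* handle the syscall stop */
 */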