/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * processes.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/*
 * Read a general register set.	 We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], data + i);
	__put_user((long)regs->lo, data + EF_LO - EF_R0);
	__put_user((long)regs->hi, data + EF_HI - EF_R0);
	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

	return 0;
}
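
/*
 * Illustrative sketch, not part of this file: a tracer reads the layout
 * produced above as 38 64-bit slots, indexed with the EF_* offsets from
 * <asm/reg.h>, e.g.
 *
 *	__s64 gregs[38];
 *	ptrace(PTRACE_GETREGS, pid, NULL, gregs);
 *	long pc = gregs[EF_CP0_EPC - EF_R0];
 *	long sp = gregs[29];		// $sp is general register 29
 *
 * 32-bit values arrive sign extended, as noted in the comment above.
 */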
/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], data + i);
	__get_user(regs->lo, data + EF_LO - EF_R0);
	__get_user(regs->hi, data + EF_HI - EF_R0);
	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

	/* badvaddr, status, and cause may not be written.  */

	return 0;
}
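
/*
 * Sketch, not part of this file: a tracer can reuse the PTRACE_GETREGS
 * buffer when writing, e.g.
 *
 *	gregs[EF_CP0_EPC - EF_R0] = new_pc;
 *	ptrace(PTRACE_SETREGS, pid, NULL, gregs);
 *
 * On a 32-bit kernel only the low-order word of each 8-byte slot is
 * consumed; on a big-endian kernel that is the higher-addressed half.
 */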
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;
	unsigned int tmp;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		fpureg_t *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(fregs[i], i + (__u64 __user *) data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);

	/* Read the FIR via cfc1 with the FPU temporarily enabled. */
	preempt_disable();
	if (cpu_has_fpu) {
		unsigned int flags;

		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
			evpe(vpflags);
		} else {
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
		}
	} else {
		tmp = 0;
	}
	preempt_enable();
	__put_user(tmp, data + 65);

	return 0;
}
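
/*
 * Layout seen by the tracer (a sketch, not part of this file): 32 FP
 * registers as 64-bit slots, then FCR31 and FIR as 32-bit words:
 *
 *	__u64 fpregs[33];
 *	ptrace(PTRACE_GETFPREGS, pid, NULL, fpregs);
 *	__u32 fcr31 = ((__u32 *) fpregs)[64];	// data + 64 above
 *	__u32 fir   = ((__u32 *) fpregs)[65];	// data + 65 above
 */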
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	fpureg_t *fregs;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(fregs[i], i + (__u64 __user *) data);

	__get_user(child->thread.fpu.fcr31, data + 64);

	/* FIR may not be written.  */

	return 0;
}
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(current_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(current_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}
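
/*
 * Sketch of a tracer-side read (not part of this file); struct
 * pt_watch_regs and the style enum come from <asm/ptrace.h>:
 *
 *	struct pt_watch_regs wr;
 *	ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
 *	if (wr.style == pt_watch_style_mips32)
 *		n = wr.mips32.num_valid;
 *
 * Unused register slots read back as zero.
 */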
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
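
/*
 * Sketch from the tracer side (not part of this file), assuming the mips64
 * layout: the low three bits of watchlo select the access type (I/R/W, as
 * in the hardware WatchLo register) and watchhi carries the mask bits
 * validated above:
 *
 *	wr.mips64.watchlo[0] = (va & ~7UL) | 1;	// bit 0: break on store
 *	wr.mips64.watchhi[0] = 0;
 *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 */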
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (tsk_used_math(child)) {
				fpureg_t *fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				if (addr & 1)
					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
				else
					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
#endif
#ifdef CONFIG_64BIT
				tmp = fregs[addr - FPR_BASE];
#endif
			} else {
				tmp = -1;	/* FP not yet used */
			}
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR: {	/* implementation / version register */
			unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
			unsigned long irqflags;
			unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

			preempt_disable();
			if (!cpu_has_fpu) {
				preempt_enable();
				break;
			}

#ifdef CONFIG_MIPS_MT_SMTC
			/* Read-modify-write of Status must be atomic */
			local_irq_save(irqflags);
			mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
			if (cpu_has_mipsmt) {
				unsigned int vpflags = dvpe();
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
				evpe(vpflags);
			} else {
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
			}
#ifdef CONFIG_MIPS_MT_SMTC
			emt(mtflags);
			local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
			preempt_enable();
			break;
		}
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			fpureg_t *fregs = get_fpu_regs(child);

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				memset(&child->thread.fpu, ~0,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
#ifdef CONFIG_32BIT
			/*
			 * The odd registers are actually the high order bits
			 * of the values stored in the even registers - unless
			 * we're using r2k_switch.S.
			 */
			if (addr & 1) {
				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
			} else {
				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
				fregs[addr - FPR_BASE] |= data;
			}
#endif
#ifdef CONFIG_64BIT
			fregs[addr - FPR_BASE] = data;
#endif
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}
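
/*
 * Sketch (not part of this file) of the odd/even FPR convention handled in
 * the FPR_BASE cases above: on a 32-bit kernel, reading an odd-numbered FP
 * register via PTRACE_PEEKUSR returns the high word of the containing
 * even/odd 64-bit pair, e.g.
 *
 *	errno = 0;
 *	long lo = ptrace(PTRACE_PEEKUSR, pid, FPR_BASE + 2 * n, NULL);
 *	long hi = ptrace(PTRACE_PEEKUSR, pid, FPR_BASE + 2 * n + 1, NULL);
 *	// 64-bit value of the pair: (hi << 32) | (lo & 0xffffffff)
 */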
static inline int audit_arch(void)
{
	int arch = AUDIT_ARCH_MIPS;
#ifdef CONFIG_64BIT
	arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
	arch |= __AUDIT_ARCH_LE;
#endif
	return arch;
}
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	user_exit();

	/* do the secure computing check first */
	secure_computing_strict(regs->regs[2]);

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	audit_syscall_entry(audit_arch(), regs->regs[2],
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
}
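
/*
 * Sketch (not part of this file) of how a tracer uses the 0x80 marker set
 * above: after PTRACE_SETOPTIONS with PTRACE_O_TRACESYSGOOD, a syscall
 * stop reports
 *
 *	WSTOPSIG(status) == (SIGTRAP | 0x80)
 *
 * which cannot be confused with an ordinary SIGTRAP delivery.
 */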
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	user_enter();
}