arch/powerpc/kernel/ptrace.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#ifdef CONFIG_PPC32
#include <linux/module.h>
#endif

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

/*
 * We prevent mucking around with the reserved area of trap,
 * which is used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return get_user_msr(task);

	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
		return ((unsigned long *)task->thread.regs)[regno];

	return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}
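
/*
 * Example (userspace, hedged sketch): PTRACE_PEEKUSR/POKEUSR address these
 * registers by byte offset into the pt_regs layout, so a tracer reading
 * GPR3 of a stopped child might do something like
 *
 *	long r3 = ptrace(PTRACE_PEEKUSR, pid,
 *			 (void *)(PT_R3 * sizeof(long)), NULL);
 *
 * The offset-to-index conversion this relies on happens in arch_ptrace()
 * further down in this file.
 */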
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);

#else
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpr, 0, -1);
#endif
}

static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
	return 0;
#else
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpr, 0, -1);
#endif
}
#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords. Quadwords 0-31 contain the
 * corresponding vector registers. Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword. Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface. This allows signal handling and ptrace to use the
 * same structures. This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
 */

static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}

static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}

static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr, 0, 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
#endif /* CONFIG_ALTIVEC */
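
/*
 * Example (userspace, hedged sketch): given the layout described above, a
 * tracer could grab the whole VMX state in one call, e.g.
 *
 *	unsigned char vmx[34 * 16];		(room for vr0-31, vscr, vrsave)
 *	ptrace(PTRACE_GETVRREGS, pid, NULL, vmx);
 *
 * after which vscr is the last word of quadword 32 and vrsave the first
 * word at offset 33 * 16.
 */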
#ifdef CONFIG_VSX
/*
 * Currently, to set and get all the vsx state, you need to call the fp
 * and VMX calls as well. This only gets/sets the lower 32 128-bit VSX
 * registers.
 */

static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}

static int vsr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	double buf[32];
	int ret, i;

	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}

static int vsr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	double buf[32];
	int ret, i;

	flush_vsx_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	for (i = 0; i < 32 ; i++)
		target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE

/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *	u32 evr[32];
 *	u64 acc;
 *	u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
#endif /* CONFIG_SPE */
/*
 * These are our native regset flavors.
 */
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
};

static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};

static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
#ifdef CONFIG_PPC64
#include <linux/compat.h>
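
/*
 * 32-bit tracers of a 64-bit task see the GPRs as an array of 32-bit
 * values: gpr32_get() truncates each register to its low word on read,
 * and gpr32_set() stores the 32-bit value back (zero-extended into the
 * 64-bit register).
 */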
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	const unsigned long *regs = &target->thread.regs->gpr[0];
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned long *regs = &target->thread.regs->gpr[0];
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};

static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif	/* CONFIG_PPC64 */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.dbcr0 &= ~DBCR0_BT;
		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_enable_block_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.dbcr0 &= ~DBCR0_IC;
		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_SE;
		regs->msr |= MSR_BE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.dbcr0 &= ~DBCR0_IC;
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
					task->thread.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			unsigned long data)
{
	/* For ppc64 we support one DABR and no IABRs at the moment.
	 * For embedded processors we support one DAC and no IACs at the
	 * moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 *  It was assumed, on previous implementations, that 3 bits were
	 *  passed together with the data address, fitting the design of the
	 *  DABR register, as follows:
	 *
	 *  bit 0: Read flag
	 *  bit 1: Write flag
	 *  bit 2: Breakpoint translation
	 *
	 *  Thus, we use them here in the same way.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & DABR_TRANSLATION))
		return -EIO;

	/* Move contents to the DABR register */
	task->thread.dabr = data;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 *  address, but we will assume only the mode bits will be passed
	 *  as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DACs hold the whole address without any mode flags */
	task->thread.dac1 = data & ~0x3UL;

	if (task->thread.dac1 == 0) {
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
					task->thread.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_instruction_bp(struct task_struct *child,
			       struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);

	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC registers */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.iac1 = bp_info->addr;
			child->thread.iac2 = bp_info->addr2;
			child->thread.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
			    PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.iac3 = bp_info->addr;
			child->thread.iac4 = bp_info->addr2;
			child->thread.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
			    PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one. If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.iac1 = bp_info->addr;
				child->thread.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.iac2 = bp_info->addr;
			child->thread.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.iac3 = bp_info->addr;
			child->thread.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.iac4 = bp_info->addr;
			child->thread.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	child->thread.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.iac1 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.iac2 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.iac3 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.iac4 = 0;
		child->thread.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	child->thread.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}
static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.dac2 = 0;
			child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.dvc2 = 0;
#endif
		child->thread.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range. The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range. The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask. Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	if (child->thread.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.dac1 = bp_info->addr;
	child->thread.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.dbcr2 |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.dbcr2 |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.dbcr2 |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
static long ppc_set_hwdebug(struct task_struct *child,
			    struct ppc_hw_breakpoint *bp_info)
{
	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
	    ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
	    (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
	    (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
	    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
		return -EINVAL;

	if (child->thread.dabr)
		return -ENOSPC;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	child->thread.dabr = (unsigned long)bp_info->addr;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
					child->thread.dbcr1)) {
			child->thread.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	if (data != 1)
		return -EINVAL;
	if (child->thread.dabr == 0)
		return -ENOENT;

	child->thread.dabr = 0;

	return 0;
#endif
}
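
/*
 * Example (userspace, hedged sketch): with the interface above, a debugger
 * could set and later remove a write watchpoint roughly like this:
 *
 *	struct ppc_hw_breakpoint bp = {
 *		.version        = 1,
 *		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
 *		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
 *		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
 *		.addr           = (__u64)(unsigned long)watch_addr,
 *	};
 *	int slot = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
 *	...
 *	ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, slot);
 *
 * The slot number returned by PPC_PTRACE_SETHWDEBUG is what
 * PPC_PTRACE_DELHWDEBUG expects in its data argument.
 */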
/*
 * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls;
 * we mark them as obsolete now and they will be removed in a future version.
 */
static long arch_ptrace_old(struct task_struct *child, long request, long addr,
			    long data)
{
	switch (request) {
	case PPC_PTRACE_GETREGS:	/* Get GPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR, 0, 32 * sizeof(long),
					   (void __user *) data);

	case PPC_PTRACE_SETREGS:	/* Set GPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR, 0, 32 * sizeof(long),
					     (const void __user *) data);

	case PPC_PTRACE_GETFPREGS:	/* Get FPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR, 0, 32 * sizeof(double),
					   (void __user *) data);

	case PPC_PTRACE_SETFPREGS:	/* Set FPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR, 0, 32 * sizeof(double),
					     (const void __user *) data);
	}

	return -EPERM;
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret = -EPERM;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			tmp = ptrace_get_reg(child, (int) index);
		} else {
			flush_fp_to_thread(child);
			tmp = ((unsigned long *)child->thread.fpr)
				[TS_FPRWIDTH * (index - PT_FPR0)];
		}
		ret = put_user(tmp, (unsigned long __user *) data);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			flush_fp_to_thread(child);
			((unsigned long *)child->thread.fpr)
				[TS_FPRWIDTH * (index - PT_FPR0)] = data;
			ret = 0;
		}
		break;
	}

	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
			   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
			   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		dbginfo.num_data_bps = 1;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
		dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (!access_ok(VERIFY_WRITE, data,
			       sizeof(struct ppc_debug_info)))
			return -EFAULT;
		ret = __copy_to_user((struct ppc_debug_info __user *)data,
				     &dbginfo, sizeof(struct ppc_debug_info)) ?
		      -EFAULT : 0;
		break;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (!access_ok(VERIFY_READ, data,
			       sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		ret = __copy_from_user(&bp_info,
				       (struct ppc_hw_breakpoint __user *)data,
				       sizeof(struct ppc_hw_breakpoint)) ?
		      -EFAULT : 0;
		if (!ret)
			ret = ppc_set_hwdebug(child, &bp_info);
		break;
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, addr, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
		ret = -EINVAL;
		/* We only support one DABR and no IABRs at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.dac1,
			       (unsigned long __user *)data);
#else
		ret = put_user(child->thread.dabr,
			       (unsigned long __user *)data);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   (void __user *) data);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     (const void __user *) data);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   (void __user *) data);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     (const void __user *) data);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   (void __user *) data);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     (const void __user *) data);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   (void __user *) data);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     (const void __user *) data);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   (void __user *) data);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     (const void __user *) data);
#endif

	/* Old reverse args ptrace calls */
	case PPC_PTRACE_GETREGS:	/* Get GPRs 0 - 31. */
	case PPC_PTRACE_SETREGS:	/* Set GPRs 0 - 31. */
	case PPC_PTRACE_GETFPREGS:	/* Get FPRs 0 - 31. */
	case PPC_PTRACE_SETFPREGS:	/* Set FPRs 0 - 31. */
		ret = arch_ptrace_old(child, request, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->gpr[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->gpr[0].
		 */
		ret = -1L;

	if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
		if (!test_thread_flag(TIF_32BIT))
			audit_syscall_entry(AUDIT_ARCH_PPC64,
					    regs->gpr[0],
					    regs->gpr[3], regs->gpr[4],
					    regs->gpr[5], regs->gpr[6]);
		else
#endif
			audit_syscall_entry(AUDIT_ARCH_PPC,
					    regs->gpr[0],
					    regs->gpr[3] & 0xffffffff,
					    regs->gpr[4] & 0xffffffff,
					    regs->gpr[5] & 0xffffffff,
					    regs->gpr[6] & 0xffffffff);
	}

	return ret ?: regs->gpr[0];
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	if (unlikely(current->audit_context))
		audit_syscall_exit((regs->ccr & 0x10000000) ? AUDITSC_FAILURE :
				   AUDITSC_SUCCESS, regs->result);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}