/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
37 #include <linux/uaccess.h>
39 #include <asm/pgtable.h>
40 #include <asm/switch_to.h>
42 #include <asm/asm-prototypes.h>
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/syscalls.h>
/*
 * The parameter save area on the stack is used to store arguments being passed
 * to callee function and is located at fixed offset from stack pointer.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif /* CONFIG_PPC32 */
/* Maps a pt_regs field name to its byte offset within struct pt_regs. */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "nip", "gpr0" */
	int offset;		/* offsetof() the field in struct pt_regs */
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Each GPR is reachable under two names: "rN" and "gprN". */
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Shorthand offsetof() helpers for BUILD_BUG_ON layout checks below. */
#define TVSO(f)	(offsetof(struct thread_vr_state, f))
#define TFSO(f)	(offsetof(struct thread_fp_state, f))
#define TSO(f)	(offsetof(struct thread_struct, f))
73 static const struct pt_regs_offset regoffset_table
[] = {
106 REG_OFFSET_NAME(nip
),
107 REG_OFFSET_NAME(msr
),
108 REG_OFFSET_NAME(ctr
),
109 REG_OFFSET_NAME(link
),
110 REG_OFFSET_NAME(xer
),
111 REG_OFFSET_NAME(ccr
),
113 REG_OFFSET_NAME(softe
),
117 REG_OFFSET_NAME(trap
),
118 REG_OFFSET_NAME(dar
),
119 REG_OFFSET_NAME(dsisr
),
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Make sure this CPU's live TM state is pushed into @tsk's thread_struct
 * before ptrace reads/writes it.
 */
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
	/*
	 * If task is not current, it will have been flushed already to
	 * its thread_struct during __switch_to().
	 *
	 * A reclaim flushes ALL the state or if not in TM save TM SPRs
	 * in the appropriate thread structures from live.
	 */
	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
		return;

	if (MSR_TM_SUSPENDED(mfmsr())) {
		tm_reclaim_current(TM_CAUSE_SIGNAL);
	} else {
		tm_enable();
		tm_save_sprs(&(tsk->thread));
	}
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif
149 * regs_query_register_offset() - query register offset from its name
150 * @name: the name of a register
152 * regs_query_register_offset() returns the offset of a register in struct
153 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
155 int regs_query_register_offset(const char *name
)
157 const struct pt_regs_offset
*roff
;
158 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
159 if (!strcmp(roff
->name
, name
))
165 * regs_query_register_name() - query register name from its offset
166 * @offset: the offset of a register in struct pt_regs.
168 * regs_query_register_name() returns the name of a register from its
169 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
171 const char *regs_query_register_name(unsigned int offset
)
173 const struct pt_regs_offset
*roff
;
174 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
175 if (roff
->offset
== offset
)
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
203 static unsigned long get_user_msr(struct task_struct
*task
)
205 return task
->thread
.regs
->msr
| task
->thread
.fpexc_mode
;
208 static int set_user_msr(struct task_struct
*task
, unsigned long msr
)
210 task
->thread
.regs
->msr
&= ~MSR_DEBUGCHANGE
;
211 task
->thread
.regs
->msr
|= msr
& MSR_DEBUGCHANGE
;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Checkpointed-MSR accessor: same contract as get_user_msr() but for
 * the transaction-checkpointed register image in ckpt_regs. */
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}

/* Only MSR_DEBUGCHANGE bits of the checkpointed MSR may be changed. */
static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

/* Low 4 bits of trap are reserved to the kernel; mask them off. */
static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.ckpt_regs.trap = trap & 0xfff0;
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
236 static int get_user_dscr(struct task_struct
*task
, unsigned long *data
)
238 *data
= task
->thread
.dscr
;
242 static int set_user_dscr(struct task_struct
*task
, unsigned long dscr
)
244 task
->thread
.dscr
= dscr
;
245 task
->thread
.dscr_inherit
= 1;
249 static int get_user_dscr(struct task_struct
*task
, unsigned long *data
)
254 static int set_user_dscr(struct task_struct
*task
, unsigned long dscr
)
261 * We prevent mucking around with the reserved area of trap
262 * which are used internally by the kernel.
264 static int set_user_trap(struct task_struct
*task
, unsigned long trap
)
266 task
->thread
.regs
->trap
= trap
& 0xfff0;
271 * Get contents of register REGNO in task TASK.
273 int ptrace_get_reg(struct task_struct
*task
, int regno
, unsigned long *data
)
275 if ((task
->thread
.regs
== NULL
) || !data
)
278 if (regno
== PT_MSR
) {
279 *data
= get_user_msr(task
);
283 if (regno
== PT_DSCR
)
284 return get_user_dscr(task
, data
);
288 * softe copies paca->soft_enabled variable state. Since soft_enabled is
289 * no more used as a flag, lets force usr to alway see the softe value as 1
290 * which means interrupts are not soft disabled.
292 if (regno
== PT_SOFTE
) {
298 if (regno
< (sizeof(struct pt_regs
) / sizeof(unsigned long))) {
299 *data
= ((unsigned long *)task
->thread
.regs
)[regno
];
307 * Write contents of register REGNO in task TASK.
309 int ptrace_put_reg(struct task_struct
*task
, int regno
, unsigned long data
)
311 if (task
->thread
.regs
== NULL
)
315 return set_user_msr(task
, data
);
316 if (regno
== PT_TRAP
)
317 return set_user_trap(task
, data
);
318 if (regno
== PT_DSCR
)
319 return set_user_dscr(task
, data
);
321 if (regno
<= PT_MAX_PUT_REG
) {
322 ((unsigned long *)task
->thread
.regs
)[regno
] = data
;
328 static int gpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
329 unsigned int pos
, unsigned int count
,
330 void *kbuf
, void __user
*ubuf
)
334 if (target
->thread
.regs
== NULL
)
337 if (!FULL_REGS(target
->thread
.regs
)) {
338 /* We have a partial register set. Fill 14-31 with bogus values */
339 for (i
= 14; i
< 32; i
++)
340 target
->thread
.regs
->gpr
[i
] = NV_REG_POISON
;
343 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
345 0, offsetof(struct pt_regs
, msr
));
347 unsigned long msr
= get_user_msr(target
);
348 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &msr
,
349 offsetof(struct pt_regs
, msr
),
350 offsetof(struct pt_regs
, msr
) +
354 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
355 offsetof(struct pt_regs
, msr
) + sizeof(long));
358 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
359 &target
->thread
.regs
->orig_gpr3
,
360 offsetof(struct pt_regs
, orig_gpr3
),
361 sizeof(struct pt_regs
));
363 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
364 sizeof(struct pt_regs
), -1);
369 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
370 unsigned int pos
, unsigned int count
,
371 const void *kbuf
, const void __user
*ubuf
)
376 if (target
->thread
.regs
== NULL
)
379 CHECK_FULL_REGS(target
->thread
.regs
);
381 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
383 0, PT_MSR
* sizeof(reg
));
385 if (!ret
&& count
> 0) {
386 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
387 PT_MSR
* sizeof(reg
),
388 (PT_MSR
+ 1) * sizeof(reg
));
390 ret
= set_user_msr(target
, reg
);
393 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
394 offsetof(struct pt_regs
, msr
) + sizeof(long));
397 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
398 &target
->thread
.regs
->orig_gpr3
,
399 PT_ORIG_R3
* sizeof(reg
),
400 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
));
402 if (PT_MAX_PUT_REG
+ 1 < PT_TRAP
&& !ret
)
403 ret
= user_regset_copyin_ignore(
404 &pos
, &count
, &kbuf
, &ubuf
,
405 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
),
406 PT_TRAP
* sizeof(reg
));
408 if (!ret
&& count
> 0) {
409 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
410 PT_TRAP
* sizeof(reg
),
411 (PT_TRAP
+ 1) * sizeof(reg
));
413 ret
= set_user_trap(target
, reg
);
417 ret
= user_regset_copyin_ignore(
418 &pos
, &count
, &kbuf
, &ubuf
,
419 (PT_TRAP
+ 1) * sizeof(reg
), -1);
425 * Regardless of transactions, 'fp_state' holds the current running
426 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
427 * value of all FPR registers for the current transaction.
429 * Userspace interface buffer layout:
436 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
437 unsigned int pos
, unsigned int count
,
438 void *kbuf
, void __user
*ubuf
)
444 flush_fp_to_thread(target
);
446 /* copy to local buffer then write that out */
447 for (i
= 0; i
< 32 ; i
++)
448 buf
[i
] = target
->thread
.TS_FPR(i
);
449 buf
[32] = target
->thread
.fp_state
.fpscr
;
450 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
452 BUILD_BUG_ON(offsetof(struct thread_fp_state
, fpscr
) !=
453 offsetof(struct thread_fp_state
, fpr
[32]));
455 flush_fp_to_thread(target
);
457 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
458 &target
->thread
.fp_state
, 0, -1);
463 * Regardless of transactions, 'fp_state' holds the current running
464 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
465 * value of all FPR registers for the current transaction.
467 * Userspace interface buffer layout:
475 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
476 unsigned int pos
, unsigned int count
,
477 const void *kbuf
, const void __user
*ubuf
)
483 flush_fp_to_thread(target
);
485 for (i
= 0; i
< 32 ; i
++)
486 buf
[i
] = target
->thread
.TS_FPR(i
);
487 buf
[32] = target
->thread
.fp_state
.fpscr
;
489 /* copy to local buffer then write that out */
490 i
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
494 for (i
= 0; i
< 32 ; i
++)
495 target
->thread
.TS_FPR(i
) = buf
[i
];
496 target
->thread
.fp_state
.fpscr
= buf
[32];
499 BUILD_BUG_ON(offsetof(struct thread_fp_state
, fpscr
) !=
500 offsetof(struct thread_fp_state
, fpr
[32]));
502 flush_fp_to_thread(target
);
504 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
505 &target
->thread
.fp_state
, 0, -1);
509 #ifdef CONFIG_ALTIVEC
511 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
512 * The transfer totals 34 quadword. Quadwords 0-31 contain the
513 * corresponding vector registers. Quadword 32 contains the vscr as the
514 * last word (offset 12) within that quadword. Quadword 33 contains the
515 * vrsave as the first word (offset 0) within the quadword.
517 * This definition of the VMX state is compatible with the current PPC32
518 * ptrace interface. This allows signal handling and ptrace to use the
519 * same structures. This also simplifies the implementation of a bi-arch
520 * (combined (32- and 64-bit) gdb.
523 static int vr_active(struct task_struct
*target
,
524 const struct user_regset
*regset
)
526 flush_altivec_to_thread(target
);
527 return target
->thread
.used_vr
? regset
->n
: 0;
531 * Regardless of transactions, 'vr_state' holds the current running
532 * value of all the VMX registers and 'ckvr_state' holds the last
533 * checkpointed value of all the VMX registers for the current
534 * transaction to fall back on in case it aborts.
536 * Userspace interface buffer layout:
544 static int vr_get(struct task_struct
*target
, const struct user_regset
*regset
,
545 unsigned int pos
, unsigned int count
,
546 void *kbuf
, void __user
*ubuf
)
550 flush_altivec_to_thread(target
);
552 BUILD_BUG_ON(offsetof(struct thread_vr_state
, vscr
) !=
553 offsetof(struct thread_vr_state
, vr
[32]));
555 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
556 &target
->thread
.vr_state
, 0,
557 33 * sizeof(vector128
));
560 * Copy out only the low-order word of vrsave.
566 memset(&vrsave
, 0, sizeof(vrsave
));
568 vrsave
.word
= target
->thread
.vrsave
;
570 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
571 33 * sizeof(vector128
), -1);
578 * Regardless of transactions, 'vr_state' holds the current running
579 * value of all the VMX registers and 'ckvr_state' holds the last
580 * checkpointed value of all the VMX registers for the current
581 * transaction to fall back on in case it aborts.
583 * Userspace interface buffer layout:
591 static int vr_set(struct task_struct
*target
, const struct user_regset
*regset
,
592 unsigned int pos
, unsigned int count
,
593 const void *kbuf
, const void __user
*ubuf
)
597 flush_altivec_to_thread(target
);
599 BUILD_BUG_ON(offsetof(struct thread_vr_state
, vscr
) !=
600 offsetof(struct thread_vr_state
, vr
[32]));
602 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
603 &target
->thread
.vr_state
, 0,
604 33 * sizeof(vector128
));
605 if (!ret
&& count
> 0) {
607 * We use only the first word of vrsave.
613 memset(&vrsave
, 0, sizeof(vrsave
));
615 vrsave
.word
= target
->thread
.vrsave
;
617 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
618 33 * sizeof(vector128
), -1);
620 target
->thread
.vrsave
= vrsave
.word
;
625 #endif /* CONFIG_ALTIVEC */
629 * Currently to set and and get all the vsx state, you need to call
630 * the fp and VMX calls as well. This only get/sets the lower 32
631 * 128bit VSX registers.
634 static int vsr_active(struct task_struct
*target
,
635 const struct user_regset
*regset
)
637 flush_vsx_to_thread(target
);
638 return target
->thread
.used_vsr
? regset
->n
: 0;
642 * Regardless of transactions, 'fp_state' holds the current running
643 * value of all FPR registers and 'ckfp_state' holds the last
644 * checkpointed value of all FPR registers for the current
647 * Userspace interface buffer layout:
653 static int vsr_get(struct task_struct
*target
, const struct user_regset
*regset
,
654 unsigned int pos
, unsigned int count
,
655 void *kbuf
, void __user
*ubuf
)
660 flush_tmregs_to_thread(target
);
661 flush_fp_to_thread(target
);
662 flush_altivec_to_thread(target
);
663 flush_vsx_to_thread(target
);
665 for (i
= 0; i
< 32 ; i
++)
666 buf
[i
] = target
->thread
.fp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
668 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
669 buf
, 0, 32 * sizeof(double));
675 * Regardless of transactions, 'fp_state' holds the current running
676 * value of all FPR registers and 'ckfp_state' holds the last
677 * checkpointed value of all FPR registers for the current
680 * Userspace interface buffer layout:
686 static int vsr_set(struct task_struct
*target
, const struct user_regset
*regset
,
687 unsigned int pos
, unsigned int count
,
688 const void *kbuf
, const void __user
*ubuf
)
693 flush_tmregs_to_thread(target
);
694 flush_fp_to_thread(target
);
695 flush_altivec_to_thread(target
);
696 flush_vsx_to_thread(target
);
698 for (i
= 0; i
< 32 ; i
++)
699 buf
[i
] = target
->thread
.fp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
701 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
702 buf
, 0, 32 * sizeof(double));
704 for (i
= 0; i
< 32 ; i
++)
705 target
->thread
.fp_state
.fpr
[i
][TS_VSRLOWOFFSET
] = buf
[i
];
709 #endif /* CONFIG_VSX */
714 * For get_evrregs/set_evrregs functions 'data' has the following layout:
723 static int evr_active(struct task_struct
*target
,
724 const struct user_regset
*regset
)
726 flush_spe_to_thread(target
);
727 return target
->thread
.used_spe
? regset
->n
: 0;
730 static int evr_get(struct task_struct
*target
, const struct user_regset
*regset
,
731 unsigned int pos
, unsigned int count
,
732 void *kbuf
, void __user
*ubuf
)
736 flush_spe_to_thread(target
);
738 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
740 0, sizeof(target
->thread
.evr
));
742 BUILD_BUG_ON(offsetof(struct thread_struct
, acc
) + sizeof(u64
) !=
743 offsetof(struct thread_struct
, spefscr
));
746 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
748 sizeof(target
->thread
.evr
), -1);
753 static int evr_set(struct task_struct
*target
, const struct user_regset
*regset
,
754 unsigned int pos
, unsigned int count
,
755 const void *kbuf
, const void __user
*ubuf
)
759 flush_spe_to_thread(target
);
761 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
763 0, sizeof(target
->thread
.evr
));
765 BUILD_BUG_ON(offsetof(struct thread_struct
, acc
) + sizeof(u64
) !=
766 offsetof(struct thread_struct
, spefscr
));
769 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
771 sizeof(target
->thread
.evr
), -1);
775 #endif /* CONFIG_SPE */
777 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
779 * tm_cgpr_active - get active number of registers in CGPR
780 * @target: The target task.
781 * @regset: The user regset structure.
783 * This function checks for the active number of available
784 * regisers in transaction checkpointed GPR category.
786 static int tm_cgpr_active(struct task_struct
*target
,
787 const struct user_regset
*regset
)
789 if (!cpu_has_feature(CPU_FTR_TM
))
792 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
799 * tm_cgpr_get - get CGPR registers
800 * @target: The target task.
801 * @regset: The user regset structure.
802 * @pos: The buffer position.
803 * @count: Number of bytes to copy.
804 * @kbuf: Kernel buffer to copy from.
805 * @ubuf: User buffer to copy into.
807 * This function gets transaction checkpointed GPR registers.
809 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
810 * GPR register values for the current transaction to fall back on if it
811 * aborts in between. This function gets those checkpointed GPR registers.
812 * The userspace interface buffer layout is as follows.
815 * struct pt_regs ckpt_regs;
818 static int tm_cgpr_get(struct task_struct
*target
,
819 const struct user_regset
*regset
,
820 unsigned int pos
, unsigned int count
,
821 void *kbuf
, void __user
*ubuf
)
825 if (!cpu_has_feature(CPU_FTR_TM
))
828 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
831 flush_tmregs_to_thread(target
);
832 flush_fp_to_thread(target
);
833 flush_altivec_to_thread(target
);
835 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
836 &target
->thread
.ckpt_regs
,
837 0, offsetof(struct pt_regs
, msr
));
839 unsigned long msr
= get_user_ckpt_msr(target
);
841 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &msr
,
842 offsetof(struct pt_regs
, msr
),
843 offsetof(struct pt_regs
, msr
) +
847 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
848 offsetof(struct pt_regs
, msr
) + sizeof(long));
851 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
852 &target
->thread
.ckpt_regs
.orig_gpr3
,
853 offsetof(struct pt_regs
, orig_gpr3
),
854 sizeof(struct pt_regs
));
856 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
857 sizeof(struct pt_regs
), -1);
863 * tm_cgpr_set - set the CGPR registers
864 * @target: The target task.
865 * @regset: The user regset structure.
866 * @pos: The buffer position.
867 * @count: Number of bytes to copy.
868 * @kbuf: Kernel buffer to copy into.
869 * @ubuf: User buffer to copy from.
871 * This function sets in transaction checkpointed GPR registers.
873 * When the transaction is active, 'ckpt_regs' holds the checkpointed
874 * GPR register values for the current transaction to fall back on if it
875 * aborts in between. This function sets those checkpointed GPR registers.
876 * The userspace interface buffer layout is as follows.
879 * struct pt_regs ckpt_regs;
882 static int tm_cgpr_set(struct task_struct
*target
,
883 const struct user_regset
*regset
,
884 unsigned int pos
, unsigned int count
,
885 const void *kbuf
, const void __user
*ubuf
)
890 if (!cpu_has_feature(CPU_FTR_TM
))
893 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
896 flush_tmregs_to_thread(target
);
897 flush_fp_to_thread(target
);
898 flush_altivec_to_thread(target
);
900 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
901 &target
->thread
.ckpt_regs
,
902 0, PT_MSR
* sizeof(reg
));
904 if (!ret
&& count
> 0) {
905 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
906 PT_MSR
* sizeof(reg
),
907 (PT_MSR
+ 1) * sizeof(reg
));
909 ret
= set_user_ckpt_msr(target
, reg
);
912 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
913 offsetof(struct pt_regs
, msr
) + sizeof(long));
916 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
917 &target
->thread
.ckpt_regs
.orig_gpr3
,
918 PT_ORIG_R3
* sizeof(reg
),
919 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
));
921 if (PT_MAX_PUT_REG
+ 1 < PT_TRAP
&& !ret
)
922 ret
= user_regset_copyin_ignore(
923 &pos
, &count
, &kbuf
, &ubuf
,
924 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
),
925 PT_TRAP
* sizeof(reg
));
927 if (!ret
&& count
> 0) {
928 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
929 PT_TRAP
* sizeof(reg
),
930 (PT_TRAP
+ 1) * sizeof(reg
));
932 ret
= set_user_ckpt_trap(target
, reg
);
936 ret
= user_regset_copyin_ignore(
937 &pos
, &count
, &kbuf
, &ubuf
,
938 (PT_TRAP
+ 1) * sizeof(reg
), -1);
944 * tm_cfpr_active - get active number of registers in CFPR
945 * @target: The target task.
946 * @regset: The user regset structure.
948 * This function checks for the active number of available
949 * regisers in transaction checkpointed FPR category.
951 static int tm_cfpr_active(struct task_struct
*target
,
952 const struct user_regset
*regset
)
954 if (!cpu_has_feature(CPU_FTR_TM
))
957 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
964 * tm_cfpr_get - get CFPR registers
965 * @target: The target task.
966 * @regset: The user regset structure.
967 * @pos: The buffer position.
968 * @count: Number of bytes to copy.
969 * @kbuf: Kernel buffer to copy from.
970 * @ubuf: User buffer to copy into.
972 * This function gets in transaction checkpointed FPR registers.
974 * When the transaction is active 'ckfp_state' holds the checkpointed
975 * values for the current transaction to fall back on if it aborts
976 * in between. This function gets those checkpointed FPR registers.
977 * The userspace interface buffer layout is as follows.
984 static int tm_cfpr_get(struct task_struct
*target
,
985 const struct user_regset
*regset
,
986 unsigned int pos
, unsigned int count
,
987 void *kbuf
, void __user
*ubuf
)
992 if (!cpu_has_feature(CPU_FTR_TM
))
995 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
998 flush_tmregs_to_thread(target
);
999 flush_fp_to_thread(target
);
1000 flush_altivec_to_thread(target
);
1002 /* copy to local buffer then write that out */
1003 for (i
= 0; i
< 32 ; i
++)
1004 buf
[i
] = target
->thread
.TS_CKFPR(i
);
1005 buf
[32] = target
->thread
.ckfp_state
.fpscr
;
1006 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
1010 * tm_cfpr_set - set CFPR registers
1011 * @target: The target task.
1012 * @regset: The user regset structure.
1013 * @pos: The buffer position.
1014 * @count: Number of bytes to copy.
1015 * @kbuf: Kernel buffer to copy into.
1016 * @ubuf: User buffer to copy from.
1018 * This function sets in transaction checkpointed FPR registers.
1020 * When the transaction is active 'ckfp_state' holds the checkpointed
1021 * FPR register values for the current transaction to fall back on
1022 * if it aborts in between. This function sets these checkpointed
1023 * FPR registers. The userspace interface buffer layout is as follows.
1030 static int tm_cfpr_set(struct task_struct
*target
,
1031 const struct user_regset
*regset
,
1032 unsigned int pos
, unsigned int count
,
1033 const void *kbuf
, const void __user
*ubuf
)
1038 if (!cpu_has_feature(CPU_FTR_TM
))
1041 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1044 flush_tmregs_to_thread(target
);
1045 flush_fp_to_thread(target
);
1046 flush_altivec_to_thread(target
);
1048 for (i
= 0; i
< 32; i
++)
1049 buf
[i
] = target
->thread
.TS_CKFPR(i
);
1050 buf
[32] = target
->thread
.ckfp_state
.fpscr
;
1052 /* copy to local buffer then write that out */
1053 i
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
1056 for (i
= 0; i
< 32 ; i
++)
1057 target
->thread
.TS_CKFPR(i
) = buf
[i
];
1058 target
->thread
.ckfp_state
.fpscr
= buf
[32];
1063 * tm_cvmx_active - get active number of registers in CVMX
1064 * @target: The target task.
1065 * @regset: The user regset structure.
1067 * This function checks for the active number of available
1068 * regisers in checkpointed VMX category.
1070 static int tm_cvmx_active(struct task_struct
*target
,
1071 const struct user_regset
*regset
)
1073 if (!cpu_has_feature(CPU_FTR_TM
))
1076 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1083 * tm_cvmx_get - get CMVX registers
1084 * @target: The target task.
1085 * @regset: The user regset structure.
1086 * @pos: The buffer position.
1087 * @count: Number of bytes to copy.
1088 * @kbuf: Kernel buffer to copy from.
1089 * @ubuf: User buffer to copy into.
1091 * This function gets in transaction checkpointed VMX registers.
1093 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1094 * the checkpointed values for the current transaction to fall
1095 * back on if it aborts in between. The userspace interface buffer
1096 * layout is as follows.
1104 static int tm_cvmx_get(struct task_struct
*target
,
1105 const struct user_regset
*regset
,
1106 unsigned int pos
, unsigned int count
,
1107 void *kbuf
, void __user
*ubuf
)
1111 BUILD_BUG_ON(TVSO(vscr
) != TVSO(vr
[32]));
1113 if (!cpu_has_feature(CPU_FTR_TM
))
1116 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1119 /* Flush the state */
1120 flush_tmregs_to_thread(target
);
1121 flush_fp_to_thread(target
);
1122 flush_altivec_to_thread(target
);
1124 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1125 &target
->thread
.ckvr_state
, 0,
1126 33 * sizeof(vector128
));
1129 * Copy out only the low-order word of vrsave.
1135 memset(&vrsave
, 0, sizeof(vrsave
));
1136 vrsave
.word
= target
->thread
.ckvrsave
;
1137 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
1138 33 * sizeof(vector128
), -1);
1145 * tm_cvmx_set - set CMVX registers
1146 * @target: The target task.
1147 * @regset: The user regset structure.
1148 * @pos: The buffer position.
1149 * @count: Number of bytes to copy.
1150 * @kbuf: Kernel buffer to copy into.
1151 * @ubuf: User buffer to copy from.
1153 * This function sets in transaction checkpointed VMX registers.
1155 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1156 * the checkpointed values for the current transaction to fall
1157 * back on if it aborts in between. The userspace interface buffer
1158 * layout is as follows.
1166 static int tm_cvmx_set(struct task_struct
*target
,
1167 const struct user_regset
*regset
,
1168 unsigned int pos
, unsigned int count
,
1169 const void *kbuf
, const void __user
*ubuf
)
1173 BUILD_BUG_ON(TVSO(vscr
) != TVSO(vr
[32]));
1175 if (!cpu_has_feature(CPU_FTR_TM
))
1178 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1181 flush_tmregs_to_thread(target
);
1182 flush_fp_to_thread(target
);
1183 flush_altivec_to_thread(target
);
1185 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1186 &target
->thread
.ckvr_state
, 0,
1187 33 * sizeof(vector128
));
1188 if (!ret
&& count
> 0) {
1190 * We use only the low-order word of vrsave.
1196 memset(&vrsave
, 0, sizeof(vrsave
));
1197 vrsave
.word
= target
->thread
.ckvrsave
;
1198 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
1199 33 * sizeof(vector128
), -1);
1201 target
->thread
.ckvrsave
= vrsave
.word
;
1208 * tm_cvsx_active - get active number of registers in CVSX
1209 * @target: The target task.
1210 * @regset: The user regset structure.
1212 * This function checks for the active number of available
1213 * regisers in transaction checkpointed VSX category.
1215 static int tm_cvsx_active(struct task_struct
*target
,
1216 const struct user_regset
*regset
)
1218 if (!cpu_has_feature(CPU_FTR_TM
))
1221 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1224 flush_vsx_to_thread(target
);
1225 return target
->thread
.used_vsr
? regset
->n
: 0;
1229 * tm_cvsx_get - get CVSX registers
1230 * @target: The target task.
1231 * @regset: The user regset structure.
1232 * @pos: The buffer position.
1233 * @count: Number of bytes to copy.
1234 * @kbuf: Kernel buffer to copy from.
1235 * @ubuf: User buffer to copy into.
1237 * This function gets in transaction checkpointed VSX registers.
1239 * When the transaction is active 'ckfp_state' holds the checkpointed
1240 * values for the current transaction to fall back on if it aborts
1241 * in between. This function gets those checkpointed VSX registers.
1242 * The userspace interface buffer layout is as follows.
1248 static int tm_cvsx_get(struct task_struct
*target
,
1249 const struct user_regset
*regset
,
1250 unsigned int pos
, unsigned int count
,
1251 void *kbuf
, void __user
*ubuf
)
1256 if (!cpu_has_feature(CPU_FTR_TM
))
1259 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1262 /* Flush the state */
1263 flush_tmregs_to_thread(target
);
1264 flush_fp_to_thread(target
);
1265 flush_altivec_to_thread(target
);
1266 flush_vsx_to_thread(target
);
1268 for (i
= 0; i
< 32 ; i
++)
1269 buf
[i
] = target
->thread
.ckfp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
1270 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1271 buf
, 0, 32 * sizeof(double));
1277 * tm_cvsx_set - set CFPR registers
1278 * @target: The target task.
1279 * @regset: The user regset structure.
1280 * @pos: The buffer position.
1281 * @count: Number of bytes to copy.
1282 * @kbuf: Kernel buffer to copy into.
1283 * @ubuf: User buffer to copy from.
1285 * This function sets in transaction checkpointed VSX registers.
1287 * When the transaction is active 'ckfp_state' holds the checkpointed
1288 * VSX register values for the current transaction to fall back on
1289 * if it aborts in between. This function sets these checkpointed
1290 * FPR registers. The userspace interface buffer layout is as follows.
1296 static int tm_cvsx_set(struct task_struct
*target
,
1297 const struct user_regset
*regset
,
1298 unsigned int pos
, unsigned int count
,
1299 const void *kbuf
, const void __user
*ubuf
)
1304 if (!cpu_has_feature(CPU_FTR_TM
))
1307 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1310 /* Flush the state */
1311 flush_tmregs_to_thread(target
);
1312 flush_fp_to_thread(target
);
1313 flush_altivec_to_thread(target
);
1314 flush_vsx_to_thread(target
);
1316 for (i
= 0; i
< 32 ; i
++)
1317 buf
[i
] = target
->thread
.ckfp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
1319 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1320 buf
, 0, 32 * sizeof(double));
1322 for (i
= 0; i
< 32 ; i
++)
1323 target
->thread
.ckfp_state
.fpr
[i
][TS_VSRLOWOFFSET
] = buf
[i
];
1329 * tm_spr_active - get active number of registers in TM SPR
1330 * @target: The target task.
1331 * @regset: The user regset structure.
1333 * This function checks the active number of available
1334 * regisers in the transactional memory SPR category.
1336 static int tm_spr_active(struct task_struct
*target
,
1337 const struct user_regset
*regset
)
1339 if (!cpu_has_feature(CPU_FTR_TM
))
1346 * tm_spr_get - get the TM related SPR registers
1347 * @target: The target task.
1348 * @regset: The user regset structure.
1349 * @pos: The buffer position.
1350 * @count: Number of bytes to copy.
1351 * @kbuf: Kernel buffer to copy from.
1352 * @ubuf: User buffer to copy into.
1354 * This function gets transactional memory related SPR registers.
1355 * The userspace interface buffer layout is as follows.
1363 static int tm_spr_get(struct task_struct
*target
,
1364 const struct user_regset
*regset
,
1365 unsigned int pos
, unsigned int count
,
1366 void *kbuf
, void __user
*ubuf
)
1371 BUILD_BUG_ON(TSO(tm_tfhar
) + sizeof(u64
) != TSO(tm_texasr
));
1372 BUILD_BUG_ON(TSO(tm_texasr
) + sizeof(u64
) != TSO(tm_tfiar
));
1373 BUILD_BUG_ON(TSO(tm_tfiar
) + sizeof(u64
) != TSO(ckpt_regs
));
1375 if (!cpu_has_feature(CPU_FTR_TM
))
1378 /* Flush the states */
1379 flush_tmregs_to_thread(target
);
1380 flush_fp_to_thread(target
);
1381 flush_altivec_to_thread(target
);
1383 /* TFHAR register */
1384 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1385 &target
->thread
.tm_tfhar
, 0, sizeof(u64
));
1387 /* TEXASR register */
1389 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1390 &target
->thread
.tm_texasr
, sizeof(u64
),
1393 /* TFIAR register */
1395 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1396 &target
->thread
.tm_tfiar
,
1397 2 * sizeof(u64
), 3 * sizeof(u64
));
1402 * tm_spr_set - set the TM related SPR registers
1403 * @target: The target task.
1404 * @regset: The user regset structure.
1405 * @pos: The buffer position.
1406 * @count: Number of bytes to copy.
1407 * @kbuf: Kernel buffer to copy into.
1408 * @ubuf: User buffer to copy from.
1410 * This function sets transactional memory related SPR registers.
1411 * The userspace interface buffer layout is as follows.
1419 static int tm_spr_set(struct task_struct
*target
,
1420 const struct user_regset
*regset
,
1421 unsigned int pos
, unsigned int count
,
1422 const void *kbuf
, const void __user
*ubuf
)
1427 BUILD_BUG_ON(TSO(tm_tfhar
) + sizeof(u64
) != TSO(tm_texasr
));
1428 BUILD_BUG_ON(TSO(tm_texasr
) + sizeof(u64
) != TSO(tm_tfiar
));
1429 BUILD_BUG_ON(TSO(tm_tfiar
) + sizeof(u64
) != TSO(ckpt_regs
));
1431 if (!cpu_has_feature(CPU_FTR_TM
))
1434 /* Flush the states */
1435 flush_tmregs_to_thread(target
);
1436 flush_fp_to_thread(target
);
1437 flush_altivec_to_thread(target
);
1439 /* TFHAR register */
1440 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1441 &target
->thread
.tm_tfhar
, 0, sizeof(u64
));
1443 /* TEXASR register */
1445 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1446 &target
->thread
.tm_texasr
, sizeof(u64
),
1449 /* TFIAR register */
1451 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1452 &target
->thread
.tm_tfiar
,
1453 2 * sizeof(u64
), 3 * sizeof(u64
));
1457 static int tm_tar_active(struct task_struct
*target
,
1458 const struct user_regset
*regset
)
1460 if (!cpu_has_feature(CPU_FTR_TM
))
1463 if (MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1469 static int tm_tar_get(struct task_struct
*target
,
1470 const struct user_regset
*regset
,
1471 unsigned int pos
, unsigned int count
,
1472 void *kbuf
, void __user
*ubuf
)
1476 if (!cpu_has_feature(CPU_FTR_TM
))
1479 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1482 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1483 &target
->thread
.tm_tar
, 0, sizeof(u64
));
1487 static int tm_tar_set(struct task_struct
*target
,
1488 const struct user_regset
*regset
,
1489 unsigned int pos
, unsigned int count
,
1490 const void *kbuf
, const void __user
*ubuf
)
1494 if (!cpu_has_feature(CPU_FTR_TM
))
1497 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1500 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1501 &target
->thread
.tm_tar
, 0, sizeof(u64
));
1505 static int tm_ppr_active(struct task_struct
*target
,
1506 const struct user_regset
*regset
)
1508 if (!cpu_has_feature(CPU_FTR_TM
))
1511 if (MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1518 static int tm_ppr_get(struct task_struct
*target
,
1519 const struct user_regset
*regset
,
1520 unsigned int pos
, unsigned int count
,
1521 void *kbuf
, void __user
*ubuf
)
1525 if (!cpu_has_feature(CPU_FTR_TM
))
1528 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1531 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1532 &target
->thread
.tm_ppr
, 0, sizeof(u64
));
1536 static int tm_ppr_set(struct task_struct
*target
,
1537 const struct user_regset
*regset
,
1538 unsigned int pos
, unsigned int count
,
1539 const void *kbuf
, const void __user
*ubuf
)
1543 if (!cpu_has_feature(CPU_FTR_TM
))
1546 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1549 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1550 &target
->thread
.tm_ppr
, 0, sizeof(u64
));
1554 static int tm_dscr_active(struct task_struct
*target
,
1555 const struct user_regset
*regset
)
1557 if (!cpu_has_feature(CPU_FTR_TM
))
1560 if (MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1566 static int tm_dscr_get(struct task_struct
*target
,
1567 const struct user_regset
*regset
,
1568 unsigned int pos
, unsigned int count
,
1569 void *kbuf
, void __user
*ubuf
)
1573 if (!cpu_has_feature(CPU_FTR_TM
))
1576 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1579 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1580 &target
->thread
.tm_dscr
, 0, sizeof(u64
));
1584 static int tm_dscr_set(struct task_struct
*target
,
1585 const struct user_regset
*regset
,
1586 unsigned int pos
, unsigned int count
,
1587 const void *kbuf
, const void __user
*ubuf
)
1591 if (!cpu_has_feature(CPU_FTR_TM
))
1594 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1597 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1598 &target
->thread
.tm_dscr
, 0, sizeof(u64
));
1601 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1604 static int ppr_get(struct task_struct
*target
,
1605 const struct user_regset
*regset
,
1606 unsigned int pos
, unsigned int count
,
1607 void *kbuf
, void __user
*ubuf
)
1609 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1610 &target
->thread
.ppr
, 0, sizeof(u64
));
1613 static int ppr_set(struct task_struct
*target
,
1614 const struct user_regset
*regset
,
1615 unsigned int pos
, unsigned int count
,
1616 const void *kbuf
, const void __user
*ubuf
)
1618 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1619 &target
->thread
.ppr
, 0, sizeof(u64
));
1622 static int dscr_get(struct task_struct
*target
,
1623 const struct user_regset
*regset
,
1624 unsigned int pos
, unsigned int count
,
1625 void *kbuf
, void __user
*ubuf
)
1627 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1628 &target
->thread
.dscr
, 0, sizeof(u64
));
1630 static int dscr_set(struct task_struct
*target
,
1631 const struct user_regset
*regset
,
1632 unsigned int pos
, unsigned int count
,
1633 const void *kbuf
, const void __user
*ubuf
)
1635 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1636 &target
->thread
.dscr
, 0, sizeof(u64
));
1639 #ifdef CONFIG_PPC_BOOK3S_64
1640 static int tar_get(struct task_struct
*target
,
1641 const struct user_regset
*regset
,
1642 unsigned int pos
, unsigned int count
,
1643 void *kbuf
, void __user
*ubuf
)
1645 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1646 &target
->thread
.tar
, 0, sizeof(u64
));
1648 static int tar_set(struct task_struct
*target
,
1649 const struct user_regset
*regset
,
1650 unsigned int pos
, unsigned int count
,
1651 const void *kbuf
, const void __user
*ubuf
)
1653 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1654 &target
->thread
.tar
, 0, sizeof(u64
));
1657 static int ebb_active(struct task_struct
*target
,
1658 const struct user_regset
*regset
)
1660 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1663 if (target
->thread
.used_ebb
)
1669 static int ebb_get(struct task_struct
*target
,
1670 const struct user_regset
*regset
,
1671 unsigned int pos
, unsigned int count
,
1672 void *kbuf
, void __user
*ubuf
)
1675 BUILD_BUG_ON(TSO(ebbrr
) + sizeof(unsigned long) != TSO(ebbhr
));
1676 BUILD_BUG_ON(TSO(ebbhr
) + sizeof(unsigned long) != TSO(bescr
));
1678 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1681 if (!target
->thread
.used_ebb
)
1684 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1685 &target
->thread
.ebbrr
, 0, 3 * sizeof(unsigned long));
1688 static int ebb_set(struct task_struct
*target
,
1689 const struct user_regset
*regset
,
1690 unsigned int pos
, unsigned int count
,
1691 const void *kbuf
, const void __user
*ubuf
)
1696 BUILD_BUG_ON(TSO(ebbrr
) + sizeof(unsigned long) != TSO(ebbhr
));
1697 BUILD_BUG_ON(TSO(ebbhr
) + sizeof(unsigned long) != TSO(bescr
));
1699 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1702 if (target
->thread
.used_ebb
)
1705 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1706 &target
->thread
.ebbrr
, 0, sizeof(unsigned long));
1709 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1710 &target
->thread
.ebbhr
, sizeof(unsigned long),
1711 2 * sizeof(unsigned long));
1714 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1715 &target
->thread
.bescr
,
1716 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1720 static int pmu_active(struct task_struct
*target
,
1721 const struct user_regset
*regset
)
1723 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1729 static int pmu_get(struct task_struct
*target
,
1730 const struct user_regset
*regset
,
1731 unsigned int pos
, unsigned int count
,
1732 void *kbuf
, void __user
*ubuf
)
1735 BUILD_BUG_ON(TSO(siar
) + sizeof(unsigned long) != TSO(sdar
));
1736 BUILD_BUG_ON(TSO(sdar
) + sizeof(unsigned long) != TSO(sier
));
1737 BUILD_BUG_ON(TSO(sier
) + sizeof(unsigned long) != TSO(mmcr2
));
1738 BUILD_BUG_ON(TSO(mmcr2
) + sizeof(unsigned long) != TSO(mmcr0
));
1740 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1743 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1744 &target
->thread
.siar
, 0,
1745 5 * sizeof(unsigned long));
1748 static int pmu_set(struct task_struct
*target
,
1749 const struct user_regset
*regset
,
1750 unsigned int pos
, unsigned int count
,
1751 const void *kbuf
, const void __user
*ubuf
)
1756 BUILD_BUG_ON(TSO(siar
) + sizeof(unsigned long) != TSO(sdar
));
1757 BUILD_BUG_ON(TSO(sdar
) + sizeof(unsigned long) != TSO(sier
));
1758 BUILD_BUG_ON(TSO(sier
) + sizeof(unsigned long) != TSO(mmcr2
));
1759 BUILD_BUG_ON(TSO(mmcr2
) + sizeof(unsigned long) != TSO(mmcr0
));
1761 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1764 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1765 &target
->thread
.siar
, 0,
1766 sizeof(unsigned long));
1769 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1770 &target
->thread
.sdar
, sizeof(unsigned long),
1771 2 * sizeof(unsigned long));
1774 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1775 &target
->thread
.sier
, 2 * sizeof(unsigned long),
1776 3 * sizeof(unsigned long));
1779 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1780 &target
->thread
.mmcr2
, 3 * sizeof(unsigned long),
1781 4 * sizeof(unsigned long));
1784 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1785 &target
->thread
.mmcr0
, 4 * sizeof(unsigned long),
1786 5 * sizeof(unsigned long));
1791 * These are our native regset flavors.
/* Index of each regset in the native (and compat) regset arrays. */
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	REGSET_TM_CGPR,		/* TM checkpointed GPR registers */
	REGSET_TM_CFPR,		/* TM checkpointed FPR registers */
	REGSET_TM_CVMX,		/* TM checkpointed VMX registers */
	REGSET_TM_CVSX,		/* TM checkpointed VSX registers */
	REGSET_TM_SPR,		/* TM specific SPR registers */
	REGSET_TM_CTAR,		/* TM checkpointed TAR register */
	REGSET_TM_CPPR,		/* TM checkpointed PPR register */
	REGSET_TM_CDSCR,	/* TM checkpointed DSCR register */
#endif
#ifdef CONFIG_PPC64
	REGSET_PPR,		/* PPR register */
	REGSET_DSCR,		/* DSCR register */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	REGSET_TAR,		/* TAR register */
	REGSET_EBB,		/* EBB registers */
	REGSET_PMR,		/* Performance Monitor Registers */
#endif
};
1826 static const struct user_regset native_regsets
[] = {
1828 .core_note_type
= NT_PRSTATUS
, .n
= ELF_NGREG
,
1829 .size
= sizeof(long), .align
= sizeof(long),
1830 .get
= gpr_get
, .set
= gpr_set
1833 .core_note_type
= NT_PRFPREG
, .n
= ELF_NFPREG
,
1834 .size
= sizeof(double), .align
= sizeof(double),
1835 .get
= fpr_get
, .set
= fpr_set
1837 #ifdef CONFIG_ALTIVEC
1839 .core_note_type
= NT_PPC_VMX
, .n
= 34,
1840 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
1841 .active
= vr_active
, .get
= vr_get
, .set
= vr_set
1846 .core_note_type
= NT_PPC_VSX
, .n
= 32,
1847 .size
= sizeof(double), .align
= sizeof(double),
1848 .active
= vsr_active
, .get
= vsr_get
, .set
= vsr_set
1853 .core_note_type
= NT_PPC_SPE
, .n
= 35,
1854 .size
= sizeof(u32
), .align
= sizeof(u32
),
1855 .active
= evr_active
, .get
= evr_get
, .set
= evr_set
1858 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1859 [REGSET_TM_CGPR
] = {
1860 .core_note_type
= NT_PPC_TM_CGPR
, .n
= ELF_NGREG
,
1861 .size
= sizeof(long), .align
= sizeof(long),
1862 .active
= tm_cgpr_active
, .get
= tm_cgpr_get
, .set
= tm_cgpr_set
1864 [REGSET_TM_CFPR
] = {
1865 .core_note_type
= NT_PPC_TM_CFPR
, .n
= ELF_NFPREG
,
1866 .size
= sizeof(double), .align
= sizeof(double),
1867 .active
= tm_cfpr_active
, .get
= tm_cfpr_get
, .set
= tm_cfpr_set
1869 [REGSET_TM_CVMX
] = {
1870 .core_note_type
= NT_PPC_TM_CVMX
, .n
= ELF_NVMX
,
1871 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
1872 .active
= tm_cvmx_active
, .get
= tm_cvmx_get
, .set
= tm_cvmx_set
1874 [REGSET_TM_CVSX
] = {
1875 .core_note_type
= NT_PPC_TM_CVSX
, .n
= ELF_NVSX
,
1876 .size
= sizeof(double), .align
= sizeof(double),
1877 .active
= tm_cvsx_active
, .get
= tm_cvsx_get
, .set
= tm_cvsx_set
1880 .core_note_type
= NT_PPC_TM_SPR
, .n
= ELF_NTMSPRREG
,
1881 .size
= sizeof(u64
), .align
= sizeof(u64
),
1882 .active
= tm_spr_active
, .get
= tm_spr_get
, .set
= tm_spr_set
1884 [REGSET_TM_CTAR
] = {
1885 .core_note_type
= NT_PPC_TM_CTAR
, .n
= 1,
1886 .size
= sizeof(u64
), .align
= sizeof(u64
),
1887 .active
= tm_tar_active
, .get
= tm_tar_get
, .set
= tm_tar_set
1889 [REGSET_TM_CPPR
] = {
1890 .core_note_type
= NT_PPC_TM_CPPR
, .n
= 1,
1891 .size
= sizeof(u64
), .align
= sizeof(u64
),
1892 .active
= tm_ppr_active
, .get
= tm_ppr_get
, .set
= tm_ppr_set
1894 [REGSET_TM_CDSCR
] = {
1895 .core_note_type
= NT_PPC_TM_CDSCR
, .n
= 1,
1896 .size
= sizeof(u64
), .align
= sizeof(u64
),
1897 .active
= tm_dscr_active
, .get
= tm_dscr_get
, .set
= tm_dscr_set
1902 .core_note_type
= NT_PPC_PPR
, .n
= 1,
1903 .size
= sizeof(u64
), .align
= sizeof(u64
),
1904 .get
= ppr_get
, .set
= ppr_set
1907 .core_note_type
= NT_PPC_DSCR
, .n
= 1,
1908 .size
= sizeof(u64
), .align
= sizeof(u64
),
1909 .get
= dscr_get
, .set
= dscr_set
1912 #ifdef CONFIG_PPC_BOOK3S_64
1914 .core_note_type
= NT_PPC_TAR
, .n
= 1,
1915 .size
= sizeof(u64
), .align
= sizeof(u64
),
1916 .get
= tar_get
, .set
= tar_set
1919 .core_note_type
= NT_PPC_EBB
, .n
= ELF_NEBB
,
1920 .size
= sizeof(u64
), .align
= sizeof(u64
),
1921 .active
= ebb_active
, .get
= ebb_get
, .set
= ebb_set
1924 .core_note_type
= NT_PPC_PMU
, .n
= ELF_NPMU
,
1925 .size
= sizeof(u64
), .align
= sizeof(u64
),
1926 .active
= pmu_active
, .get
= pmu_get
, .set
= pmu_set
1931 static const struct user_regset_view user_ppc_native_view
= {
1932 .name
= UTS_MACHINE
, .e_machine
= ELF_ARCH
, .ei_osabi
= ELF_OSABI
,
1933 .regsets
= native_regsets
, .n
= ARRAY_SIZE(native_regsets
)
1937 #include <linux/compat.h>
1939 static int gpr32_get_common(struct task_struct
*target
,
1940 const struct user_regset
*regset
,
1941 unsigned int pos
, unsigned int count
,
1942 void *kbuf
, void __user
*ubuf
,
1943 unsigned long *regs
)
1945 compat_ulong_t
*k
= kbuf
;
1946 compat_ulong_t __user
*u
= ubuf
;
1950 count
/= sizeof(reg
);
1953 for (; count
> 0 && pos
< PT_MSR
; --count
)
1956 for (; count
> 0 && pos
< PT_MSR
; --count
)
1957 if (__put_user((compat_ulong_t
) regs
[pos
++], u
++))
1960 if (count
> 0 && pos
== PT_MSR
) {
1961 reg
= get_user_msr(target
);
1964 else if (__put_user(reg
, u
++))
1971 for (; count
> 0 && pos
< PT_REGS_COUNT
; --count
)
1974 for (; count
> 0 && pos
< PT_REGS_COUNT
; --count
)
1975 if (__put_user((compat_ulong_t
) regs
[pos
++], u
++))
1981 count
*= sizeof(reg
);
1982 return user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
1983 PT_REGS_COUNT
* sizeof(reg
), -1);
1986 static int gpr32_set_common(struct task_struct
*target
,
1987 const struct user_regset
*regset
,
1988 unsigned int pos
, unsigned int count
,
1989 const void *kbuf
, const void __user
*ubuf
,
1990 unsigned long *regs
)
1992 const compat_ulong_t
*k
= kbuf
;
1993 const compat_ulong_t __user
*u
= ubuf
;
1997 count
/= sizeof(reg
);
2000 for (; count
> 0 && pos
< PT_MSR
; --count
)
2003 for (; count
> 0 && pos
< PT_MSR
; --count
) {
2004 if (__get_user(reg
, u
++))
2010 if (count
> 0 && pos
== PT_MSR
) {
2013 else if (__get_user(reg
, u
++))
2015 set_user_msr(target
, reg
);
2021 for (; count
> 0 && pos
<= PT_MAX_PUT_REG
; --count
)
2023 for (; count
> 0 && pos
< PT_TRAP
; --count
, ++pos
)
2026 for (; count
> 0 && pos
<= PT_MAX_PUT_REG
; --count
) {
2027 if (__get_user(reg
, u
++))
2031 for (; count
> 0 && pos
< PT_TRAP
; --count
, ++pos
)
2032 if (__get_user(reg
, u
++))
2036 if (count
> 0 && pos
== PT_TRAP
) {
2039 else if (__get_user(reg
, u
++))
2041 set_user_trap(target
, reg
);
2049 count
*= sizeof(reg
);
2050 return user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
2051 (PT_TRAP
+ 1) * sizeof(reg
), -1);
2054 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2055 static int tm_cgpr32_get(struct task_struct
*target
,
2056 const struct user_regset
*regset
,
2057 unsigned int pos
, unsigned int count
,
2058 void *kbuf
, void __user
*ubuf
)
2060 return gpr32_get_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2061 &target
->thread
.ckpt_regs
.gpr
[0]);
2064 static int tm_cgpr32_set(struct task_struct
*target
,
2065 const struct user_regset
*regset
,
2066 unsigned int pos
, unsigned int count
,
2067 const void *kbuf
, const void __user
*ubuf
)
2069 return gpr32_set_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2070 &target
->thread
.ckpt_regs
.gpr
[0]);
2072 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2074 static int gpr32_get(struct task_struct
*target
,
2075 const struct user_regset
*regset
,
2076 unsigned int pos
, unsigned int count
,
2077 void *kbuf
, void __user
*ubuf
)
2081 if (target
->thread
.regs
== NULL
)
2084 if (!FULL_REGS(target
->thread
.regs
)) {
2086 * We have a partial register set.
2087 * Fill 14-31 with bogus values.
2089 for (i
= 14; i
< 32; i
++)
2090 target
->thread
.regs
->gpr
[i
] = NV_REG_POISON
;
2092 return gpr32_get_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2093 &target
->thread
.regs
->gpr
[0]);
2096 static int gpr32_set(struct task_struct
*target
,
2097 const struct user_regset
*regset
,
2098 unsigned int pos
, unsigned int count
,
2099 const void *kbuf
, const void __user
*ubuf
)
2101 if (target
->thread
.regs
== NULL
)
2104 CHECK_FULL_REGS(target
->thread
.regs
);
2105 return gpr32_set_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2106 &target
->thread
.regs
->gpr
[0]);
2110 * These are the regset flavors matching the CONFIG_PPC32 native set.
2112 static const struct user_regset compat_regsets
[] = {
2114 .core_note_type
= NT_PRSTATUS
, .n
= ELF_NGREG
,
2115 .size
= sizeof(compat_long_t
), .align
= sizeof(compat_long_t
),
2116 .get
= gpr32_get
, .set
= gpr32_set
2119 .core_note_type
= NT_PRFPREG
, .n
= ELF_NFPREG
,
2120 .size
= sizeof(double), .align
= sizeof(double),
2121 .get
= fpr_get
, .set
= fpr_set
2123 #ifdef CONFIG_ALTIVEC
2125 .core_note_type
= NT_PPC_VMX
, .n
= 34,
2126 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
2127 .active
= vr_active
, .get
= vr_get
, .set
= vr_set
2132 .core_note_type
= NT_PPC_SPE
, .n
= 35,
2133 .size
= sizeof(u32
), .align
= sizeof(u32
),
2134 .active
= evr_active
, .get
= evr_get
, .set
= evr_set
2137 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2138 [REGSET_TM_CGPR
] = {
2139 .core_note_type
= NT_PPC_TM_CGPR
, .n
= ELF_NGREG
,
2140 .size
= sizeof(long), .align
= sizeof(long),
2141 .active
= tm_cgpr_active
,
2142 .get
= tm_cgpr32_get
, .set
= tm_cgpr32_set
2144 [REGSET_TM_CFPR
] = {
2145 .core_note_type
= NT_PPC_TM_CFPR
, .n
= ELF_NFPREG
,
2146 .size
= sizeof(double), .align
= sizeof(double),
2147 .active
= tm_cfpr_active
, .get
= tm_cfpr_get
, .set
= tm_cfpr_set
2149 [REGSET_TM_CVMX
] = {
2150 .core_note_type
= NT_PPC_TM_CVMX
, .n
= ELF_NVMX
,
2151 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
2152 .active
= tm_cvmx_active
, .get
= tm_cvmx_get
, .set
= tm_cvmx_set
2154 [REGSET_TM_CVSX
] = {
2155 .core_note_type
= NT_PPC_TM_CVSX
, .n
= ELF_NVSX
,
2156 .size
= sizeof(double), .align
= sizeof(double),
2157 .active
= tm_cvsx_active
, .get
= tm_cvsx_get
, .set
= tm_cvsx_set
2160 .core_note_type
= NT_PPC_TM_SPR
, .n
= ELF_NTMSPRREG
,
2161 .size
= sizeof(u64
), .align
= sizeof(u64
),
2162 .active
= tm_spr_active
, .get
= tm_spr_get
, .set
= tm_spr_set
2164 [REGSET_TM_CTAR
] = {
2165 .core_note_type
= NT_PPC_TM_CTAR
, .n
= 1,
2166 .size
= sizeof(u64
), .align
= sizeof(u64
),
2167 .active
= tm_tar_active
, .get
= tm_tar_get
, .set
= tm_tar_set
2169 [REGSET_TM_CPPR
] = {
2170 .core_note_type
= NT_PPC_TM_CPPR
, .n
= 1,
2171 .size
= sizeof(u64
), .align
= sizeof(u64
),
2172 .active
= tm_ppr_active
, .get
= tm_ppr_get
, .set
= tm_ppr_set
2174 [REGSET_TM_CDSCR
] = {
2175 .core_note_type
= NT_PPC_TM_CDSCR
, .n
= 1,
2176 .size
= sizeof(u64
), .align
= sizeof(u64
),
2177 .active
= tm_dscr_active
, .get
= tm_dscr_get
, .set
= tm_dscr_set
2182 .core_note_type
= NT_PPC_PPR
, .n
= 1,
2183 .size
= sizeof(u64
), .align
= sizeof(u64
),
2184 .get
= ppr_get
, .set
= ppr_set
2187 .core_note_type
= NT_PPC_DSCR
, .n
= 1,
2188 .size
= sizeof(u64
), .align
= sizeof(u64
),
2189 .get
= dscr_get
, .set
= dscr_set
2192 #ifdef CONFIG_PPC_BOOK3S_64
2194 .core_note_type
= NT_PPC_TAR
, .n
= 1,
2195 .size
= sizeof(u64
), .align
= sizeof(u64
),
2196 .get
= tar_get
, .set
= tar_set
2199 .core_note_type
= NT_PPC_EBB
, .n
= ELF_NEBB
,
2200 .size
= sizeof(u64
), .align
= sizeof(u64
),
2201 .active
= ebb_active
, .get
= ebb_get
, .set
= ebb_set
2206 static const struct user_regset_view user_ppc_compat_view
= {
2207 .name
= "ppc", .e_machine
= EM_PPC
, .ei_osabi
= ELF_OSABI
,
2208 .regsets
= compat_regsets
, .n
= ARRAY_SIZE(compat_regsets
)
2210 #endif /* CONFIG_PPC64 */
2212 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
2215 if (test_tsk_thread_flag(task
, TIF_32BIT
))
2216 return &user_ppc_compat_view
;
2218 return &user_ppc_native_view
;
2222 void user_enable_single_step(struct task_struct
*task
)
2224 struct pt_regs
*regs
= task
->thread
.regs
;
2227 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2228 task
->thread
.debug
.dbcr0
&= ~DBCR0_BT
;
2229 task
->thread
.debug
.dbcr0
|= DBCR0_IDM
| DBCR0_IC
;
2230 regs
->msr
|= MSR_DE
;
2232 regs
->msr
&= ~MSR_BE
;
2233 regs
->msr
|= MSR_SE
;
2236 set_tsk_thread_flag(task
, TIF_SINGLESTEP
);
2239 void user_enable_block_step(struct task_struct
*task
)
2241 struct pt_regs
*regs
= task
->thread
.regs
;
2244 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2245 task
->thread
.debug
.dbcr0
&= ~DBCR0_IC
;
2246 task
->thread
.debug
.dbcr0
= DBCR0_IDM
| DBCR0_BT
;
2247 regs
->msr
|= MSR_DE
;
2249 regs
->msr
&= ~MSR_SE
;
2250 regs
->msr
|= MSR_BE
;
2253 set_tsk_thread_flag(task
, TIF_SINGLESTEP
);
2256 void user_disable_single_step(struct task_struct
*task
)
2258 struct pt_regs
*regs
= task
->thread
.regs
;
2261 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2263 * The logic to disable single stepping should be as
2264 * simple as turning off the Instruction Complete flag.
2265 * And, after doing so, if all debug flags are off, turn
2266 * off DBCR0(IDM) and MSR(DE) .... Torez
2268 task
->thread
.debug
.dbcr0
&= ~(DBCR0_IC
|DBCR0_BT
);
2270 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2272 if (!DBCR_ACTIVE_EVENTS(task
->thread
.debug
.dbcr0
,
2273 task
->thread
.debug
.dbcr1
)) {
2275 * All debug events were off.....
2277 task
->thread
.debug
.dbcr0
&= ~DBCR0_IDM
;
2278 regs
->msr
&= ~MSR_DE
;
2281 regs
->msr
&= ~(MSR_SE
| MSR_BE
);
2284 clear_tsk_thread_flag(task
, TIF_SINGLESTEP
);
2287 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2288 void ptrace_triggered(struct perf_event
*bp
,
2289 struct perf_sample_data
*data
, struct pt_regs
*regs
)
2291 struct perf_event_attr attr
;
2294 * Disable the breakpoint request here since ptrace has defined a
2295 * one-shot behaviour for breakpoint exceptions in PPC64.
2296 * The SIGTRAP signal is generated automatically for us in do_dabr().
2297 * We don't have to do anything about that here
2300 attr
.disabled
= true;
2301 modify_user_hw_breakpoint(bp
, &attr
);
2303 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2305 static int ptrace_set_debugreg(struct task_struct
*task
, unsigned long addr
,
2308 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2310 struct thread_struct
*thread
= &(task
->thread
);
2311 struct perf_event
*bp
;
2312 struct perf_event_attr attr
;
2313 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2314 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2315 struct arch_hw_breakpoint hw_brk
;
2318 /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
2319 * For embedded processors we support one DAC and no IAC's at the
2325 /* The bottom 3 bits in dabr are flags */
2326 if ((data
& ~0x7UL
) >= TASK_SIZE
)
2329 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2330 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2331 * It was assumed, on previous implementations, that 3 bits were
2332 * passed together with the data address, fitting the design of the
2333 * DABR register, as follows:
2337 * bit 2: Breakpoint translation
2339 * Thus, we use them here as so.
2342 /* Ensure breakpoint translation bit is set */
2343 if (data
&& !(data
& HW_BRK_TYPE_TRANSLATE
))
2345 hw_brk
.address
= data
& (~HW_BRK_TYPE_DABR
);
2346 hw_brk
.type
= (data
& HW_BRK_TYPE_DABR
) | HW_BRK_TYPE_PRIV_ALL
;
2348 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2349 bp
= thread
->ptrace_bps
[0];
2350 if ((!data
) || !(hw_brk
.type
& HW_BRK_TYPE_RDWR
)) {
2352 unregister_hw_breakpoint(bp
);
2353 thread
->ptrace_bps
[0] = NULL
;
2359 attr
.bp_addr
= hw_brk
.address
;
2360 arch_bp_generic_fields(hw_brk
.type
, &attr
.bp_type
);
2362 /* Enable breakpoint */
2363 attr
.disabled
= false;
2365 ret
= modify_user_hw_breakpoint(bp
, &attr
);
2369 thread
->ptrace_bps
[0] = bp
;
2370 thread
->hw_brk
= hw_brk
;
2374 /* Create a new breakpoint request if one doesn't exist already */
2375 hw_breakpoint_init(&attr
);
2376 attr
.bp_addr
= hw_brk
.address
;
2377 arch_bp_generic_fields(hw_brk
.type
,
2380 thread
->ptrace_bps
[0] = bp
= register_user_hw_breakpoint(&attr
,
2381 ptrace_triggered
, NULL
, task
);
2383 thread
->ptrace_bps
[0] = NULL
;
2387 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2388 task
->thread
.hw_brk
= hw_brk
;
2389 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2390 /* As described above, it was assumed 3 bits were passed with the data
2391 * address, but we will assume only the mode bits will be passed
2392 * as to not cause alignment restrictions for DAC-based processors.
2395 /* DAC's hold the whole address without any mode flags */
2396 task
->thread
.debug
.dac1
= data
& ~0x3UL
;
2398 if (task
->thread
.debug
.dac1
== 0) {
2399 dbcr_dac(task
) &= ~(DBCR_DAC1R
| DBCR_DAC1W
);
2400 if (!DBCR_ACTIVE_EVENTS(task
->thread
.debug
.dbcr0
,
2401 task
->thread
.debug
.dbcr1
)) {
2402 task
->thread
.regs
->msr
&= ~MSR_DE
;
2403 task
->thread
.debug
.dbcr0
&= ~DBCR0_IDM
;
2408 /* Read or Write bits must be set */
2410 if (!(data
& 0x3UL
))
2413 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2415 task
->thread
.debug
.dbcr0
|= DBCR0_IDM
;
2417 /* Check for write and read flags and set DBCR0
2419 dbcr_dac(task
) &= ~(DBCR_DAC1R
|DBCR_DAC1W
);
2421 dbcr_dac(task
) |= DBCR_DAC1R
;
2423 dbcr_dac(task
) |= DBCR_DAC1W
;
2424 task
->thread
.regs
->msr
|= MSR_DE
;
2425 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
2440 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2441 static long set_instruction_bp(struct task_struct
*child
,
2442 struct ppc_hw_breakpoint
*bp_info
)
2445 int slot1_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC1
) != 0);
2446 int slot2_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC2
) != 0);
2447 int slot3_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC3
) != 0);
2448 int slot4_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC4
) != 0);
2450 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
)
2452 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
)
2455 if (bp_info
->addr
>= TASK_SIZE
)
2458 if (bp_info
->addr_mode
!= PPC_BREAKPOINT_MODE_EXACT
) {
2460 /* Make sure range is valid. */
2461 if (bp_info
->addr2
>= TASK_SIZE
)
2464 /* We need a pair of IAC regsisters */
2465 if ((!slot1_in_use
) && (!slot2_in_use
)) {
2467 child
->thread
.debug
.iac1
= bp_info
->addr
;
2468 child
->thread
.debug
.iac2
= bp_info
->addr2
;
2469 child
->thread
.debug
.dbcr0
|= DBCR0_IAC1
;
2470 if (bp_info
->addr_mode
==
2471 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE
)
2472 dbcr_iac_range(child
) |= DBCR_IAC12X
;
2474 dbcr_iac_range(child
) |= DBCR_IAC12I
;
2475 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2476 } else if ((!slot3_in_use
) && (!slot4_in_use
)) {
2478 child
->thread
.debug
.iac3
= bp_info
->addr
;
2479 child
->thread
.debug
.iac4
= bp_info
->addr2
;
2480 child
->thread
.debug
.dbcr0
|= DBCR0_IAC3
;
2481 if (bp_info
->addr_mode
==
2482 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE
)
2483 dbcr_iac_range(child
) |= DBCR_IAC34X
;
2485 dbcr_iac_range(child
) |= DBCR_IAC34I
;
2490 /* We only need one. If possible leave a pair free in
2491 * case a range is needed later
2493 if (!slot1_in_use
) {
2495 * Don't use iac1 if iac1-iac2 are free and either
2496 * iac3 or iac4 (but not both) are free
2498 if (slot2_in_use
|| (slot3_in_use
== slot4_in_use
)) {
2500 child
->thread
.debug
.iac1
= bp_info
->addr
;
2501 child
->thread
.debug
.dbcr0
|= DBCR0_IAC1
;
2505 if (!slot2_in_use
) {
2507 child
->thread
.debug
.iac2
= bp_info
->addr
;
2508 child
->thread
.debug
.dbcr0
|= DBCR0_IAC2
;
2509 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2510 } else if (!slot3_in_use
) {
2512 child
->thread
.debug
.iac3
= bp_info
->addr
;
2513 child
->thread
.debug
.dbcr0
|= DBCR0_IAC3
;
2514 } else if (!slot4_in_use
) {
2516 child
->thread
.debug
.iac4
= bp_info
->addr
;
2517 child
->thread
.debug
.dbcr0
|= DBCR0_IAC4
;
2523 child
->thread
.debug
.dbcr0
|= DBCR0_IDM
;
2524 child
->thread
.regs
->msr
|= MSR_DE
;
2529 static int del_instruction_bp(struct task_struct
*child
, int slot
)
2533 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC1
) == 0)
2536 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
) {
2537 /* address range - clear slots 1 & 2 */
2538 child
->thread
.debug
.iac2
= 0;
2539 dbcr_iac_range(child
) &= ~DBCR_IAC12MODE
;
2541 child
->thread
.debug
.iac1
= 0;
2542 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC1
;
2545 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC2
) == 0)
2548 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
)
2549 /* used in a range */
2551 child
->thread
.debug
.iac2
= 0;
2552 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC2
;
2554 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2556 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC3
) == 0)
2559 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
) {
2560 /* address range - clear slots 3 & 4 */
2561 child
->thread
.debug
.iac4
= 0;
2562 dbcr_iac_range(child
) &= ~DBCR_IAC34MODE
;
2564 child
->thread
.debug
.iac3
= 0;
2565 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC3
;
2568 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC4
) == 0)
2571 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
)
2572 /* Used in a range */
2574 child
->thread
.debug
.iac4
= 0;
2575 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC4
;
2584 static int set_dac(struct task_struct
*child
, struct ppc_hw_breakpoint
*bp_info
)
2587 (bp_info
->condition_mode
>> PPC_BREAKPOINT_CONDITION_BE_SHIFT
)
2589 int condition_mode
=
2590 bp_info
->condition_mode
& PPC_BREAKPOINT_CONDITION_MODE
;
2593 if (byte_enable
&& (condition_mode
== 0))
2596 if (bp_info
->addr
>= TASK_SIZE
)
2599 if ((dbcr_dac(child
) & (DBCR_DAC1R
| DBCR_DAC1W
)) == 0) {
2601 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
2602 dbcr_dac(child
) |= DBCR_DAC1R
;
2603 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
2604 dbcr_dac(child
) |= DBCR_DAC1W
;
2605 child
->thread
.debug
.dac1
= (unsigned long)bp_info
->addr
;
2606 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2608 child
->thread
.debug
.dvc1
=
2609 (unsigned long)bp_info
->condition_value
;
2610 child
->thread
.debug
.dbcr2
|=
2611 ((byte_enable
<< DBCR2_DVC1BE_SHIFT
) |
2612 (condition_mode
<< DBCR2_DVC1M_SHIFT
));
2615 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2616 } else if (child
->thread
.debug
.dbcr2
& DBCR2_DAC12MODE
) {
2617 /* Both dac1 and dac2 are part of a range */
2620 } else if ((dbcr_dac(child
) & (DBCR_DAC2R
| DBCR_DAC2W
)) == 0) {
2622 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
2623 dbcr_dac(child
) |= DBCR_DAC2R
;
2624 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
2625 dbcr_dac(child
) |= DBCR_DAC2W
;
2626 child
->thread
.debug
.dac2
= (unsigned long)bp_info
->addr
;
2627 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2629 child
->thread
.debug
.dvc2
=
2630 (unsigned long)bp_info
->condition_value
;
2631 child
->thread
.debug
.dbcr2
|=
2632 ((byte_enable
<< DBCR2_DVC2BE_SHIFT
) |
2633 (condition_mode
<< DBCR2_DVC2M_SHIFT
));
2638 child
->thread
.debug
.dbcr0
|= DBCR0_IDM
;
2639 child
->thread
.regs
->msr
|= MSR_DE
;
2644 static int del_dac(struct task_struct
*child
, int slot
)
2647 if ((dbcr_dac(child
) & (DBCR_DAC1R
| DBCR_DAC1W
)) == 0)
2650 child
->thread
.debug
.dac1
= 0;
2651 dbcr_dac(child
) &= ~(DBCR_DAC1R
| DBCR_DAC1W
);
2652 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2653 if (child
->thread
.debug
.dbcr2
& DBCR2_DAC12MODE
) {
2654 child
->thread
.debug
.dac2
= 0;
2655 child
->thread
.debug
.dbcr2
&= ~DBCR2_DAC12MODE
;
2657 child
->thread
.debug
.dbcr2
&= ~(DBCR2_DVC1M
| DBCR2_DVC1BE
);
2659 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2660 child
->thread
.debug
.dvc1
= 0;
2662 } else if (slot
== 2) {
2663 if ((dbcr_dac(child
) & (DBCR_DAC2R
| DBCR_DAC2W
)) == 0)
2666 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2667 if (child
->thread
.debug
.dbcr2
& DBCR2_DAC12MODE
)
2668 /* Part of a range */
2670 child
->thread
.debug
.dbcr2
&= ~(DBCR2_DVC2M
| DBCR2_DVC2BE
);
2672 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2673 child
->thread
.debug
.dvc2
= 0;
2675 child
->thread
.debug
.dac2
= 0;
2676 dbcr_dac(child
) &= ~(DBCR_DAC2R
| DBCR_DAC2W
);
2682 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * Install a ranged/masked data watchpoint using the DAC1/DAC2 pair.
 * Range watchpoints consume both DAC slots, so they are rejected if
 * either slot is already in use.
 *
 * @child:   task whose debug registers are modified
 * @bp_info: user-supplied breakpoint description; addr/addr2 hold the
 *           range bounds (or address and mask for MODE_MASK)
 *
 * Returns the fixed slot handle 5 (the first data slot, as reported by
 * set_dac()) on success, or a negative errno.
 */
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	/* Both DAC slots must be free. */
	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2739 static long ppc_set_hwdebug(struct task_struct
*child
,
2740 struct ppc_hw_breakpoint
*bp_info
)
2742 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2744 struct thread_struct
*thread
= &(child
->thread
);
2745 struct perf_event
*bp
;
2746 struct perf_event_attr attr
;
2747 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2748 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2749 struct arch_hw_breakpoint brk
;
2752 if (bp_info
->version
!= 1)
2754 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2756 * Check for invalid flags and combinations
2758 if ((bp_info
->trigger_type
== 0) ||
2759 (bp_info
->trigger_type
& ~(PPC_BREAKPOINT_TRIGGER_EXECUTE
|
2760 PPC_BREAKPOINT_TRIGGER_RW
)) ||
2761 (bp_info
->addr_mode
& ~PPC_BREAKPOINT_MODE_MASK
) ||
2762 (bp_info
->condition_mode
&
2763 ~(PPC_BREAKPOINT_CONDITION_MODE
|
2764 PPC_BREAKPOINT_CONDITION_BE_ALL
)))
2766 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2767 if (bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
)
2771 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_EXECUTE
) {
2772 if ((bp_info
->trigger_type
!= PPC_BREAKPOINT_TRIGGER_EXECUTE
) ||
2773 (bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
))
2775 return set_instruction_bp(child
, bp_info
);
2777 if (bp_info
->addr_mode
== PPC_BREAKPOINT_MODE_EXACT
)
2778 return set_dac(child
, bp_info
);
2780 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2781 return set_dac_range(child
, bp_info
);
2785 #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2787 * We only support one data breakpoint
2789 if ((bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_RW
) == 0 ||
2790 (bp_info
->trigger_type
& ~PPC_BREAKPOINT_TRIGGER_RW
) != 0 ||
2791 bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
)
2794 if ((unsigned long)bp_info
->addr
>= TASK_SIZE
)
2797 brk
.address
= bp_info
->addr
& ~7UL;
2798 brk
.type
= HW_BRK_TYPE_TRANSLATE
;
2800 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
2801 brk
.type
|= HW_BRK_TYPE_READ
;
2802 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
2803 brk
.type
|= HW_BRK_TYPE_WRITE
;
2804 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2806 * Check if the request is for 'range' breakpoints. We can
2807 * support it if range < 8 bytes.
2809 if (bp_info
->addr_mode
== PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE
)
2810 len
= bp_info
->addr2
- bp_info
->addr
;
2811 else if (bp_info
->addr_mode
== PPC_BREAKPOINT_MODE_EXACT
)
2815 bp
= thread
->ptrace_bps
[0];
2819 /* Create a new breakpoint request if one doesn't exist already */
2820 hw_breakpoint_init(&attr
);
2821 attr
.bp_addr
= (unsigned long)bp_info
->addr
& ~HW_BREAKPOINT_ALIGN
;
2823 arch_bp_generic_fields(brk
.type
, &attr
.bp_type
);
2825 thread
->ptrace_bps
[0] = bp
= register_user_hw_breakpoint(&attr
,
2826 ptrace_triggered
, NULL
, child
);
2828 thread
->ptrace_bps
[0] = NULL
;
2833 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2835 if (bp_info
->addr_mode
!= PPC_BREAKPOINT_MODE_EXACT
)
2838 if (child
->thread
.hw_brk
.address
)
2841 child
->thread
.hw_brk
= brk
;
2844 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2847 static long ppc_del_hwdebug(struct task_struct
*child
, long data
)
2849 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2851 struct thread_struct
*thread
= &(child
->thread
);
2852 struct perf_event
*bp
;
2853 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2854 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2858 rc
= del_instruction_bp(child
, (int)data
);
2860 rc
= del_dac(child
, (int)data
- 4);
2863 if (!DBCR_ACTIVE_EVENTS(child
->thread
.debug
.dbcr0
,
2864 child
->thread
.debug
.dbcr1
)) {
2865 child
->thread
.debug
.dbcr0
&= ~DBCR0_IDM
;
2866 child
->thread
.regs
->msr
&= ~MSR_DE
;
2874 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2875 bp
= thread
->ptrace_bps
[0];
2877 unregister_hw_breakpoint(bp
);
2878 thread
->ptrace_bps
[0] = NULL
;
2882 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2883 if (child
->thread
.hw_brk
.address
== 0)
2886 child
->thread
.hw_brk
.address
= 0;
2887 child
->thread
.hw_brk
.type
= 0;
2888 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2894 long arch_ptrace(struct task_struct
*child
, long request
,
2895 unsigned long addr
, unsigned long data
)
2898 void __user
*datavp
= (void __user
*) data
;
2899 unsigned long __user
*datalp
= datavp
;
2902 /* read the word at location addr in the USER area. */
2903 case PTRACE_PEEKUSR
: {
2904 unsigned long index
, tmp
;
2907 /* convert to index and check */
2910 if ((addr
& 3) || (index
> PT_FPSCR
)
2911 || (child
->thread
.regs
== NULL
))
2914 if ((addr
& 7) || (index
> PT_FPSCR
))
2918 CHECK_FULL_REGS(child
->thread
.regs
);
2919 if (index
< PT_FPR0
) {
2920 ret
= ptrace_get_reg(child
, (int) index
, &tmp
);
2924 unsigned int fpidx
= index
- PT_FPR0
;
2926 flush_fp_to_thread(child
);
2927 if (fpidx
< (PT_FPSCR
- PT_FPR0
))
2928 memcpy(&tmp
, &child
->thread
.TS_FPR(fpidx
),
2931 tmp
= child
->thread
.fp_state
.fpscr
;
2933 ret
= put_user(tmp
, datalp
);
2937 /* write the word at location addr in the USER area */
2938 case PTRACE_POKEUSR
: {
2939 unsigned long index
;
2942 /* convert to index and check */
2945 if ((addr
& 3) || (index
> PT_FPSCR
)
2946 || (child
->thread
.regs
== NULL
))
2949 if ((addr
& 7) || (index
> PT_FPSCR
))
2953 CHECK_FULL_REGS(child
->thread
.regs
);
2954 if (index
< PT_FPR0
) {
2955 ret
= ptrace_put_reg(child
, index
, data
);
2957 unsigned int fpidx
= index
- PT_FPR0
;
2959 flush_fp_to_thread(child
);
2960 if (fpidx
< (PT_FPSCR
- PT_FPR0
))
2961 memcpy(&child
->thread
.TS_FPR(fpidx
), &data
,
2964 child
->thread
.fp_state
.fpscr
= data
;
2970 case PPC_PTRACE_GETHWDBGINFO
: {
2971 struct ppc_debug_info dbginfo
;
2973 dbginfo
.version
= 1;
2974 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2975 dbginfo
.num_instruction_bps
= CONFIG_PPC_ADV_DEBUG_IACS
;
2976 dbginfo
.num_data_bps
= CONFIG_PPC_ADV_DEBUG_DACS
;
2977 dbginfo
.num_condition_regs
= CONFIG_PPC_ADV_DEBUG_DVCS
;
2978 dbginfo
.data_bp_alignment
= 4;
2979 dbginfo
.sizeof_condition
= 4;
2980 dbginfo
.features
= PPC_DEBUG_FEATURE_INSN_BP_RANGE
|
2981 PPC_DEBUG_FEATURE_INSN_BP_MASK
;
2982 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2984 PPC_DEBUG_FEATURE_DATA_BP_RANGE
|
2985 PPC_DEBUG_FEATURE_DATA_BP_MASK
;
2987 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
2988 dbginfo
.num_instruction_bps
= 0;
2989 dbginfo
.num_data_bps
= 1;
2990 dbginfo
.num_condition_regs
= 0;
2992 dbginfo
.data_bp_alignment
= 8;
2994 dbginfo
.data_bp_alignment
= 4;
2996 dbginfo
.sizeof_condition
= 0;
2997 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2998 dbginfo
.features
= PPC_DEBUG_FEATURE_DATA_BP_RANGE
;
2999 if (cpu_has_feature(CPU_FTR_DAWR
))
3000 dbginfo
.features
|= PPC_DEBUG_FEATURE_DATA_BP_DAWR
;
3002 dbginfo
.features
= 0;
3003 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3004 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3006 if (!access_ok(VERIFY_WRITE
, datavp
,
3007 sizeof(struct ppc_debug_info
)))
3009 ret
= __copy_to_user(datavp
, &dbginfo
,
3010 sizeof(struct ppc_debug_info
)) ?
3015 case PPC_PTRACE_SETHWDEBUG
: {
3016 struct ppc_hw_breakpoint bp_info
;
3018 if (!access_ok(VERIFY_READ
, datavp
,
3019 sizeof(struct ppc_hw_breakpoint
)))
3021 ret
= __copy_from_user(&bp_info
, datavp
,
3022 sizeof(struct ppc_hw_breakpoint
)) ?
3025 ret
= ppc_set_hwdebug(child
, &bp_info
);
3029 case PPC_PTRACE_DELHWDEBUG
: {
3030 ret
= ppc_del_hwdebug(child
, data
);
3034 case PTRACE_GET_DEBUGREG
: {
3035 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3036 unsigned long dabr_fake
;
3039 /* We only support one DABR and no IABRS at the moment */
3042 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3043 ret
= put_user(child
->thread
.debug
.dac1
, datalp
);
3045 dabr_fake
= ((child
->thread
.hw_brk
.address
& (~HW_BRK_TYPE_DABR
)) |
3046 (child
->thread
.hw_brk
.type
& HW_BRK_TYPE_DABR
));
3047 ret
= put_user(dabr_fake
, datalp
);
3052 case PTRACE_SET_DEBUGREG
:
3053 ret
= ptrace_set_debugreg(child
, addr
, data
);
3057 case PTRACE_GETREGS64
:
3059 case PTRACE_GETREGS
: /* Get all pt_regs from the child. */
3060 return copy_regset_to_user(child
, &user_ppc_native_view
,
3062 0, sizeof(struct pt_regs
),
3066 case PTRACE_SETREGS64
:
3068 case PTRACE_SETREGS
: /* Set all gp regs in the child. */
3069 return copy_regset_from_user(child
, &user_ppc_native_view
,
3071 0, sizeof(struct pt_regs
),
3074 case PTRACE_GETFPREGS
: /* Get the child FPU state (FPR0...31 + FPSCR) */
3075 return copy_regset_to_user(child
, &user_ppc_native_view
,
3077 0, sizeof(elf_fpregset_t
),
3080 case PTRACE_SETFPREGS
: /* Set the child FPU state (FPR0...31 + FPSCR) */
3081 return copy_regset_from_user(child
, &user_ppc_native_view
,
3083 0, sizeof(elf_fpregset_t
),
3086 #ifdef CONFIG_ALTIVEC
3087 case PTRACE_GETVRREGS
:
3088 return copy_regset_to_user(child
, &user_ppc_native_view
,
3090 0, (33 * sizeof(vector128
) +
3094 case PTRACE_SETVRREGS
:
3095 return copy_regset_from_user(child
, &user_ppc_native_view
,
3097 0, (33 * sizeof(vector128
) +
3102 case PTRACE_GETVSRREGS
:
3103 return copy_regset_to_user(child
, &user_ppc_native_view
,
3105 0, 32 * sizeof(double),
3108 case PTRACE_SETVSRREGS
:
3109 return copy_regset_from_user(child
, &user_ppc_native_view
,
3111 0, 32 * sizeof(double),
3115 case PTRACE_GETEVRREGS
:
3116 /* Get the child spe register state. */
3117 return copy_regset_to_user(child
, &user_ppc_native_view
,
3118 REGSET_SPE
, 0, 35 * sizeof(u32
),
3121 case PTRACE_SETEVRREGS
:
3122 /* Set the child spe register state. */
3123 return copy_regset_from_user(child
, &user_ppc_native_view
,
3124 REGSET_SPE
, 0, 35 * sizeof(u32
),
3129 ret
= ptrace_request(child
, request
, addr
, data
);
#ifdef CONFIG_SECCOMP
/*
 * Run the seccomp filter for the current syscall, translating between
 * the powerpc ptrace register convention and the ABI seccomp tracers
 * expect. Returns 0 if the syscall may proceed, non-zero if it must be
 * skipped (r3 already holds the return value in that case).
 */
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
3174 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3175 * @regs: the pt_regs of the task to trace (current)
3177 * Performs various types of tracing on syscall entry. This includes seccomp,
3178 * ptrace, syscall tracepoints and audit.
3180 * The pt_regs are potentially visible to userspace via ptrace, so their
3183 * One or more of the tracers may modify the contents of pt_regs, in particular
3184 * to modify arguments or even the syscall number itself.
3186 * It's also possible that a tracer can choose to reject the system call. In
3187 * that case this function will return an illegal syscall number, and will put
3188 * an appropriate return value in regs->r3.
3190 * Return: the (possibly changed) syscall number.
3192 long do_syscall_trace_enter(struct pt_regs
*regs
)
3197 * The tracer may decide to abort the syscall, if so tracehook
3198 * will return !0. Note that the tracer may also just change
3199 * regs->gpr[0] to an invalid syscall number, that is handled
3200 * below on the exit path.
3202 if (test_thread_flag(TIF_SYSCALL_TRACE
) &&
3203 tracehook_report_syscall_entry(regs
))
3206 /* Run seccomp after ptrace; allow it to set gpr[3]. */
3207 if (do_seccomp(regs
))
3210 /* Avoid trace and audit when syscall is invalid. */
3211 if (regs
->gpr
[0] >= NR_syscalls
)
3214 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
3215 trace_sys_enter(regs
, regs
->gpr
[0]);
3218 if (!is_32bit_task())
3219 audit_syscall_entry(regs
->gpr
[0], regs
->gpr
[3], regs
->gpr
[4],
3220 regs
->gpr
[5], regs
->gpr
[6]);
3223 audit_syscall_entry(regs
->gpr
[0],
3224 regs
->gpr
[3] & 0xffffffff,
3225 regs
->gpr
[4] & 0xffffffff,
3226 regs
->gpr
[5] & 0xffffffff,
3227 regs
->gpr
[6] & 0xffffffff);
3229 /* Return the possibly modified but valid syscall number */
3230 return regs
->gpr
[0];
3234 * If we are aborting explicitly, or if the syscall number is
3235 * now invalid, set the return value to -ENOSYS.
3237 regs
->gpr
[3] = -ENOSYS
;
3241 void do_syscall_trace_leave(struct pt_regs
*regs
)
3245 audit_syscall_exit(regs
);
3247 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
3248 trace_sys_exit(regs
, regs
->result
);
3250 step
= test_thread_flag(TIF_SINGLESTEP
);
3251 if (step
|| test_thread_flag(TIF_SYSCALL_TRACE
))
3252 tracehook_report_syscall_exit(regs
, step
);