3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 * Derived from "arch/m68k/kernel/ptrace.c"
6 * Copyright (C) 1994 by Hamish Macdonald
7 * Taken from linux/kernel/ptrace.c and modified for M680x0.
8 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
11 * and Paul Mackerras (paulus@samba.org).
13 * This file is subject to the terms and conditions of the GNU General
14 * Public License. See the file README.legal in the main directory of
15 * this archive for more details.
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
21 #include <linux/smp.h>
22 #include <linux/errno.h>
23 #include <linux/ptrace.h>
24 #include <linux/regset.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/user.h>
28 #include <linux/security.h>
29 #include <linux/signal.h>
30 #include <linux/seccomp.h>
31 #include <linux/audit.h>
32 #include <trace/syscall.h>
33 #include <linux/hw_breakpoint.h>
34 #include <linux/perf_event.h>
35 #include <linux/context_tracking.h>
36 #include <linux/nospec.h>
38 #include <linux/uaccess.h>
39 #include <linux/pkeys.h>
41 #include <asm/pgtable.h>
42 #include <asm/switch_to.h>
44 #include <asm/asm-prototypes.h>
45 #include <asm/debug.h>
46 #include <asm/hw_breakpoint.h>
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/syscalls.h>
52 * The parameter save area on the stack is used to store arguments being passed
53 * to callee function and is located at fixed offset from stack pointer.
56 #define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
57 #else /* CONFIG_PPC32 */
58 #define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
61 struct pt_regs_offset
{
66 #define STR(s) #s /* convert to string */
67 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
68 #define GPR_OFFSET_NAME(num) \
69 {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
70 {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
71 #define REG_OFFSET_END {.name = NULL, .offset = 0}
73 #define TVSO(f) (offsetof(struct thread_vr_state, f))
74 #define TFSO(f) (offsetof(struct thread_fp_state, f))
75 #define TSO(f) (offsetof(struct thread_struct, f))
77 static const struct pt_regs_offset regoffset_table
[] = {
110 REG_OFFSET_NAME(nip
),
111 REG_OFFSET_NAME(msr
),
112 REG_OFFSET_NAME(ctr
),
113 REG_OFFSET_NAME(link
),
114 REG_OFFSET_NAME(xer
),
115 REG_OFFSET_NAME(ccr
),
117 REG_OFFSET_NAME(softe
),
121 REG_OFFSET_NAME(trap
),
122 REG_OFFSET_NAME(dar
),
123 REG_OFFSET_NAME(dsisr
),
127 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
128 static void flush_tmregs_to_thread(struct task_struct
*tsk
)
131 * If task is not current, it will have been flushed already to
132 * it's thread_struct during __switch_to().
134 * A reclaim flushes ALL the state or if not in TM save TM SPRs
135 * in the appropriate thread structures from live.
138 if ((!cpu_has_feature(CPU_FTR_TM
)) || (tsk
!= current
))
141 if (MSR_TM_SUSPENDED(mfmsr())) {
142 tm_reclaim_current(TM_CAUSE_SIGNAL
);
145 tm_save_sprs(&(tsk
->thread
));
149 static inline void flush_tmregs_to_thread(struct task_struct
*tsk
) { }
153 * regs_query_register_offset() - query register offset from its name
154 * @name: the name of a register
156 * regs_query_register_offset() returns the offset of a register in struct
157 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
159 int regs_query_register_offset(const char *name
)
161 const struct pt_regs_offset
*roff
;
162 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
163 if (!strcmp(roff
->name
, name
))
169 * regs_query_register_name() - query register name from its offset
170 * @offset: the offset of a register in struct pt_regs.
172 * regs_query_register_name() returns the name of a register from its
173 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
175 const char *regs_query_register_name(unsigned int offset
)
177 const struct pt_regs_offset
*roff
;
178 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
179 if (roff
->offset
== offset
)
185 * does not yet catch signals sent when the child dies.
186 * in exit.c or in signal.c.
190 * Set of msr bits that gdb can change on behalf of a process.
192 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
193 #define MSR_DEBUGCHANGE 0
195 #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
199 * Max register writeable via put_reg
202 #define PT_MAX_PUT_REG PT_MQ
204 #define PT_MAX_PUT_REG PT_CCR
207 static unsigned long get_user_msr(struct task_struct
*task
)
209 return task
->thread
.regs
->msr
| task
->thread
.fpexc_mode
;
212 static int set_user_msr(struct task_struct
*task
, unsigned long msr
)
214 task
->thread
.regs
->msr
&= ~MSR_DEBUGCHANGE
;
215 task
->thread
.regs
->msr
|= msr
& MSR_DEBUGCHANGE
;
219 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
220 static unsigned long get_user_ckpt_msr(struct task_struct
*task
)
222 return task
->thread
.ckpt_regs
.msr
| task
->thread
.fpexc_mode
;
225 static int set_user_ckpt_msr(struct task_struct
*task
, unsigned long msr
)
227 task
->thread
.ckpt_regs
.msr
&= ~MSR_DEBUGCHANGE
;
228 task
->thread
.ckpt_regs
.msr
|= msr
& MSR_DEBUGCHANGE
;
232 static int set_user_ckpt_trap(struct task_struct
*task
, unsigned long trap
)
234 task
->thread
.ckpt_regs
.trap
= trap
& 0xfff0;
240 static int get_user_dscr(struct task_struct
*task
, unsigned long *data
)
242 *data
= task
->thread
.dscr
;
246 static int set_user_dscr(struct task_struct
*task
, unsigned long dscr
)
248 task
->thread
.dscr
= dscr
;
249 task
->thread
.dscr_inherit
= 1;
253 static int get_user_dscr(struct task_struct
*task
, unsigned long *data
)
258 static int set_user_dscr(struct task_struct
*task
, unsigned long dscr
)
265 * We prevent mucking around with the reserved area of trap
266 * which are used internally by the kernel.
268 static int set_user_trap(struct task_struct
*task
, unsigned long trap
)
270 task
->thread
.regs
->trap
= trap
& 0xfff0;
275 * Get contents of register REGNO in task TASK.
277 int ptrace_get_reg(struct task_struct
*task
, int regno
, unsigned long *data
)
279 unsigned int regs_max
;
281 if ((task
->thread
.regs
== NULL
) || !data
)
284 if (regno
== PT_MSR
) {
285 *data
= get_user_msr(task
);
289 if (regno
== PT_DSCR
)
290 return get_user_dscr(task
, data
);
294 * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
295 * no more used as a flag, lets force usr to alway see the softe value as 1
296 * which means interrupts are not soft disabled.
298 if (regno
== PT_SOFTE
) {
304 regs_max
= sizeof(struct user_pt_regs
) / sizeof(unsigned long);
305 if (regno
< regs_max
) {
306 regno
= array_index_nospec(regno
, regs_max
);
307 *data
= ((unsigned long *)task
->thread
.regs
)[regno
];
315 * Write contents of register REGNO in task TASK.
317 int ptrace_put_reg(struct task_struct
*task
, int regno
, unsigned long data
)
319 if (task
->thread
.regs
== NULL
)
323 return set_user_msr(task
, data
);
324 if (regno
== PT_TRAP
)
325 return set_user_trap(task
, data
);
326 if (regno
== PT_DSCR
)
327 return set_user_dscr(task
, data
);
329 if (regno
<= PT_MAX_PUT_REG
) {
330 regno
= array_index_nospec(regno
, PT_MAX_PUT_REG
+ 1);
331 ((unsigned long *)task
->thread
.regs
)[regno
] = data
;
337 static int gpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
338 unsigned int pos
, unsigned int count
,
339 void *kbuf
, void __user
*ubuf
)
343 if (target
->thread
.regs
== NULL
)
346 if (!FULL_REGS(target
->thread
.regs
)) {
347 /* We have a partial register set. Fill 14-31 with bogus values */
348 for (i
= 14; i
< 32; i
++)
349 target
->thread
.regs
->gpr
[i
] = NV_REG_POISON
;
352 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
354 0, offsetof(struct pt_regs
, msr
));
356 unsigned long msr
= get_user_msr(target
);
357 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &msr
,
358 offsetof(struct pt_regs
, msr
),
359 offsetof(struct pt_regs
, msr
) +
363 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
364 offsetof(struct pt_regs
, msr
) + sizeof(long));
367 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
368 &target
->thread
.regs
->orig_gpr3
,
369 offsetof(struct pt_regs
, orig_gpr3
),
370 sizeof(struct user_pt_regs
));
372 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
373 sizeof(struct user_pt_regs
), -1);
378 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
379 unsigned int pos
, unsigned int count
,
380 const void *kbuf
, const void __user
*ubuf
)
385 if (target
->thread
.regs
== NULL
)
388 CHECK_FULL_REGS(target
->thread
.regs
);
390 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
392 0, PT_MSR
* sizeof(reg
));
394 if (!ret
&& count
> 0) {
395 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
396 PT_MSR
* sizeof(reg
),
397 (PT_MSR
+ 1) * sizeof(reg
));
399 ret
= set_user_msr(target
, reg
);
402 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
403 offsetof(struct pt_regs
, msr
) + sizeof(long));
406 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
407 &target
->thread
.regs
->orig_gpr3
,
408 PT_ORIG_R3
* sizeof(reg
),
409 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
));
411 if (PT_MAX_PUT_REG
+ 1 < PT_TRAP
&& !ret
)
412 ret
= user_regset_copyin_ignore(
413 &pos
, &count
, &kbuf
, &ubuf
,
414 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
),
415 PT_TRAP
* sizeof(reg
));
417 if (!ret
&& count
> 0) {
418 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
419 PT_TRAP
* sizeof(reg
),
420 (PT_TRAP
+ 1) * sizeof(reg
));
422 ret
= set_user_trap(target
, reg
);
426 ret
= user_regset_copyin_ignore(
427 &pos
, &count
, &kbuf
, &ubuf
,
428 (PT_TRAP
+ 1) * sizeof(reg
), -1);
434 * Regardless of transactions, 'fp_state' holds the current running
435 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
436 * value of all FPR registers for the current transaction.
438 * Userspace interface buffer layout:
445 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
446 unsigned int pos
, unsigned int count
,
447 void *kbuf
, void __user
*ubuf
)
453 flush_fp_to_thread(target
);
455 /* copy to local buffer then write that out */
456 for (i
= 0; i
< 32 ; i
++)
457 buf
[i
] = target
->thread
.TS_FPR(i
);
458 buf
[32] = target
->thread
.fp_state
.fpscr
;
459 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
461 BUILD_BUG_ON(offsetof(struct thread_fp_state
, fpscr
) !=
462 offsetof(struct thread_fp_state
, fpr
[32]));
464 flush_fp_to_thread(target
);
466 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
467 &target
->thread
.fp_state
, 0, -1);
472 * Regardless of transactions, 'fp_state' holds the current running
473 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
474 * value of all FPR registers for the current transaction.
476 * Userspace interface buffer layout:
484 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
485 unsigned int pos
, unsigned int count
,
486 const void *kbuf
, const void __user
*ubuf
)
492 flush_fp_to_thread(target
);
494 for (i
= 0; i
< 32 ; i
++)
495 buf
[i
] = target
->thread
.TS_FPR(i
);
496 buf
[32] = target
->thread
.fp_state
.fpscr
;
498 /* copy to local buffer then write that out */
499 i
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
503 for (i
= 0; i
< 32 ; i
++)
504 target
->thread
.TS_FPR(i
) = buf
[i
];
505 target
->thread
.fp_state
.fpscr
= buf
[32];
508 BUILD_BUG_ON(offsetof(struct thread_fp_state
, fpscr
) !=
509 offsetof(struct thread_fp_state
, fpr
[32]));
511 flush_fp_to_thread(target
);
513 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
514 &target
->thread
.fp_state
, 0, -1);
518 #ifdef CONFIG_ALTIVEC
520 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
521 * The transfer totals 34 quadword. Quadwords 0-31 contain the
522 * corresponding vector registers. Quadword 32 contains the vscr as the
523 * last word (offset 12) within that quadword. Quadword 33 contains the
524 * vrsave as the first word (offset 0) within the quadword.
526 * This definition of the VMX state is compatible with the current PPC32
527 * ptrace interface. This allows signal handling and ptrace to use the
528 * same structures. This also simplifies the implementation of a bi-arch
529 * (combined (32- and 64-bit) gdb.
532 static int vr_active(struct task_struct
*target
,
533 const struct user_regset
*regset
)
535 flush_altivec_to_thread(target
);
536 return target
->thread
.used_vr
? regset
->n
: 0;
540 * Regardless of transactions, 'vr_state' holds the current running
541 * value of all the VMX registers and 'ckvr_state' holds the last
542 * checkpointed value of all the VMX registers for the current
543 * transaction to fall back on in case it aborts.
545 * Userspace interface buffer layout:
553 static int vr_get(struct task_struct
*target
, const struct user_regset
*regset
,
554 unsigned int pos
, unsigned int count
,
555 void *kbuf
, void __user
*ubuf
)
559 flush_altivec_to_thread(target
);
561 BUILD_BUG_ON(offsetof(struct thread_vr_state
, vscr
) !=
562 offsetof(struct thread_vr_state
, vr
[32]));
564 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
565 &target
->thread
.vr_state
, 0,
566 33 * sizeof(vector128
));
569 * Copy out only the low-order word of vrsave.
576 memset(&vrsave
, 0, sizeof(vrsave
));
578 vrsave
.word
= target
->thread
.vrsave
;
580 start
= 33 * sizeof(vector128
);
581 end
= start
+ sizeof(vrsave
);
582 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
590 * Regardless of transactions, 'vr_state' holds the current running
591 * value of all the VMX registers and 'ckvr_state' holds the last
592 * checkpointed value of all the VMX registers for the current
593 * transaction to fall back on in case it aborts.
595 * Userspace interface buffer layout:
603 static int vr_set(struct task_struct
*target
, const struct user_regset
*regset
,
604 unsigned int pos
, unsigned int count
,
605 const void *kbuf
, const void __user
*ubuf
)
609 flush_altivec_to_thread(target
);
611 BUILD_BUG_ON(offsetof(struct thread_vr_state
, vscr
) !=
612 offsetof(struct thread_vr_state
, vr
[32]));
614 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
615 &target
->thread
.vr_state
, 0,
616 33 * sizeof(vector128
));
617 if (!ret
&& count
> 0) {
619 * We use only the first word of vrsave.
626 memset(&vrsave
, 0, sizeof(vrsave
));
628 vrsave
.word
= target
->thread
.vrsave
;
630 start
= 33 * sizeof(vector128
);
631 end
= start
+ sizeof(vrsave
);
632 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
635 target
->thread
.vrsave
= vrsave
.word
;
640 #endif /* CONFIG_ALTIVEC */
644 * Currently to set and and get all the vsx state, you need to call
645 * the fp and VMX calls as well. This only get/sets the lower 32
646 * 128bit VSX registers.
649 static int vsr_active(struct task_struct
*target
,
650 const struct user_regset
*regset
)
652 flush_vsx_to_thread(target
);
653 return target
->thread
.used_vsr
? regset
->n
: 0;
657 * Regardless of transactions, 'fp_state' holds the current running
658 * value of all FPR registers and 'ckfp_state' holds the last
659 * checkpointed value of all FPR registers for the current
662 * Userspace interface buffer layout:
668 static int vsr_get(struct task_struct
*target
, const struct user_regset
*regset
,
669 unsigned int pos
, unsigned int count
,
670 void *kbuf
, void __user
*ubuf
)
675 flush_tmregs_to_thread(target
);
676 flush_fp_to_thread(target
);
677 flush_altivec_to_thread(target
);
678 flush_vsx_to_thread(target
);
680 for (i
= 0; i
< 32 ; i
++)
681 buf
[i
] = target
->thread
.fp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
683 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
684 buf
, 0, 32 * sizeof(double));
690 * Regardless of transactions, 'fp_state' holds the current running
691 * value of all FPR registers and 'ckfp_state' holds the last
692 * checkpointed value of all FPR registers for the current
695 * Userspace interface buffer layout:
701 static int vsr_set(struct task_struct
*target
, const struct user_regset
*regset
,
702 unsigned int pos
, unsigned int count
,
703 const void *kbuf
, const void __user
*ubuf
)
708 flush_tmregs_to_thread(target
);
709 flush_fp_to_thread(target
);
710 flush_altivec_to_thread(target
);
711 flush_vsx_to_thread(target
);
713 for (i
= 0; i
< 32 ; i
++)
714 buf
[i
] = target
->thread
.fp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
716 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
717 buf
, 0, 32 * sizeof(double));
719 for (i
= 0; i
< 32 ; i
++)
720 target
->thread
.fp_state
.fpr
[i
][TS_VSRLOWOFFSET
] = buf
[i
];
724 #endif /* CONFIG_VSX */
729 * For get_evrregs/set_evrregs functions 'data' has the following layout:
738 static int evr_active(struct task_struct
*target
,
739 const struct user_regset
*regset
)
741 flush_spe_to_thread(target
);
742 return target
->thread
.used_spe
? regset
->n
: 0;
745 static int evr_get(struct task_struct
*target
, const struct user_regset
*regset
,
746 unsigned int pos
, unsigned int count
,
747 void *kbuf
, void __user
*ubuf
)
751 flush_spe_to_thread(target
);
753 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
755 0, sizeof(target
->thread
.evr
));
757 BUILD_BUG_ON(offsetof(struct thread_struct
, acc
) + sizeof(u64
) !=
758 offsetof(struct thread_struct
, spefscr
));
761 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
763 sizeof(target
->thread
.evr
), -1);
768 static int evr_set(struct task_struct
*target
, const struct user_regset
*regset
,
769 unsigned int pos
, unsigned int count
,
770 const void *kbuf
, const void __user
*ubuf
)
774 flush_spe_to_thread(target
);
776 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
778 0, sizeof(target
->thread
.evr
));
780 BUILD_BUG_ON(offsetof(struct thread_struct
, acc
) + sizeof(u64
) !=
781 offsetof(struct thread_struct
, spefscr
));
784 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
786 sizeof(target
->thread
.evr
), -1);
790 #endif /* CONFIG_SPE */
792 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
794 * tm_cgpr_active - get active number of registers in CGPR
795 * @target: The target task.
796 * @regset: The user regset structure.
798 * This function checks for the active number of available
799 * regisers in transaction checkpointed GPR category.
801 static int tm_cgpr_active(struct task_struct
*target
,
802 const struct user_regset
*regset
)
804 if (!cpu_has_feature(CPU_FTR_TM
))
807 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
814 * tm_cgpr_get - get CGPR registers
815 * @target: The target task.
816 * @regset: The user regset structure.
817 * @pos: The buffer position.
818 * @count: Number of bytes to copy.
819 * @kbuf: Kernel buffer to copy from.
820 * @ubuf: User buffer to copy into.
822 * This function gets transaction checkpointed GPR registers.
824 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
825 * GPR register values for the current transaction to fall back on if it
826 * aborts in between. This function gets those checkpointed GPR registers.
827 * The userspace interface buffer layout is as follows.
830 * struct pt_regs ckpt_regs;
833 static int tm_cgpr_get(struct task_struct
*target
,
834 const struct user_regset
*regset
,
835 unsigned int pos
, unsigned int count
,
836 void *kbuf
, void __user
*ubuf
)
840 if (!cpu_has_feature(CPU_FTR_TM
))
843 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
846 flush_tmregs_to_thread(target
);
847 flush_fp_to_thread(target
);
848 flush_altivec_to_thread(target
);
850 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
851 &target
->thread
.ckpt_regs
,
852 0, offsetof(struct pt_regs
, msr
));
854 unsigned long msr
= get_user_ckpt_msr(target
);
856 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &msr
,
857 offsetof(struct pt_regs
, msr
),
858 offsetof(struct pt_regs
, msr
) +
862 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
863 offsetof(struct pt_regs
, msr
) + sizeof(long));
866 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
867 &target
->thread
.ckpt_regs
.orig_gpr3
,
868 offsetof(struct pt_regs
, orig_gpr3
),
869 sizeof(struct user_pt_regs
));
871 ret
= user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
872 sizeof(struct user_pt_regs
), -1);
878 * tm_cgpr_set - set the CGPR registers
879 * @target: The target task.
880 * @regset: The user regset structure.
881 * @pos: The buffer position.
882 * @count: Number of bytes to copy.
883 * @kbuf: Kernel buffer to copy into.
884 * @ubuf: User buffer to copy from.
886 * This function sets in transaction checkpointed GPR registers.
888 * When the transaction is active, 'ckpt_regs' holds the checkpointed
889 * GPR register values for the current transaction to fall back on if it
890 * aborts in between. This function sets those checkpointed GPR registers.
891 * The userspace interface buffer layout is as follows.
894 * struct pt_regs ckpt_regs;
897 static int tm_cgpr_set(struct task_struct
*target
,
898 const struct user_regset
*regset
,
899 unsigned int pos
, unsigned int count
,
900 const void *kbuf
, const void __user
*ubuf
)
905 if (!cpu_has_feature(CPU_FTR_TM
))
908 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
911 flush_tmregs_to_thread(target
);
912 flush_fp_to_thread(target
);
913 flush_altivec_to_thread(target
);
915 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
916 &target
->thread
.ckpt_regs
,
917 0, PT_MSR
* sizeof(reg
));
919 if (!ret
&& count
> 0) {
920 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
921 PT_MSR
* sizeof(reg
),
922 (PT_MSR
+ 1) * sizeof(reg
));
924 ret
= set_user_ckpt_msr(target
, reg
);
927 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
928 offsetof(struct pt_regs
, msr
) + sizeof(long));
931 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
932 &target
->thread
.ckpt_regs
.orig_gpr3
,
933 PT_ORIG_R3
* sizeof(reg
),
934 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
));
936 if (PT_MAX_PUT_REG
+ 1 < PT_TRAP
&& !ret
)
937 ret
= user_regset_copyin_ignore(
938 &pos
, &count
, &kbuf
, &ubuf
,
939 (PT_MAX_PUT_REG
+ 1) * sizeof(reg
),
940 PT_TRAP
* sizeof(reg
));
942 if (!ret
&& count
> 0) {
943 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, ®
,
944 PT_TRAP
* sizeof(reg
),
945 (PT_TRAP
+ 1) * sizeof(reg
));
947 ret
= set_user_ckpt_trap(target
, reg
);
951 ret
= user_regset_copyin_ignore(
952 &pos
, &count
, &kbuf
, &ubuf
,
953 (PT_TRAP
+ 1) * sizeof(reg
), -1);
959 * tm_cfpr_active - get active number of registers in CFPR
960 * @target: The target task.
961 * @regset: The user regset structure.
963 * This function checks for the active number of available
964 * regisers in transaction checkpointed FPR category.
966 static int tm_cfpr_active(struct task_struct
*target
,
967 const struct user_regset
*regset
)
969 if (!cpu_has_feature(CPU_FTR_TM
))
972 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
979 * tm_cfpr_get - get CFPR registers
980 * @target: The target task.
981 * @regset: The user regset structure.
982 * @pos: The buffer position.
983 * @count: Number of bytes to copy.
984 * @kbuf: Kernel buffer to copy from.
985 * @ubuf: User buffer to copy into.
987 * This function gets in transaction checkpointed FPR registers.
989 * When the transaction is active 'ckfp_state' holds the checkpointed
990 * values for the current transaction to fall back on if it aborts
991 * in between. This function gets those checkpointed FPR registers.
992 * The userspace interface buffer layout is as follows.
999 static int tm_cfpr_get(struct task_struct
*target
,
1000 const struct user_regset
*regset
,
1001 unsigned int pos
, unsigned int count
,
1002 void *kbuf
, void __user
*ubuf
)
1007 if (!cpu_has_feature(CPU_FTR_TM
))
1010 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1013 flush_tmregs_to_thread(target
);
1014 flush_fp_to_thread(target
);
1015 flush_altivec_to_thread(target
);
1017 /* copy to local buffer then write that out */
1018 for (i
= 0; i
< 32 ; i
++)
1019 buf
[i
] = target
->thread
.TS_CKFPR(i
);
1020 buf
[32] = target
->thread
.ckfp_state
.fpscr
;
1021 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
1025 * tm_cfpr_set - set CFPR registers
1026 * @target: The target task.
1027 * @regset: The user regset structure.
1028 * @pos: The buffer position.
1029 * @count: Number of bytes to copy.
1030 * @kbuf: Kernel buffer to copy into.
1031 * @ubuf: User buffer to copy from.
1033 * This function sets in transaction checkpointed FPR registers.
1035 * When the transaction is active 'ckfp_state' holds the checkpointed
1036 * FPR register values for the current transaction to fall back on
1037 * if it aborts in between. This function sets these checkpointed
1038 * FPR registers. The userspace interface buffer layout is as follows.
1045 static int tm_cfpr_set(struct task_struct
*target
,
1046 const struct user_regset
*regset
,
1047 unsigned int pos
, unsigned int count
,
1048 const void *kbuf
, const void __user
*ubuf
)
1053 if (!cpu_has_feature(CPU_FTR_TM
))
1056 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1059 flush_tmregs_to_thread(target
);
1060 flush_fp_to_thread(target
);
1061 flush_altivec_to_thread(target
);
1063 for (i
= 0; i
< 32; i
++)
1064 buf
[i
] = target
->thread
.TS_CKFPR(i
);
1065 buf
[32] = target
->thread
.ckfp_state
.fpscr
;
1067 /* copy to local buffer then write that out */
1068 i
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, buf
, 0, -1);
1071 for (i
= 0; i
< 32 ; i
++)
1072 target
->thread
.TS_CKFPR(i
) = buf
[i
];
1073 target
->thread
.ckfp_state
.fpscr
= buf
[32];
1078 * tm_cvmx_active - get active number of registers in CVMX
1079 * @target: The target task.
1080 * @regset: The user regset structure.
1082 * This function checks for the active number of available
1083 * regisers in checkpointed VMX category.
1085 static int tm_cvmx_active(struct task_struct
*target
,
1086 const struct user_regset
*regset
)
1088 if (!cpu_has_feature(CPU_FTR_TM
))
1091 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1098 * tm_cvmx_get - get CMVX registers
1099 * @target: The target task.
1100 * @regset: The user regset structure.
1101 * @pos: The buffer position.
1102 * @count: Number of bytes to copy.
1103 * @kbuf: Kernel buffer to copy from.
1104 * @ubuf: User buffer to copy into.
1106 * This function gets in transaction checkpointed VMX registers.
1108 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1109 * the checkpointed values for the current transaction to fall
1110 * back on if it aborts in between. The userspace interface buffer
1111 * layout is as follows.
1119 static int tm_cvmx_get(struct task_struct
*target
,
1120 const struct user_regset
*regset
,
1121 unsigned int pos
, unsigned int count
,
1122 void *kbuf
, void __user
*ubuf
)
1126 BUILD_BUG_ON(TVSO(vscr
) != TVSO(vr
[32]));
1128 if (!cpu_has_feature(CPU_FTR_TM
))
1131 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1134 /* Flush the state */
1135 flush_tmregs_to_thread(target
);
1136 flush_fp_to_thread(target
);
1137 flush_altivec_to_thread(target
);
1139 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1140 &target
->thread
.ckvr_state
, 0,
1141 33 * sizeof(vector128
));
1144 * Copy out only the low-order word of vrsave.
1150 memset(&vrsave
, 0, sizeof(vrsave
));
1151 vrsave
.word
= target
->thread
.ckvrsave
;
1152 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
1153 33 * sizeof(vector128
), -1);
1160 * tm_cvmx_set - set CMVX registers
1161 * @target: The target task.
1162 * @regset: The user regset structure.
1163 * @pos: The buffer position.
1164 * @count: Number of bytes to copy.
1165 * @kbuf: Kernel buffer to copy into.
1166 * @ubuf: User buffer to copy from.
1168 * This function sets in transaction checkpointed VMX registers.
1170 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
1171 * the checkpointed values for the current transaction to fall
1172 * back on if it aborts in between. The userspace interface buffer
1173 * layout is as follows.
1181 static int tm_cvmx_set(struct task_struct
*target
,
1182 const struct user_regset
*regset
,
1183 unsigned int pos
, unsigned int count
,
1184 const void *kbuf
, const void __user
*ubuf
)
1188 BUILD_BUG_ON(TVSO(vscr
) != TVSO(vr
[32]));
1190 if (!cpu_has_feature(CPU_FTR_TM
))
1193 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1196 flush_tmregs_to_thread(target
);
1197 flush_fp_to_thread(target
);
1198 flush_altivec_to_thread(target
);
1200 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1201 &target
->thread
.ckvr_state
, 0,
1202 33 * sizeof(vector128
));
1203 if (!ret
&& count
> 0) {
1205 * We use only the low-order word of vrsave.
1211 memset(&vrsave
, 0, sizeof(vrsave
));
1212 vrsave
.word
= target
->thread
.ckvrsave
;
1213 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &vrsave
,
1214 33 * sizeof(vector128
), -1);
1216 target
->thread
.ckvrsave
= vrsave
.word
;
1223 * tm_cvsx_active - get active number of registers in CVSX
1224 * @target: The target task.
1225 * @regset: The user regset structure.
1227 * This function checks for the active number of available
1228 * regisers in transaction checkpointed VSX category.
1230 static int tm_cvsx_active(struct task_struct
*target
,
1231 const struct user_regset
*regset
)
1233 if (!cpu_has_feature(CPU_FTR_TM
))
1236 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1239 flush_vsx_to_thread(target
);
1240 return target
->thread
.used_vsr
? regset
->n
: 0;
1244 * tm_cvsx_get - get CVSX registers
1245 * @target: The target task.
1246 * @regset: The user regset structure.
1247 * @pos: The buffer position.
1248 * @count: Number of bytes to copy.
1249 * @kbuf: Kernel buffer to copy from.
1250 * @ubuf: User buffer to copy into.
1252 * This function gets in transaction checkpointed VSX registers.
1254 * When the transaction is active 'ckfp_state' holds the checkpointed
1255 * values for the current transaction to fall back on if it aborts
1256 * in between. This function gets those checkpointed VSX registers.
1257 * The userspace interface buffer layout is as follows.
1263 static int tm_cvsx_get(struct task_struct
*target
,
1264 const struct user_regset
*regset
,
1265 unsigned int pos
, unsigned int count
,
1266 void *kbuf
, void __user
*ubuf
)
1271 if (!cpu_has_feature(CPU_FTR_TM
))
1274 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1277 /* Flush the state */
1278 flush_tmregs_to_thread(target
);
1279 flush_fp_to_thread(target
);
1280 flush_altivec_to_thread(target
);
1281 flush_vsx_to_thread(target
);
1283 for (i
= 0; i
< 32 ; i
++)
1284 buf
[i
] = target
->thread
.ckfp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
1285 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1286 buf
, 0, 32 * sizeof(double));
1292 * tm_cvsx_set - set CFPR registers
1293 * @target: The target task.
1294 * @regset: The user regset structure.
1295 * @pos: The buffer position.
1296 * @count: Number of bytes to copy.
1297 * @kbuf: Kernel buffer to copy into.
1298 * @ubuf: User buffer to copy from.
1300 * This function sets in transaction checkpointed VSX registers.
1302 * When the transaction is active 'ckfp_state' holds the checkpointed
1303 * VSX register values for the current transaction to fall back on
1304 * if it aborts in between. This function sets these checkpointed
1305 * FPR registers. The userspace interface buffer layout is as follows.
1311 static int tm_cvsx_set(struct task_struct
*target
,
1312 const struct user_regset
*regset
,
1313 unsigned int pos
, unsigned int count
,
1314 const void *kbuf
, const void __user
*ubuf
)
1319 if (!cpu_has_feature(CPU_FTR_TM
))
1322 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1325 /* Flush the state */
1326 flush_tmregs_to_thread(target
);
1327 flush_fp_to_thread(target
);
1328 flush_altivec_to_thread(target
);
1329 flush_vsx_to_thread(target
);
1331 for (i
= 0; i
< 32 ; i
++)
1332 buf
[i
] = target
->thread
.ckfp_state
.fpr
[i
][TS_VSRLOWOFFSET
];
1334 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1335 buf
, 0, 32 * sizeof(double));
1337 for (i
= 0; i
< 32 ; i
++)
1338 target
->thread
.ckfp_state
.fpr
[i
][TS_VSRLOWOFFSET
] = buf
[i
];
1344 * tm_spr_active - get active number of registers in TM SPR
1345 * @target: The target task.
1346 * @regset: The user regset structure.
1348 * This function checks the active number of available
1349 * registers in the transactional memory SPR category.
1351 static int tm_spr_active(struct task_struct
*target
,
1352 const struct user_regset
*regset
)
1354 if (!cpu_has_feature(CPU_FTR_TM
))
1361 * tm_spr_get - get the TM related SPR registers
1362 * @target: The target task.
1363 * @regset: The user regset structure.
1364 * @pos: The buffer position.
1365 * @count: Number of bytes to copy.
1366 * @kbuf: Kernel buffer to copy from.
1367 * @ubuf: User buffer to copy into.
1369 * This function gets transactional memory related SPR registers.
1370 * The userspace interface buffer layout is as follows.
1378 static int tm_spr_get(struct task_struct
*target
,
1379 const struct user_regset
*regset
,
1380 unsigned int pos
, unsigned int count
,
1381 void *kbuf
, void __user
*ubuf
)
1386 BUILD_BUG_ON(TSO(tm_tfhar
) + sizeof(u64
) != TSO(tm_texasr
));
1387 BUILD_BUG_ON(TSO(tm_texasr
) + sizeof(u64
) != TSO(tm_tfiar
));
1388 BUILD_BUG_ON(TSO(tm_tfiar
) + sizeof(u64
) != TSO(ckpt_regs
));
1390 if (!cpu_has_feature(CPU_FTR_TM
))
1393 /* Flush the states */
1394 flush_tmregs_to_thread(target
);
1395 flush_fp_to_thread(target
);
1396 flush_altivec_to_thread(target
);
1398 /* TFHAR register */
1399 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1400 &target
->thread
.tm_tfhar
, 0, sizeof(u64
));
1402 /* TEXASR register */
1404 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1405 &target
->thread
.tm_texasr
, sizeof(u64
),
1408 /* TFIAR register */
1410 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1411 &target
->thread
.tm_tfiar
,
1412 2 * sizeof(u64
), 3 * sizeof(u64
));
1417 * tm_spr_set - set the TM related SPR registers
1418 * @target: The target task.
1419 * @regset: The user regset structure.
1420 * @pos: The buffer position.
1421 * @count: Number of bytes to copy.
1422 * @kbuf: Kernel buffer to copy into.
1423 * @ubuf: User buffer to copy from.
1425 * This function sets transactional memory related SPR registers.
1426 * The userspace interface buffer layout is as follows.
1434 static int tm_spr_set(struct task_struct
*target
,
1435 const struct user_regset
*regset
,
1436 unsigned int pos
, unsigned int count
,
1437 const void *kbuf
, const void __user
*ubuf
)
1442 BUILD_BUG_ON(TSO(tm_tfhar
) + sizeof(u64
) != TSO(tm_texasr
));
1443 BUILD_BUG_ON(TSO(tm_texasr
) + sizeof(u64
) != TSO(tm_tfiar
));
1444 BUILD_BUG_ON(TSO(tm_tfiar
) + sizeof(u64
) != TSO(ckpt_regs
));
1446 if (!cpu_has_feature(CPU_FTR_TM
))
1449 /* Flush the states */
1450 flush_tmregs_to_thread(target
);
1451 flush_fp_to_thread(target
);
1452 flush_altivec_to_thread(target
);
1454 /* TFHAR register */
1455 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1456 &target
->thread
.tm_tfhar
, 0, sizeof(u64
));
1458 /* TEXASR register */
1460 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1461 &target
->thread
.tm_texasr
, sizeof(u64
),
1464 /* TFIAR register */
1466 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1467 &target
->thread
.tm_tfiar
,
1468 2 * sizeof(u64
), 3 * sizeof(u64
));
1472 static int tm_tar_active(struct task_struct
*target
,
1473 const struct user_regset
*regset
)
1475 if (!cpu_has_feature(CPU_FTR_TM
))
1478 if (MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1484 static int tm_tar_get(struct task_struct
*target
,
1485 const struct user_regset
*regset
,
1486 unsigned int pos
, unsigned int count
,
1487 void *kbuf
, void __user
*ubuf
)
1491 if (!cpu_has_feature(CPU_FTR_TM
))
1494 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1497 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1498 &target
->thread
.tm_tar
, 0, sizeof(u64
));
1502 static int tm_tar_set(struct task_struct
*target
,
1503 const struct user_regset
*regset
,
1504 unsigned int pos
, unsigned int count
,
1505 const void *kbuf
, const void __user
*ubuf
)
1509 if (!cpu_has_feature(CPU_FTR_TM
))
1512 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1515 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1516 &target
->thread
.tm_tar
, 0, sizeof(u64
));
1520 static int tm_ppr_active(struct task_struct
*target
,
1521 const struct user_regset
*regset
)
1523 if (!cpu_has_feature(CPU_FTR_TM
))
1526 if (MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1533 static int tm_ppr_get(struct task_struct
*target
,
1534 const struct user_regset
*regset
,
1535 unsigned int pos
, unsigned int count
,
1536 void *kbuf
, void __user
*ubuf
)
1540 if (!cpu_has_feature(CPU_FTR_TM
))
1543 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1546 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1547 &target
->thread
.tm_ppr
, 0, sizeof(u64
));
1551 static int tm_ppr_set(struct task_struct
*target
,
1552 const struct user_regset
*regset
,
1553 unsigned int pos
, unsigned int count
,
1554 const void *kbuf
, const void __user
*ubuf
)
1558 if (!cpu_has_feature(CPU_FTR_TM
))
1561 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1564 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1565 &target
->thread
.tm_ppr
, 0, sizeof(u64
));
1569 static int tm_dscr_active(struct task_struct
*target
,
1570 const struct user_regset
*regset
)
1572 if (!cpu_has_feature(CPU_FTR_TM
))
1575 if (MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1581 static int tm_dscr_get(struct task_struct
*target
,
1582 const struct user_regset
*regset
,
1583 unsigned int pos
, unsigned int count
,
1584 void *kbuf
, void __user
*ubuf
)
1588 if (!cpu_has_feature(CPU_FTR_TM
))
1591 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1594 ret
= user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1595 &target
->thread
.tm_dscr
, 0, sizeof(u64
));
1599 static int tm_dscr_set(struct task_struct
*target
,
1600 const struct user_regset
*regset
,
1601 unsigned int pos
, unsigned int count
,
1602 const void *kbuf
, const void __user
*ubuf
)
1606 if (!cpu_has_feature(CPU_FTR_TM
))
1609 if (!MSR_TM_ACTIVE(target
->thread
.regs
->msr
))
1612 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1613 &target
->thread
.tm_dscr
, 0, sizeof(u64
));
1616 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1619 static int ppr_get(struct task_struct
*target
,
1620 const struct user_regset
*regset
,
1621 unsigned int pos
, unsigned int count
,
1622 void *kbuf
, void __user
*ubuf
)
1624 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1625 &target
->thread
.regs
->ppr
, 0, sizeof(u64
));
1628 static int ppr_set(struct task_struct
*target
,
1629 const struct user_regset
*regset
,
1630 unsigned int pos
, unsigned int count
,
1631 const void *kbuf
, const void __user
*ubuf
)
1633 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1634 &target
->thread
.regs
->ppr
, 0, sizeof(u64
));
1637 static int dscr_get(struct task_struct
*target
,
1638 const struct user_regset
*regset
,
1639 unsigned int pos
, unsigned int count
,
1640 void *kbuf
, void __user
*ubuf
)
1642 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1643 &target
->thread
.dscr
, 0, sizeof(u64
));
1645 static int dscr_set(struct task_struct
*target
,
1646 const struct user_regset
*regset
,
1647 unsigned int pos
, unsigned int count
,
1648 const void *kbuf
, const void __user
*ubuf
)
1650 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1651 &target
->thread
.dscr
, 0, sizeof(u64
));
1654 #ifdef CONFIG_PPC_BOOK3S_64
1655 static int tar_get(struct task_struct
*target
,
1656 const struct user_regset
*regset
,
1657 unsigned int pos
, unsigned int count
,
1658 void *kbuf
, void __user
*ubuf
)
1660 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1661 &target
->thread
.tar
, 0, sizeof(u64
));
1663 static int tar_set(struct task_struct
*target
,
1664 const struct user_regset
*regset
,
1665 unsigned int pos
, unsigned int count
,
1666 const void *kbuf
, const void __user
*ubuf
)
1668 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1669 &target
->thread
.tar
, 0, sizeof(u64
));
1672 static int ebb_active(struct task_struct
*target
,
1673 const struct user_regset
*regset
)
1675 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1678 if (target
->thread
.used_ebb
)
1684 static int ebb_get(struct task_struct
*target
,
1685 const struct user_regset
*regset
,
1686 unsigned int pos
, unsigned int count
,
1687 void *kbuf
, void __user
*ubuf
)
1690 BUILD_BUG_ON(TSO(ebbrr
) + sizeof(unsigned long) != TSO(ebbhr
));
1691 BUILD_BUG_ON(TSO(ebbhr
) + sizeof(unsigned long) != TSO(bescr
));
1693 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1696 if (!target
->thread
.used_ebb
)
1699 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1700 &target
->thread
.ebbrr
, 0, 3 * sizeof(unsigned long));
1703 static int ebb_set(struct task_struct
*target
,
1704 const struct user_regset
*regset
,
1705 unsigned int pos
, unsigned int count
,
1706 const void *kbuf
, const void __user
*ubuf
)
1711 BUILD_BUG_ON(TSO(ebbrr
) + sizeof(unsigned long) != TSO(ebbhr
));
1712 BUILD_BUG_ON(TSO(ebbhr
) + sizeof(unsigned long) != TSO(bescr
));
1714 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1717 if (target
->thread
.used_ebb
)
1720 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1721 &target
->thread
.ebbrr
, 0, sizeof(unsigned long));
1724 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1725 &target
->thread
.ebbhr
, sizeof(unsigned long),
1726 2 * sizeof(unsigned long));
1729 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1730 &target
->thread
.bescr
,
1731 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
1735 static int pmu_active(struct task_struct
*target
,
1736 const struct user_regset
*regset
)
1738 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1744 static int pmu_get(struct task_struct
*target
,
1745 const struct user_regset
*regset
,
1746 unsigned int pos
, unsigned int count
,
1747 void *kbuf
, void __user
*ubuf
)
1750 BUILD_BUG_ON(TSO(siar
) + sizeof(unsigned long) != TSO(sdar
));
1751 BUILD_BUG_ON(TSO(sdar
) + sizeof(unsigned long) != TSO(sier
));
1752 BUILD_BUG_ON(TSO(sier
) + sizeof(unsigned long) != TSO(mmcr2
));
1753 BUILD_BUG_ON(TSO(mmcr2
) + sizeof(unsigned long) != TSO(mmcr0
));
1755 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1758 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1759 &target
->thread
.siar
, 0,
1760 5 * sizeof(unsigned long));
1763 static int pmu_set(struct task_struct
*target
,
1764 const struct user_regset
*regset
,
1765 unsigned int pos
, unsigned int count
,
1766 const void *kbuf
, const void __user
*ubuf
)
1771 BUILD_BUG_ON(TSO(siar
) + sizeof(unsigned long) != TSO(sdar
));
1772 BUILD_BUG_ON(TSO(sdar
) + sizeof(unsigned long) != TSO(sier
));
1773 BUILD_BUG_ON(TSO(sier
) + sizeof(unsigned long) != TSO(mmcr2
));
1774 BUILD_BUG_ON(TSO(mmcr2
) + sizeof(unsigned long) != TSO(mmcr0
));
1776 if (!cpu_has_feature(CPU_FTR_ARCH_207S
))
1779 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1780 &target
->thread
.siar
, 0,
1781 sizeof(unsigned long));
1784 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1785 &target
->thread
.sdar
, sizeof(unsigned long),
1786 2 * sizeof(unsigned long));
1789 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1790 &target
->thread
.sier
, 2 * sizeof(unsigned long),
1791 3 * sizeof(unsigned long));
1794 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1795 &target
->thread
.mmcr2
, 3 * sizeof(unsigned long),
1796 4 * sizeof(unsigned long));
1799 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1800 &target
->thread
.mmcr0
, 4 * sizeof(unsigned long),
1801 5 * sizeof(unsigned long));
1806 #ifdef CONFIG_PPC_MEM_KEYS
1807 static int pkey_active(struct task_struct
*target
,
1808 const struct user_regset
*regset
)
1810 if (!arch_pkeys_enabled())
1816 static int pkey_get(struct task_struct
*target
,
1817 const struct user_regset
*regset
,
1818 unsigned int pos
, unsigned int count
,
1819 void *kbuf
, void __user
*ubuf
)
1821 BUILD_BUG_ON(TSO(amr
) + sizeof(unsigned long) != TSO(iamr
));
1822 BUILD_BUG_ON(TSO(iamr
) + sizeof(unsigned long) != TSO(uamor
));
1824 if (!arch_pkeys_enabled())
1827 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1828 &target
->thread
.amr
, 0,
1829 ELF_NPKEY
* sizeof(unsigned long));
1832 static int pkey_set(struct task_struct
*target
,
1833 const struct user_regset
*regset
,
1834 unsigned int pos
, unsigned int count
,
1835 const void *kbuf
, const void __user
*ubuf
)
1840 if (!arch_pkeys_enabled())
1843 /* Only the AMR can be set from userspace */
1844 if (pos
!= 0 || count
!= sizeof(new_amr
))
1847 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1848 &new_amr
, 0, sizeof(new_amr
));
1852 /* UAMOR determines which bits of the AMR can be set from userspace. */
1853 target
->thread
.amr
= (new_amr
& target
->thread
.uamor
) |
1854 (target
->thread
.amr
& ~target
->thread
.uamor
);
1858 #endif /* CONFIG_PPC_MEM_KEYS */
1861 * These are our native regset flavors.
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	REGSET_TM_CGPR,		/* TM checkpointed GPR registers */
	REGSET_TM_CFPR,		/* TM checkpointed FPR registers */
	REGSET_TM_CVMX,		/* TM checkpointed VMX registers */
	REGSET_TM_CVSX,		/* TM checkpointed VSX registers */
	REGSET_TM_SPR,		/* TM specific SPR registers */
	REGSET_TM_CTAR,		/* TM checkpointed TAR register */
	REGSET_TM_CPPR,		/* TM checkpointed PPR register */
	REGSET_TM_CDSCR,	/* TM checkpointed DSCR register */
#endif
#ifdef CONFIG_PPC64
	REGSET_PPR,		/* PPR register */
	REGSET_DSCR,		/* DSCR register */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	REGSET_TAR,		/* TAR register */
	REGSET_EBB,		/* EBB registers */
	REGSET_PMR,		/* Performance Monitor Registers */
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	REGSET_PKEY,		/* AMR register */
#endif
};
1899 static const struct user_regset native_regsets
[] = {
1901 .core_note_type
= NT_PRSTATUS
, .n
= ELF_NGREG
,
1902 .size
= sizeof(long), .align
= sizeof(long),
1903 .get
= gpr_get
, .set
= gpr_set
1906 .core_note_type
= NT_PRFPREG
, .n
= ELF_NFPREG
,
1907 .size
= sizeof(double), .align
= sizeof(double),
1908 .get
= fpr_get
, .set
= fpr_set
1910 #ifdef CONFIG_ALTIVEC
1912 .core_note_type
= NT_PPC_VMX
, .n
= 34,
1913 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
1914 .active
= vr_active
, .get
= vr_get
, .set
= vr_set
1919 .core_note_type
= NT_PPC_VSX
, .n
= 32,
1920 .size
= sizeof(double), .align
= sizeof(double),
1921 .active
= vsr_active
, .get
= vsr_get
, .set
= vsr_set
1926 .core_note_type
= NT_PPC_SPE
, .n
= 35,
1927 .size
= sizeof(u32
), .align
= sizeof(u32
),
1928 .active
= evr_active
, .get
= evr_get
, .set
= evr_set
1931 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1932 [REGSET_TM_CGPR
] = {
1933 .core_note_type
= NT_PPC_TM_CGPR
, .n
= ELF_NGREG
,
1934 .size
= sizeof(long), .align
= sizeof(long),
1935 .active
= tm_cgpr_active
, .get
= tm_cgpr_get
, .set
= tm_cgpr_set
1937 [REGSET_TM_CFPR
] = {
1938 .core_note_type
= NT_PPC_TM_CFPR
, .n
= ELF_NFPREG
,
1939 .size
= sizeof(double), .align
= sizeof(double),
1940 .active
= tm_cfpr_active
, .get
= tm_cfpr_get
, .set
= tm_cfpr_set
1942 [REGSET_TM_CVMX
] = {
1943 .core_note_type
= NT_PPC_TM_CVMX
, .n
= ELF_NVMX
,
1944 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
1945 .active
= tm_cvmx_active
, .get
= tm_cvmx_get
, .set
= tm_cvmx_set
1947 [REGSET_TM_CVSX
] = {
1948 .core_note_type
= NT_PPC_TM_CVSX
, .n
= ELF_NVSX
,
1949 .size
= sizeof(double), .align
= sizeof(double),
1950 .active
= tm_cvsx_active
, .get
= tm_cvsx_get
, .set
= tm_cvsx_set
1953 .core_note_type
= NT_PPC_TM_SPR
, .n
= ELF_NTMSPRREG
,
1954 .size
= sizeof(u64
), .align
= sizeof(u64
),
1955 .active
= tm_spr_active
, .get
= tm_spr_get
, .set
= tm_spr_set
1957 [REGSET_TM_CTAR
] = {
1958 .core_note_type
= NT_PPC_TM_CTAR
, .n
= 1,
1959 .size
= sizeof(u64
), .align
= sizeof(u64
),
1960 .active
= tm_tar_active
, .get
= tm_tar_get
, .set
= tm_tar_set
1962 [REGSET_TM_CPPR
] = {
1963 .core_note_type
= NT_PPC_TM_CPPR
, .n
= 1,
1964 .size
= sizeof(u64
), .align
= sizeof(u64
),
1965 .active
= tm_ppr_active
, .get
= tm_ppr_get
, .set
= tm_ppr_set
1967 [REGSET_TM_CDSCR
] = {
1968 .core_note_type
= NT_PPC_TM_CDSCR
, .n
= 1,
1969 .size
= sizeof(u64
), .align
= sizeof(u64
),
1970 .active
= tm_dscr_active
, .get
= tm_dscr_get
, .set
= tm_dscr_set
1975 .core_note_type
= NT_PPC_PPR
, .n
= 1,
1976 .size
= sizeof(u64
), .align
= sizeof(u64
),
1977 .get
= ppr_get
, .set
= ppr_set
1980 .core_note_type
= NT_PPC_DSCR
, .n
= 1,
1981 .size
= sizeof(u64
), .align
= sizeof(u64
),
1982 .get
= dscr_get
, .set
= dscr_set
1985 #ifdef CONFIG_PPC_BOOK3S_64
1987 .core_note_type
= NT_PPC_TAR
, .n
= 1,
1988 .size
= sizeof(u64
), .align
= sizeof(u64
),
1989 .get
= tar_get
, .set
= tar_set
1992 .core_note_type
= NT_PPC_EBB
, .n
= ELF_NEBB
,
1993 .size
= sizeof(u64
), .align
= sizeof(u64
),
1994 .active
= ebb_active
, .get
= ebb_get
, .set
= ebb_set
1997 .core_note_type
= NT_PPC_PMU
, .n
= ELF_NPMU
,
1998 .size
= sizeof(u64
), .align
= sizeof(u64
),
1999 .active
= pmu_active
, .get
= pmu_get
, .set
= pmu_set
2002 #ifdef CONFIG_PPC_MEM_KEYS
2004 .core_note_type
= NT_PPC_PKEY
, .n
= ELF_NPKEY
,
2005 .size
= sizeof(u64
), .align
= sizeof(u64
),
2006 .active
= pkey_active
, .get
= pkey_get
, .set
= pkey_set
2011 static const struct user_regset_view user_ppc_native_view
= {
2012 .name
= UTS_MACHINE
, .e_machine
= ELF_ARCH
, .ei_osabi
= ELF_OSABI
,
2013 .regsets
= native_regsets
, .n
= ARRAY_SIZE(native_regsets
)
2017 #include <linux/compat.h>
2019 static int gpr32_get_common(struct task_struct
*target
,
2020 const struct user_regset
*regset
,
2021 unsigned int pos
, unsigned int count
,
2022 void *kbuf
, void __user
*ubuf
,
2023 unsigned long *regs
)
2025 compat_ulong_t
*k
= kbuf
;
2026 compat_ulong_t __user
*u
= ubuf
;
2030 count
/= sizeof(reg
);
2033 for (; count
> 0 && pos
< PT_MSR
; --count
)
2036 for (; count
> 0 && pos
< PT_MSR
; --count
)
2037 if (__put_user((compat_ulong_t
) regs
[pos
++], u
++))
2040 if (count
> 0 && pos
== PT_MSR
) {
2041 reg
= get_user_msr(target
);
2044 else if (__put_user(reg
, u
++))
2051 for (; count
> 0 && pos
< PT_REGS_COUNT
; --count
)
2054 for (; count
> 0 && pos
< PT_REGS_COUNT
; --count
)
2055 if (__put_user((compat_ulong_t
) regs
[pos
++], u
++))
2061 count
*= sizeof(reg
);
2062 return user_regset_copyout_zero(&pos
, &count
, &kbuf
, &ubuf
,
2063 PT_REGS_COUNT
* sizeof(reg
), -1);
2066 static int gpr32_set_common(struct task_struct
*target
,
2067 const struct user_regset
*regset
,
2068 unsigned int pos
, unsigned int count
,
2069 const void *kbuf
, const void __user
*ubuf
,
2070 unsigned long *regs
)
2072 const compat_ulong_t
*k
= kbuf
;
2073 const compat_ulong_t __user
*u
= ubuf
;
2077 count
/= sizeof(reg
);
2080 for (; count
> 0 && pos
< PT_MSR
; --count
)
2083 for (; count
> 0 && pos
< PT_MSR
; --count
) {
2084 if (__get_user(reg
, u
++))
2090 if (count
> 0 && pos
== PT_MSR
) {
2093 else if (__get_user(reg
, u
++))
2095 set_user_msr(target
, reg
);
2101 for (; count
> 0 && pos
<= PT_MAX_PUT_REG
; --count
)
2103 for (; count
> 0 && pos
< PT_TRAP
; --count
, ++pos
)
2106 for (; count
> 0 && pos
<= PT_MAX_PUT_REG
; --count
) {
2107 if (__get_user(reg
, u
++))
2111 for (; count
> 0 && pos
< PT_TRAP
; --count
, ++pos
)
2112 if (__get_user(reg
, u
++))
2116 if (count
> 0 && pos
== PT_TRAP
) {
2119 else if (__get_user(reg
, u
++))
2121 set_user_trap(target
, reg
);
2129 count
*= sizeof(reg
);
2130 return user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
2131 (PT_TRAP
+ 1) * sizeof(reg
), -1);
2134 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2135 static int tm_cgpr32_get(struct task_struct
*target
,
2136 const struct user_regset
*regset
,
2137 unsigned int pos
, unsigned int count
,
2138 void *kbuf
, void __user
*ubuf
)
2140 return gpr32_get_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2141 &target
->thread
.ckpt_regs
.gpr
[0]);
2144 static int tm_cgpr32_set(struct task_struct
*target
,
2145 const struct user_regset
*regset
,
2146 unsigned int pos
, unsigned int count
,
2147 const void *kbuf
, const void __user
*ubuf
)
2149 return gpr32_set_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2150 &target
->thread
.ckpt_regs
.gpr
[0]);
2152 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2154 static int gpr32_get(struct task_struct
*target
,
2155 const struct user_regset
*regset
,
2156 unsigned int pos
, unsigned int count
,
2157 void *kbuf
, void __user
*ubuf
)
2161 if (target
->thread
.regs
== NULL
)
2164 if (!FULL_REGS(target
->thread
.regs
)) {
2166 * We have a partial register set.
2167 * Fill 14-31 with bogus values.
2169 for (i
= 14; i
< 32; i
++)
2170 target
->thread
.regs
->gpr
[i
] = NV_REG_POISON
;
2172 return gpr32_get_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2173 &target
->thread
.regs
->gpr
[0]);
2176 static int gpr32_set(struct task_struct
*target
,
2177 const struct user_regset
*regset
,
2178 unsigned int pos
, unsigned int count
,
2179 const void *kbuf
, const void __user
*ubuf
)
2181 if (target
->thread
.regs
== NULL
)
2184 CHECK_FULL_REGS(target
->thread
.regs
);
2185 return gpr32_set_common(target
, regset
, pos
, count
, kbuf
, ubuf
,
2186 &target
->thread
.regs
->gpr
[0]);
2190 * These are the regset flavors matching the CONFIG_PPC32 native set.
2192 static const struct user_regset compat_regsets
[] = {
2194 .core_note_type
= NT_PRSTATUS
, .n
= ELF_NGREG
,
2195 .size
= sizeof(compat_long_t
), .align
= sizeof(compat_long_t
),
2196 .get
= gpr32_get
, .set
= gpr32_set
2199 .core_note_type
= NT_PRFPREG
, .n
= ELF_NFPREG
,
2200 .size
= sizeof(double), .align
= sizeof(double),
2201 .get
= fpr_get
, .set
= fpr_set
2203 #ifdef CONFIG_ALTIVEC
2205 .core_note_type
= NT_PPC_VMX
, .n
= 34,
2206 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
2207 .active
= vr_active
, .get
= vr_get
, .set
= vr_set
2212 .core_note_type
= NT_PPC_SPE
, .n
= 35,
2213 .size
= sizeof(u32
), .align
= sizeof(u32
),
2214 .active
= evr_active
, .get
= evr_get
, .set
= evr_set
2217 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2218 [REGSET_TM_CGPR
] = {
2219 .core_note_type
= NT_PPC_TM_CGPR
, .n
= ELF_NGREG
,
2220 .size
= sizeof(long), .align
= sizeof(long),
2221 .active
= tm_cgpr_active
,
2222 .get
= tm_cgpr32_get
, .set
= tm_cgpr32_set
2224 [REGSET_TM_CFPR
] = {
2225 .core_note_type
= NT_PPC_TM_CFPR
, .n
= ELF_NFPREG
,
2226 .size
= sizeof(double), .align
= sizeof(double),
2227 .active
= tm_cfpr_active
, .get
= tm_cfpr_get
, .set
= tm_cfpr_set
2229 [REGSET_TM_CVMX
] = {
2230 .core_note_type
= NT_PPC_TM_CVMX
, .n
= ELF_NVMX
,
2231 .size
= sizeof(vector128
), .align
= sizeof(vector128
),
2232 .active
= tm_cvmx_active
, .get
= tm_cvmx_get
, .set
= tm_cvmx_set
2234 [REGSET_TM_CVSX
] = {
2235 .core_note_type
= NT_PPC_TM_CVSX
, .n
= ELF_NVSX
,
2236 .size
= sizeof(double), .align
= sizeof(double),
2237 .active
= tm_cvsx_active
, .get
= tm_cvsx_get
, .set
= tm_cvsx_set
2240 .core_note_type
= NT_PPC_TM_SPR
, .n
= ELF_NTMSPRREG
,
2241 .size
= sizeof(u64
), .align
= sizeof(u64
),
2242 .active
= tm_spr_active
, .get
= tm_spr_get
, .set
= tm_spr_set
2244 [REGSET_TM_CTAR
] = {
2245 .core_note_type
= NT_PPC_TM_CTAR
, .n
= 1,
2246 .size
= sizeof(u64
), .align
= sizeof(u64
),
2247 .active
= tm_tar_active
, .get
= tm_tar_get
, .set
= tm_tar_set
2249 [REGSET_TM_CPPR
] = {
2250 .core_note_type
= NT_PPC_TM_CPPR
, .n
= 1,
2251 .size
= sizeof(u64
), .align
= sizeof(u64
),
2252 .active
= tm_ppr_active
, .get
= tm_ppr_get
, .set
= tm_ppr_set
2254 [REGSET_TM_CDSCR
] = {
2255 .core_note_type
= NT_PPC_TM_CDSCR
, .n
= 1,
2256 .size
= sizeof(u64
), .align
= sizeof(u64
),
2257 .active
= tm_dscr_active
, .get
= tm_dscr_get
, .set
= tm_dscr_set
2262 .core_note_type
= NT_PPC_PPR
, .n
= 1,
2263 .size
= sizeof(u64
), .align
= sizeof(u64
),
2264 .get
= ppr_get
, .set
= ppr_set
2267 .core_note_type
= NT_PPC_DSCR
, .n
= 1,
2268 .size
= sizeof(u64
), .align
= sizeof(u64
),
2269 .get
= dscr_get
, .set
= dscr_set
2272 #ifdef CONFIG_PPC_BOOK3S_64
2274 .core_note_type
= NT_PPC_TAR
, .n
= 1,
2275 .size
= sizeof(u64
), .align
= sizeof(u64
),
2276 .get
= tar_get
, .set
= tar_set
2279 .core_note_type
= NT_PPC_EBB
, .n
= ELF_NEBB
,
2280 .size
= sizeof(u64
), .align
= sizeof(u64
),
2281 .active
= ebb_active
, .get
= ebb_get
, .set
= ebb_set
2286 static const struct user_regset_view user_ppc_compat_view
= {
2287 .name
= "ppc", .e_machine
= EM_PPC
, .ei_osabi
= ELF_OSABI
,
2288 .regsets
= compat_regsets
, .n
= ARRAY_SIZE(compat_regsets
)
2290 #endif /* CONFIG_PPC64 */
2292 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
2295 if (test_tsk_thread_flag(task
, TIF_32BIT
))
2296 return &user_ppc_compat_view
;
2298 return &user_ppc_native_view
;
2302 void user_enable_single_step(struct task_struct
*task
)
2304 struct pt_regs
*regs
= task
->thread
.regs
;
2307 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2308 task
->thread
.debug
.dbcr0
&= ~DBCR0_BT
;
2309 task
->thread
.debug
.dbcr0
|= DBCR0_IDM
| DBCR0_IC
;
2310 regs
->msr
|= MSR_DE
;
2312 regs
->msr
&= ~MSR_BE
;
2313 regs
->msr
|= MSR_SE
;
2316 set_tsk_thread_flag(task
, TIF_SINGLESTEP
);
2319 void user_enable_block_step(struct task_struct
*task
)
2321 struct pt_regs
*regs
= task
->thread
.regs
;
2324 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2325 task
->thread
.debug
.dbcr0
&= ~DBCR0_IC
;
2326 task
->thread
.debug
.dbcr0
= DBCR0_IDM
| DBCR0_BT
;
2327 regs
->msr
|= MSR_DE
;
2329 regs
->msr
&= ~MSR_SE
;
2330 regs
->msr
|= MSR_BE
;
2333 set_tsk_thread_flag(task
, TIF_SINGLESTEP
);
2336 void user_disable_single_step(struct task_struct
*task
)
2338 struct pt_regs
*regs
= task
->thread
.regs
;
2341 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2343 * The logic to disable single stepping should be as
2344 * simple as turning off the Instruction Complete flag.
2345 * And, after doing so, if all debug flags are off, turn
2346 * off DBCR0(IDM) and MSR(DE) .... Torez
2348 task
->thread
.debug
.dbcr0
&= ~(DBCR0_IC
|DBCR0_BT
);
2350 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
2352 if (!DBCR_ACTIVE_EVENTS(task
->thread
.debug
.dbcr0
,
2353 task
->thread
.debug
.dbcr1
)) {
2355 * All debug events were off.....
2357 task
->thread
.debug
.dbcr0
&= ~DBCR0_IDM
;
2358 regs
->msr
&= ~MSR_DE
;
2361 regs
->msr
&= ~(MSR_SE
| MSR_BE
);
2364 clear_tsk_thread_flag(task
, TIF_SINGLESTEP
);
2367 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2368 void ptrace_triggered(struct perf_event
*bp
,
2369 struct perf_sample_data
*data
, struct pt_regs
*regs
)
2371 struct perf_event_attr attr
;
2374 * Disable the breakpoint request here since ptrace has defined a
2375 * one-shot behaviour for breakpoint exceptions in PPC64.
2376 * The SIGTRAP signal is generated automatically for us in do_dabr().
2377 * We don't have to do anything about that here
2380 attr
.disabled
= true;
2381 modify_user_hw_breakpoint(bp
, &attr
);
2383 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2385 static int ptrace_set_debugreg(struct task_struct
*task
, unsigned long addr
,
2388 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2390 struct thread_struct
*thread
= &(task
->thread
);
2391 struct perf_event
*bp
;
2392 struct perf_event_attr attr
;
2393 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2394 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2396 struct arch_hw_breakpoint hw_brk
;
2399 /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
2400 * For embedded processors we support one DAC and no IAC's at the
2406 /* The bottom 3 bits in dabr are flags */
2407 if ((data
& ~0x7UL
) >= TASK_SIZE
)
2410 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2411 /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
2412 * It was assumed, on previous implementations, that 3 bits were
2413 * passed together with the data address, fitting the design of the
2414 * DABR register, as follows:
2418 * bit 2: Breakpoint translation
2420 * Thus, we use them here as so.
2423 /* Ensure breakpoint translation bit is set */
2424 if (data
&& !(data
& HW_BRK_TYPE_TRANSLATE
))
2426 hw_brk
.address
= data
& (~HW_BRK_TYPE_DABR
);
2427 hw_brk
.type
= (data
& HW_BRK_TYPE_DABR
) | HW_BRK_TYPE_PRIV_ALL
;
2428 hw_brk
.len
= DABR_MAX_LEN
;
2429 hw_brk
.hw_len
= DABR_MAX_LEN
;
2430 set_bp
= (data
) && (hw_brk
.type
& HW_BRK_TYPE_RDWR
);
2431 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2432 bp
= thread
->ptrace_bps
[0];
2435 unregister_hw_breakpoint(bp
);
2436 thread
->ptrace_bps
[0] = NULL
;
2442 attr
.bp_addr
= hw_brk
.address
;
2443 attr
.bp_len
= DABR_MAX_LEN
;
2444 arch_bp_generic_fields(hw_brk
.type
, &attr
.bp_type
);
2446 /* Enable breakpoint */
2447 attr
.disabled
= false;
2449 ret
= modify_user_hw_breakpoint(bp
, &attr
);
2453 thread
->ptrace_bps
[0] = bp
;
2454 thread
->hw_brk
= hw_brk
;
2458 /* Create a new breakpoint request if one doesn't exist already */
2459 hw_breakpoint_init(&attr
);
2460 attr
.bp_addr
= hw_brk
.address
;
2461 attr
.bp_len
= DABR_MAX_LEN
;
2462 arch_bp_generic_fields(hw_brk
.type
,
2465 thread
->ptrace_bps
[0] = bp
= register_user_hw_breakpoint(&attr
,
2466 ptrace_triggered
, NULL
, task
);
2468 thread
->ptrace_bps
[0] = NULL
;
2472 #else /* !CONFIG_HAVE_HW_BREAKPOINT */
2473 if (set_bp
&& (!ppc_breakpoint_available()))
2475 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2476 task
->thread
.hw_brk
= hw_brk
;
2477 #else /* CONFIG_PPC_ADV_DEBUG_REGS */
2478 /* As described above, it was assumed 3 bits were passed with the data
2479 * address, but we will assume only the mode bits will be passed
2480 * as to not cause alignment restrictions for DAC-based processors.
2483 /* DAC's hold the whole address without any mode flags */
2484 task
->thread
.debug
.dac1
= data
& ~0x3UL
;
2486 if (task
->thread
.debug
.dac1
== 0) {
2487 dbcr_dac(task
) &= ~(DBCR_DAC1R
| DBCR_DAC1W
);
2488 if (!DBCR_ACTIVE_EVENTS(task
->thread
.debug
.dbcr0
,
2489 task
->thread
.debug
.dbcr1
)) {
2490 task
->thread
.regs
->msr
&= ~MSR_DE
;
2491 task
->thread
.debug
.dbcr0
&= ~DBCR0_IDM
;
2496 /* Read or Write bits must be set */
2498 if (!(data
& 0x3UL
))
2501 /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
2503 task
->thread
.debug
.dbcr0
|= DBCR0_IDM
;
2505 /* Check for write and read flags and set DBCR0
2507 dbcr_dac(task
) &= ~(DBCR_DAC1R
|DBCR_DAC1W
);
2509 dbcr_dac(task
) |= DBCR_DAC1R
;
2511 dbcr_dac(task
) |= DBCR_DAC1W
;
2512 task
->thread
.regs
->msr
|= MSR_DE
;
2513 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2518 * Called by kernel/ptrace.c when detaching..
2520 * Make sure single step bits etc are not set.
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
2528 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2529 static long set_instruction_bp(struct task_struct
*child
,
2530 struct ppc_hw_breakpoint
*bp_info
)
2533 int slot1_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC1
) != 0);
2534 int slot2_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC2
) != 0);
2535 int slot3_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC3
) != 0);
2536 int slot4_in_use
= ((child
->thread
.debug
.dbcr0
& DBCR0_IAC4
) != 0);
2538 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
)
2540 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
)
2543 if (bp_info
->addr
>= TASK_SIZE
)
2546 if (bp_info
->addr_mode
!= PPC_BREAKPOINT_MODE_EXACT
) {
2548 /* Make sure range is valid. */
2549 if (bp_info
->addr2
>= TASK_SIZE
)
2552 /* We need a pair of IAC regsisters */
2553 if ((!slot1_in_use
) && (!slot2_in_use
)) {
2555 child
->thread
.debug
.iac1
= bp_info
->addr
;
2556 child
->thread
.debug
.iac2
= bp_info
->addr2
;
2557 child
->thread
.debug
.dbcr0
|= DBCR0_IAC1
;
2558 if (bp_info
->addr_mode
==
2559 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE
)
2560 dbcr_iac_range(child
) |= DBCR_IAC12X
;
2562 dbcr_iac_range(child
) |= DBCR_IAC12I
;
2563 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2564 } else if ((!slot3_in_use
) && (!slot4_in_use
)) {
2566 child
->thread
.debug
.iac3
= bp_info
->addr
;
2567 child
->thread
.debug
.iac4
= bp_info
->addr2
;
2568 child
->thread
.debug
.dbcr0
|= DBCR0_IAC3
;
2569 if (bp_info
->addr_mode
==
2570 PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE
)
2571 dbcr_iac_range(child
) |= DBCR_IAC34X
;
2573 dbcr_iac_range(child
) |= DBCR_IAC34I
;
2578 /* We only need one. If possible leave a pair free in
2579 * case a range is needed later
2581 if (!slot1_in_use
) {
2583 * Don't use iac1 if iac1-iac2 are free and either
2584 * iac3 or iac4 (but not both) are free
2586 if (slot2_in_use
|| (slot3_in_use
== slot4_in_use
)) {
2588 child
->thread
.debug
.iac1
= bp_info
->addr
;
2589 child
->thread
.debug
.dbcr0
|= DBCR0_IAC1
;
2593 if (!slot2_in_use
) {
2595 child
->thread
.debug
.iac2
= bp_info
->addr
;
2596 child
->thread
.debug
.dbcr0
|= DBCR0_IAC2
;
2597 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2598 } else if (!slot3_in_use
) {
2600 child
->thread
.debug
.iac3
= bp_info
->addr
;
2601 child
->thread
.debug
.dbcr0
|= DBCR0_IAC3
;
2602 } else if (!slot4_in_use
) {
2604 child
->thread
.debug
.iac4
= bp_info
->addr
;
2605 child
->thread
.debug
.dbcr0
|= DBCR0_IAC4
;
2611 child
->thread
.debug
.dbcr0
|= DBCR0_IDM
;
2612 child
->thread
.regs
->msr
|= MSR_DE
;
2617 static int del_instruction_bp(struct task_struct
*child
, int slot
)
2621 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC1
) == 0)
2624 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
) {
2625 /* address range - clear slots 1 & 2 */
2626 child
->thread
.debug
.iac2
= 0;
2627 dbcr_iac_range(child
) &= ~DBCR_IAC12MODE
;
2629 child
->thread
.debug
.iac1
= 0;
2630 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC1
;
2633 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC2
) == 0)
2636 if (dbcr_iac_range(child
) & DBCR_IAC12MODE
)
2637 /* used in a range */
2639 child
->thread
.debug
.iac2
= 0;
2640 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC2
;
2642 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
2644 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC3
) == 0)
2647 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
) {
2648 /* address range - clear slots 3 & 4 */
2649 child
->thread
.debug
.iac4
= 0;
2650 dbcr_iac_range(child
) &= ~DBCR_IAC34MODE
;
2652 child
->thread
.debug
.iac3
= 0;
2653 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC3
;
2656 if ((child
->thread
.debug
.dbcr0
& DBCR0_IAC4
) == 0)
2659 if (dbcr_iac_range(child
) & DBCR_IAC34MODE
)
2660 /* Used in a range */
2662 child
->thread
.debug
.iac4
= 0;
2663 child
->thread
.debug
.dbcr0
&= ~DBCR0_IAC4
;
2672 static int set_dac(struct task_struct
*child
, struct ppc_hw_breakpoint
*bp_info
)
2675 (bp_info
->condition_mode
>> PPC_BREAKPOINT_CONDITION_BE_SHIFT
)
2677 int condition_mode
=
2678 bp_info
->condition_mode
& PPC_BREAKPOINT_CONDITION_MODE
;
2681 if (byte_enable
&& (condition_mode
== 0))
2684 if (bp_info
->addr
>= TASK_SIZE
)
2687 if ((dbcr_dac(child
) & (DBCR_DAC1R
| DBCR_DAC1W
)) == 0) {
2689 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
2690 dbcr_dac(child
) |= DBCR_DAC1R
;
2691 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
2692 dbcr_dac(child
) |= DBCR_DAC1W
;
2693 child
->thread
.debug
.dac1
= (unsigned long)bp_info
->addr
;
2694 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2696 child
->thread
.debug
.dvc1
=
2697 (unsigned long)bp_info
->condition_value
;
2698 child
->thread
.debug
.dbcr2
|=
2699 ((byte_enable
<< DBCR2_DVC1BE_SHIFT
) |
2700 (condition_mode
<< DBCR2_DVC1M_SHIFT
));
2703 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2704 } else if (child
->thread
.debug
.dbcr2
& DBCR2_DAC12MODE
) {
2705 /* Both dac1 and dac2 are part of a range */
2708 } else if ((dbcr_dac(child
) & (DBCR_DAC2R
| DBCR_DAC2W
)) == 0) {
2710 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
2711 dbcr_dac(child
) |= DBCR_DAC2R
;
2712 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
2713 dbcr_dac(child
) |= DBCR_DAC2W
;
2714 child
->thread
.debug
.dac2
= (unsigned long)bp_info
->addr
;
2715 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2717 child
->thread
.debug
.dvc2
=
2718 (unsigned long)bp_info
->condition_value
;
2719 child
->thread
.debug
.dbcr2
|=
2720 ((byte_enable
<< DBCR2_DVC2BE_SHIFT
) |
2721 (condition_mode
<< DBCR2_DVC2M_SHIFT
));
2726 child
->thread
.debug
.dbcr0
|= DBCR0_IDM
;
2727 child
->thread
.regs
->msr
|= MSR_DE
;
2732 static int del_dac(struct task_struct
*child
, int slot
)
2735 if ((dbcr_dac(child
) & (DBCR_DAC1R
| DBCR_DAC1W
)) == 0)
2738 child
->thread
.debug
.dac1
= 0;
2739 dbcr_dac(child
) &= ~(DBCR_DAC1R
| DBCR_DAC1W
);
2740 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2741 if (child
->thread
.debug
.dbcr2
& DBCR2_DAC12MODE
) {
2742 child
->thread
.debug
.dac2
= 0;
2743 child
->thread
.debug
.dbcr2
&= ~DBCR2_DAC12MODE
;
2745 child
->thread
.debug
.dbcr2
&= ~(DBCR2_DVC1M
| DBCR2_DVC1BE
);
2747 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2748 child
->thread
.debug
.dvc1
= 0;
2750 } else if (slot
== 2) {
2751 if ((dbcr_dac(child
) & (DBCR_DAC2R
| DBCR_DAC2W
)) == 0)
2754 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2755 if (child
->thread
.debug
.dbcr2
& DBCR2_DAC12MODE
)
2756 /* Part of a range */
2758 child
->thread
.debug
.dbcr2
&= ~(DBCR2_DVC2M
| DBCR2_DVC2BE
);
2760 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
2761 child
->thread
.debug
.dvc2
= 0;
2763 child
->thread
.debug
.dac2
= 0;
2764 dbcr_dac(child
) &= ~(DBCR_DAC2R
| DBCR_DAC2W
);
2770 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * Install a ranged/masked data breakpoint using the dac1/dac2 pair for
 * @child.  Consumes both DAC registers; returns the fixed handle 5 on
 * success (presumably the range handle used by PPC_PTRACE_DELHWDEBUG —
 * confirm against the ptrace ABI docs) or a negative errno.
 */
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2827 static long ppc_set_hwdebug(struct task_struct
*child
,
2828 struct ppc_hw_breakpoint
*bp_info
)
2830 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2832 struct thread_struct
*thread
= &(child
->thread
);
2833 struct perf_event
*bp
;
2834 struct perf_event_attr attr
;
2835 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2836 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
2837 struct arch_hw_breakpoint brk
;
2840 if (bp_info
->version
!= 1)
2842 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2844 * Check for invalid flags and combinations
2846 if ((bp_info
->trigger_type
== 0) ||
2847 (bp_info
->trigger_type
& ~(PPC_BREAKPOINT_TRIGGER_EXECUTE
|
2848 PPC_BREAKPOINT_TRIGGER_RW
)) ||
2849 (bp_info
->addr_mode
& ~PPC_BREAKPOINT_MODE_MASK
) ||
2850 (bp_info
->condition_mode
&
2851 ~(PPC_BREAKPOINT_CONDITION_MODE
|
2852 PPC_BREAKPOINT_CONDITION_BE_ALL
)))
2854 #if CONFIG_PPC_ADV_DEBUG_DVCS == 0
2855 if (bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
)
2859 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_EXECUTE
) {
2860 if ((bp_info
->trigger_type
!= PPC_BREAKPOINT_TRIGGER_EXECUTE
) ||
2861 (bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
))
2863 return set_instruction_bp(child
, bp_info
);
2865 if (bp_info
->addr_mode
== PPC_BREAKPOINT_MODE_EXACT
)
2866 return set_dac(child
, bp_info
);
2868 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
2869 return set_dac_range(child
, bp_info
);
2873 #else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2875 * We only support one data breakpoint
2877 if ((bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_RW
) == 0 ||
2878 (bp_info
->trigger_type
& ~PPC_BREAKPOINT_TRIGGER_RW
) != 0 ||
2879 bp_info
->condition_mode
!= PPC_BREAKPOINT_CONDITION_NONE
)
2882 if ((unsigned long)bp_info
->addr
>= TASK_SIZE
)
2885 brk
.address
= bp_info
->addr
& ~HW_BREAKPOINT_ALIGN
;
2886 brk
.type
= HW_BRK_TYPE_TRANSLATE
;
2887 brk
.len
= DABR_MAX_LEN
;
2888 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_READ
)
2889 brk
.type
|= HW_BRK_TYPE_READ
;
2890 if (bp_info
->trigger_type
& PPC_BREAKPOINT_TRIGGER_WRITE
)
2891 brk
.type
|= HW_BRK_TYPE_WRITE
;
2892 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2893 if (bp_info
->addr_mode
== PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE
)
2894 len
= bp_info
->addr2
- bp_info
->addr
;
2895 else if (bp_info
->addr_mode
== PPC_BREAKPOINT_MODE_EXACT
)
2899 bp
= thread
->ptrace_bps
[0];
2903 /* Create a new breakpoint request if one doesn't exist already */
2904 hw_breakpoint_init(&attr
);
2905 attr
.bp_addr
= (unsigned long)bp_info
->addr
;
2907 arch_bp_generic_fields(brk
.type
, &attr
.bp_type
);
2909 thread
->ptrace_bps
[0] = bp
= register_user_hw_breakpoint(&attr
,
2910 ptrace_triggered
, NULL
, child
);
2912 thread
->ptrace_bps
[0] = NULL
;
2917 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2919 if (bp_info
->addr_mode
!= PPC_BREAKPOINT_MODE_EXACT
)
2922 if (child
->thread
.hw_brk
.address
)
2925 if (!ppc_breakpoint_available())
2928 child
->thread
.hw_brk
= brk
;
2931 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
2934 static long ppc_del_hwdebug(struct task_struct
*child
, long data
)
2936 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2938 struct thread_struct
*thread
= &(child
->thread
);
2939 struct perf_event
*bp
;
2940 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2941 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
2945 rc
= del_instruction_bp(child
, (int)data
);
2947 rc
= del_dac(child
, (int)data
- 4);
2950 if (!DBCR_ACTIVE_EVENTS(child
->thread
.debug
.dbcr0
,
2951 child
->thread
.debug
.dbcr1
)) {
2952 child
->thread
.debug
.dbcr0
&= ~DBCR0_IDM
;
2953 child
->thread
.regs
->msr
&= ~MSR_DE
;
2961 #ifdef CONFIG_HAVE_HW_BREAKPOINT
2962 bp
= thread
->ptrace_bps
[0];
2964 unregister_hw_breakpoint(bp
);
2965 thread
->ptrace_bps
[0] = NULL
;
2969 #else /* CONFIG_HAVE_HW_BREAKPOINT */
2970 if (child
->thread
.hw_brk
.address
== 0)
2973 child
->thread
.hw_brk
.address
= 0;
2974 child
->thread
.hw_brk
.type
= 0;
2975 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
2981 long arch_ptrace(struct task_struct
*child
, long request
,
2982 unsigned long addr
, unsigned long data
)
2985 void __user
*datavp
= (void __user
*) data
;
2986 unsigned long __user
*datalp
= datavp
;
2989 /* read the word at location addr in the USER area. */
2990 case PTRACE_PEEKUSR
: {
2991 unsigned long index
, tmp
;
2994 /* convert to index and check */
2997 if ((addr
& 3) || (index
> PT_FPSCR
)
2998 || (child
->thread
.regs
== NULL
))
3001 if ((addr
& 7) || (index
> PT_FPSCR
))
3005 CHECK_FULL_REGS(child
->thread
.regs
);
3006 if (index
< PT_FPR0
) {
3007 ret
= ptrace_get_reg(child
, (int) index
, &tmp
);
3011 unsigned int fpidx
= index
- PT_FPR0
;
3013 flush_fp_to_thread(child
);
3014 if (fpidx
< (PT_FPSCR
- PT_FPR0
))
3015 memcpy(&tmp
, &child
->thread
.TS_FPR(fpidx
),
3018 tmp
= child
->thread
.fp_state
.fpscr
;
3020 ret
= put_user(tmp
, datalp
);
3024 /* write the word at location addr in the USER area */
3025 case PTRACE_POKEUSR
: {
3026 unsigned long index
;
3029 /* convert to index and check */
3032 if ((addr
& 3) || (index
> PT_FPSCR
)
3033 || (child
->thread
.regs
== NULL
))
3036 if ((addr
& 7) || (index
> PT_FPSCR
))
3040 CHECK_FULL_REGS(child
->thread
.regs
);
3041 if (index
< PT_FPR0
) {
3042 ret
= ptrace_put_reg(child
, index
, data
);
3044 unsigned int fpidx
= index
- PT_FPR0
;
3046 flush_fp_to_thread(child
);
3047 if (fpidx
< (PT_FPSCR
- PT_FPR0
))
3048 memcpy(&child
->thread
.TS_FPR(fpidx
), &data
,
3051 child
->thread
.fp_state
.fpscr
= data
;
3057 case PPC_PTRACE_GETHWDBGINFO
: {
3058 struct ppc_debug_info dbginfo
;
3060 dbginfo
.version
= 1;
3061 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3062 dbginfo
.num_instruction_bps
= CONFIG_PPC_ADV_DEBUG_IACS
;
3063 dbginfo
.num_data_bps
= CONFIG_PPC_ADV_DEBUG_DACS
;
3064 dbginfo
.num_condition_regs
= CONFIG_PPC_ADV_DEBUG_DVCS
;
3065 dbginfo
.data_bp_alignment
= 4;
3066 dbginfo
.sizeof_condition
= 4;
3067 dbginfo
.features
= PPC_DEBUG_FEATURE_INSN_BP_RANGE
|
3068 PPC_DEBUG_FEATURE_INSN_BP_MASK
;
3069 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3071 PPC_DEBUG_FEATURE_DATA_BP_RANGE
|
3072 PPC_DEBUG_FEATURE_DATA_BP_MASK
;
3074 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3075 dbginfo
.num_instruction_bps
= 0;
3076 if (ppc_breakpoint_available())
3077 dbginfo
.num_data_bps
= 1;
3079 dbginfo
.num_data_bps
= 0;
3080 dbginfo
.num_condition_regs
= 0;
3082 dbginfo
.data_bp_alignment
= 8;
3084 dbginfo
.data_bp_alignment
= 4;
3086 dbginfo
.sizeof_condition
= 0;
3087 #ifdef CONFIG_HAVE_HW_BREAKPOINT
3088 dbginfo
.features
= PPC_DEBUG_FEATURE_DATA_BP_RANGE
;
3090 dbginfo
.features
|= PPC_DEBUG_FEATURE_DATA_BP_DAWR
;
3092 dbginfo
.features
= 0;
3093 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
3094 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3096 if (copy_to_user(datavp
, &dbginfo
,
3097 sizeof(struct ppc_debug_info
)))
3102 case PPC_PTRACE_SETHWDEBUG
: {
3103 struct ppc_hw_breakpoint bp_info
;
3105 if (copy_from_user(&bp_info
, datavp
,
3106 sizeof(struct ppc_hw_breakpoint
)))
3108 return ppc_set_hwdebug(child
, &bp_info
);
3111 case PPC_PTRACE_DELHWDEBUG
: {
3112 ret
= ppc_del_hwdebug(child
, data
);
3116 case PTRACE_GET_DEBUGREG
: {
3117 #ifndef CONFIG_PPC_ADV_DEBUG_REGS
3118 unsigned long dabr_fake
;
3121 /* We only support one DABR and no IABRS at the moment */
3124 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
3125 ret
= put_user(child
->thread
.debug
.dac1
, datalp
);
3127 dabr_fake
= ((child
->thread
.hw_brk
.address
& (~HW_BRK_TYPE_DABR
)) |
3128 (child
->thread
.hw_brk
.type
& HW_BRK_TYPE_DABR
));
3129 ret
= put_user(dabr_fake
, datalp
);
3134 case PTRACE_SET_DEBUGREG
:
3135 ret
= ptrace_set_debugreg(child
, addr
, data
);
3139 case PTRACE_GETREGS64
:
3141 case PTRACE_GETREGS
: /* Get all pt_regs from the child. */
3142 return copy_regset_to_user(child
, &user_ppc_native_view
,
3144 0, sizeof(struct user_pt_regs
),
3148 case PTRACE_SETREGS64
:
3150 case PTRACE_SETREGS
: /* Set all gp regs in the child. */
3151 return copy_regset_from_user(child
, &user_ppc_native_view
,
3153 0, sizeof(struct user_pt_regs
),
3156 case PTRACE_GETFPREGS
: /* Get the child FPU state (FPR0...31 + FPSCR) */
3157 return copy_regset_to_user(child
, &user_ppc_native_view
,
3159 0, sizeof(elf_fpregset_t
),
3162 case PTRACE_SETFPREGS
: /* Set the child FPU state (FPR0...31 + FPSCR) */
3163 return copy_regset_from_user(child
, &user_ppc_native_view
,
3165 0, sizeof(elf_fpregset_t
),
3168 #ifdef CONFIG_ALTIVEC
3169 case PTRACE_GETVRREGS
:
3170 return copy_regset_to_user(child
, &user_ppc_native_view
,
3172 0, (33 * sizeof(vector128
) +
3176 case PTRACE_SETVRREGS
:
3177 return copy_regset_from_user(child
, &user_ppc_native_view
,
3179 0, (33 * sizeof(vector128
) +
3184 case PTRACE_GETVSRREGS
:
3185 return copy_regset_to_user(child
, &user_ppc_native_view
,
3187 0, 32 * sizeof(double),
3190 case PTRACE_SETVSRREGS
:
3191 return copy_regset_from_user(child
, &user_ppc_native_view
,
3193 0, 32 * sizeof(double),
3197 case PTRACE_GETEVRREGS
:
3198 /* Get the child spe register state. */
3199 return copy_regset_to_user(child
, &user_ppc_native_view
,
3200 REGSET_SPE
, 0, 35 * sizeof(u32
),
3203 case PTRACE_SETEVRREGS
:
3204 /* Set the child spe register state. */
3205 return copy_regset_from_user(child
, &user_ppc_native_view
,
3206 REGSET_SPE
, 0, 35 * sizeof(u32
),
3211 ret
= ptrace_request(child
, request
, addr
, data
);
#ifdef CONFIG_SECCOMP
/*
 * Run seccomp filtering for the current syscall.  Returns non-zero when
 * the syscall must be skipped, 0 when it may proceed.
 */
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
3256 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
3257 * @regs: the pt_regs of the task to trace (current)
3259 * Performs various types of tracing on syscall entry. This includes seccomp,
3260 * ptrace, syscall tracepoints and audit.
3262 * The pt_regs are potentially visible to userspace via ptrace, so their
3265 * One or more of the tracers may modify the contents of pt_regs, in particular
3266 * to modify arguments or even the syscall number itself.
3268 * It's also possible that a tracer can choose to reject the system call. In
3269 * that case this function will return an illegal syscall number, and will put
3270 * an appropriate return value in regs->r3.
3272 * Return: the (possibly changed) syscall number.
3274 long do_syscall_trace_enter(struct pt_regs
*regs
)
3280 flags
= READ_ONCE(current_thread_info()->flags
) &
3281 (_TIF_SYSCALL_EMU
| _TIF_SYSCALL_TRACE
);
3284 int rc
= tracehook_report_syscall_entry(regs
);
3286 if (unlikely(flags
& _TIF_SYSCALL_EMU
)) {
3288 * A nonzero return code from
3289 * tracehook_report_syscall_entry() tells us to prevent
3290 * the syscall execution, but we are not going to
3291 * execute it anyway.
3293 * Returning -1 will skip the syscall execution. We want
3294 * to avoid clobbering any registers, so we don't goto
3295 * the skip label below.
3302 * The tracer decided to abort the syscall. Note that
3303 * the tracer may also just change regs->gpr[0] to an
3304 * invalid syscall number, that is handled below on the
3311 /* Run seccomp after ptrace; allow it to set gpr[3]. */
3312 if (do_seccomp(regs
))
3315 /* Avoid trace and audit when syscall is invalid. */
3316 if (regs
->gpr
[0] >= NR_syscalls
)
3319 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
3320 trace_sys_enter(regs
, regs
->gpr
[0]);
3323 if (!is_32bit_task())
3324 audit_syscall_entry(regs
->gpr
[0], regs
->gpr
[3], regs
->gpr
[4],
3325 regs
->gpr
[5], regs
->gpr
[6]);
3328 audit_syscall_entry(regs
->gpr
[0],
3329 regs
->gpr
[3] & 0xffffffff,
3330 regs
->gpr
[4] & 0xffffffff,
3331 regs
->gpr
[5] & 0xffffffff,
3332 regs
->gpr
[6] & 0xffffffff);
3334 /* Return the possibly modified but valid syscall number */
3335 return regs
->gpr
[0];
3339 * If we are aborting explicitly, or if the syscall number is
3340 * now invalid, set the return value to -ENOSYS.
3342 regs
->gpr
[3] = -ENOSYS
;
3346 void do_syscall_trace_leave(struct pt_regs
*regs
)
3350 audit_syscall_exit(regs
);
3352 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
3353 trace_sys_exit(regs
, regs
->result
);
3355 step
= test_thread_flag(TIF_SINGLESTEP
);
3356 if (step
|| test_thread_flag(TIF_SYSCALL_TRACE
))
3357 tracehook_report_syscall_exit(regs
, step
);
3362 void __init
pt_regs_check(void);
3365 * Dummy function, its purpose is to break the build if struct pt_regs and
3366 * struct user_pt_regs don't match.
3368 void __init
pt_regs_check(void)
3370 BUILD_BUG_ON(offsetof(struct pt_regs
, gpr
) !=
3371 offsetof(struct user_pt_regs
, gpr
));
3372 BUILD_BUG_ON(offsetof(struct pt_regs
, nip
) !=
3373 offsetof(struct user_pt_regs
, nip
));
3374 BUILD_BUG_ON(offsetof(struct pt_regs
, msr
) !=
3375 offsetof(struct user_pt_regs
, msr
));
3376 BUILD_BUG_ON(offsetof(struct pt_regs
, msr
) !=
3377 offsetof(struct user_pt_regs
, msr
));
3378 BUILD_BUG_ON(offsetof(struct pt_regs
, orig_gpr3
) !=
3379 offsetof(struct user_pt_regs
, orig_gpr3
));
3380 BUILD_BUG_ON(offsetof(struct pt_regs
, ctr
) !=
3381 offsetof(struct user_pt_regs
, ctr
));
3382 BUILD_BUG_ON(offsetof(struct pt_regs
, link
) !=
3383 offsetof(struct user_pt_regs
, link
));
3384 BUILD_BUG_ON(offsetof(struct pt_regs
, xer
) !=
3385 offsetof(struct user_pt_regs
, xer
));
3386 BUILD_BUG_ON(offsetof(struct pt_regs
, ccr
) !=
3387 offsetof(struct user_pt_regs
, ccr
));
3388 #ifdef __powerpc64__
3389 BUILD_BUG_ON(offsetof(struct pt_regs
, softe
) !=
3390 offsetof(struct user_pt_regs
, softe
));
3392 BUILD_BUG_ON(offsetof(struct pt_regs
, mq
) !=
3393 offsetof(struct user_pt_regs
, mq
));
3395 BUILD_BUG_ON(offsetof(struct pt_regs
, trap
) !=
3396 offsetof(struct user_pt_regs
, trap
));
3397 BUILD_BUG_ON(offsetof(struct pt_regs
, dar
) !=
3398 offsetof(struct user_pt_regs
, dar
));
3399 BUILD_BUG_ON(offsetof(struct pt_regs
, dsisr
) !=
3400 offsetof(struct user_pt_regs
, dsisr
));
3401 BUILD_BUG_ON(offsetof(struct pt_regs
, result
) !=
3402 offsetof(struct user_pt_regs
, result
));
3404 BUILD_BUG_ON(sizeof(struct user_pt_regs
) > sizeof(struct pt_regs
));
3406 // Now check that the pt_regs offsets match the uapi #defines
3407 #define CHECK_REG(_pt, _reg) \
3408 BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
3409 sizeof(unsigned long)));
3411 CHECK_REG(PT_R0
, gpr
[0]);
3412 CHECK_REG(PT_R1
, gpr
[1]);
3413 CHECK_REG(PT_R2
, gpr
[2]);
3414 CHECK_REG(PT_R3
, gpr
[3]);
3415 CHECK_REG(PT_R4
, gpr
[4]);
3416 CHECK_REG(PT_R5
, gpr
[5]);
3417 CHECK_REG(PT_R6
, gpr
[6]);
3418 CHECK_REG(PT_R7
, gpr
[7]);
3419 CHECK_REG(PT_R8
, gpr
[8]);
3420 CHECK_REG(PT_R9
, gpr
[9]);
3421 CHECK_REG(PT_R10
, gpr
[10]);
3422 CHECK_REG(PT_R11
, gpr
[11]);
3423 CHECK_REG(PT_R12
, gpr
[12]);
3424 CHECK_REG(PT_R13
, gpr
[13]);
3425 CHECK_REG(PT_R14
, gpr
[14]);
3426 CHECK_REG(PT_R15
, gpr
[15]);
3427 CHECK_REG(PT_R16
, gpr
[16]);
3428 CHECK_REG(PT_R17
, gpr
[17]);
3429 CHECK_REG(PT_R18
, gpr
[18]);
3430 CHECK_REG(PT_R19
, gpr
[19]);
3431 CHECK_REG(PT_R20
, gpr
[20]);
3432 CHECK_REG(PT_R21
, gpr
[21]);
3433 CHECK_REG(PT_R22
, gpr
[22]);
3434 CHECK_REG(PT_R23
, gpr
[23]);
3435 CHECK_REG(PT_R24
, gpr
[24]);
3436 CHECK_REG(PT_R25
, gpr
[25]);
3437 CHECK_REG(PT_R26
, gpr
[26]);
3438 CHECK_REG(PT_R27
, gpr
[27]);
3439 CHECK_REG(PT_R28
, gpr
[28]);
3440 CHECK_REG(PT_R29
, gpr
[29]);
3441 CHECK_REG(PT_R30
, gpr
[30]);
3442 CHECK_REG(PT_R31
, gpr
[31]);
3443 CHECK_REG(PT_NIP
, nip
);
3444 CHECK_REG(PT_MSR
, msr
);
3445 CHECK_REG(PT_ORIG_R3
, orig_gpr3
);
3446 CHECK_REG(PT_CTR
, ctr
);
3447 CHECK_REG(PT_LNK
, link
);
3448 CHECK_REG(PT_XER
, xer
);
3449 CHECK_REG(PT_CCR
, ccr
);
3451 CHECK_REG(PT_SOFTE
, softe
);
3453 CHECK_REG(PT_MQ
, mq
);
3455 CHECK_REG(PT_TRAP
, trap
);
3456 CHECK_REG(PT_DAR
, dar
);
3457 CHECK_REG(PT_DSISR
, dsisr
);
3458 CHECK_REG(PT_RESULT
, result
);
3461 BUILD_BUG_ON(PT_REGS_COUNT
!= sizeof(struct user_pt_regs
) / sizeof(unsigned long));
3464 * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
3467 BUILD_BUG_ON(PT_DSCR
< sizeof(struct user_pt_regs
) / sizeof(unsigned long));