/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * processes.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
unsigned long exception_ip(struct pt_regs *regs)
{
	return exception_epc(regs);
}
EXPORT_SYMBOL(exception_ip);
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}
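/*
 * Illustrative sketch (not part of this file's build): roughly how a
 * userspace tracer might consume the 64-bit PTRACE_GETREGS layout
 * described above -- 38 doublewords: regs[0..31], lo, hi, cp0_epc,
 * cp0_badvaddr, cp0_status, cp0_cause.  The struct and helper names
 * (mips_gregs, read_gprs) are made up for the example.
 *
 *	#include <stdint.h>
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *
 *	struct mips_gregs {			// mirrors the 38 * 8 byte layout
 *		uint64_t regs[32];
 *		uint64_t lo, hi;
 *		uint64_t cp0_epc, cp0_badvaddr, cp0_status, cp0_cause;
 *	};
 *
 *	static long read_gprs(pid_t pid, struct mips_gregs *g)
 *	{
 *		// Even for a 32-bit tracee the values arrive sign extended
 *		// into the 64-bit slots, as noted in the comment above.
 *		return ptrace(PTRACE_GETREGS, pid, NULL, g);
 *	}
 */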
/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);

	/* badvaddr, status, and cause may not be written.  */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}
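/*
 * Illustrative sketch (assumption, not kernel code): a tracer changing the
 * pending syscall by rewriting v0 ($2) with PTRACE_SETREGS -- the reason
 * ptrace_setregs() calls mips_syscall_update_nr() above.  It reuses the
 * hypothetical mips_gregs struct from the previous sketch.
 *
 *	static long retarget_syscall(pid_t pid, struct mips_gregs *g, long nr)
 *	{
 *		g->regs[2] = nr;		// $v0 carries the syscall number
 *		return ptrace(PTRACE_SETREGS, pid, NULL, g);
 *	}
 */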
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] &
				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
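/*
 * Illustrative sketch (assumption): arming a hardware watchpoint on a
 * tracee with PTRACE_SET_WATCH_REGS, which takes the struct through the
 * addr argument as handled in arch_ptrace() below.  The pt_watch_regs
 * layout comes from the MIPS asm/ptrace.h uapi header; MIPS_WATCHLO_W
 * (write watch) is a kernel-header name, so treat the exact includes as
 * an assumption.  Error handling is omitted for brevity.
 *
 *	static long watch_writes(pid_t pid, unsigned long vaddr)
 *	{
 *		struct pt_watch_regs wr = { 0 };
 *
 *		ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
 *		wr.mips64.watchlo[0] = vaddr | MIPS_WATCHLO_W;
 *		wr.mips64.watchhi[0] = 0;
 *		return ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 *	}
 */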
/* regset get/set implementations */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};

	mips_dump_regs32(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
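/*
 * Worked example of the sign extension performed above: a supplied 32-bit
 * value of 0x80000000 stored through the (s32) cast becomes
 * 0xffffffff80000000 in the 64-bit regs[] slot, while 0x7fffffff stays
 * 0x000000007fffffff.  This keeps a 32-bit tracee's register image
 * consistent with how the CPU itself holds 32-bit values in 64-bit
 * registers.
 */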
#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};

	mips_dump_regs64(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}
static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif /* CONFIG_64BIT */
#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Poke at FCSR according to its mask.  Set the Cause bits even
 * if a corresponding Enable bit is set.  This will be noticed at
 * the time the thread is switched to and SIGFPE thrown accordingly.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
	u32 fcr31;
	u32 mask;

	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}
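/*
 * Worked example of the masking above, on the assumption that fpu_msk31
 * marks the read-only FCSR bits: with mask = 0x01000000, value = 0xffffffff
 * and current fcr31 = 0x01000000, the result is
 * (0xffffffff & ~0x01000000) | (0x01000000 & 0x01000000)
 * = 0xfeffffff | 0x01000000 = 0xffffffff -- writable bits come from the new
 * value, read-only bits keep their current contents.
 */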
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written.  */

	return 0;
}
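/*
 * Illustrative note (assumption): the 33 * 8 byte buffer checked by
 * PTRACE_GETFPREGS/PTRACE_SETFPREGS above is 32 doubleword FP registers
 * followed by FCSR at 32-bit word offset 64 and FIR at word offset 65,
 * FIR being read-only and ignored on SETFPREGS.  A tracer might mirror
 * it like this; the struct name is made up.
 *
 *	struct mips_fpregs {
 *		uint64_t fpr[32];
 *		uint32_t fcr31;		// FCSR, writable
 *		uint32_t fir;		// FIR, read-only
 *	};
 *
 *	// ptrace(PTRACE_GETFPREGS, pid, NULL, &fpregs);
 */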
/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 * correspond 1:1 to buffer slots.  Only general registers are copied.
 */
static void fpr_get_fpa(struct task_struct *target,
			struct membuf *to)
{
	membuf_write(to, &target->thread.fpu,
		     NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 * general register slots are copied to buffer slots.  Only general
 * registers are copied.
 */
static void fpr_get_msa(struct task_struct *target, struct membuf *to)
{
	unsigned int i;

	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++)
		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR and FIR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		fpr_get_fpa(target, &to);
	else
		fpr_get_msa(target, &to);

	membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
	membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));

	return 0;
}
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
 * context's general register slots.  Only general registers are copied.
 */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 * bits only of FP context's general register slots.  Only general
 * registers are copied.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR register separately.  Ignore the incoming FIR register
 * contents though, as the register is read-only.
 *
 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 * which is supposed to have been guaranteed by the kernel before
 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 * so that we can safely avoid preinitializing temporaries for
 * partial register writes.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	if (count > 0)
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  fir_pos, fir_pos + sizeof(u32));

	return err;
}
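/*
 * Illustrative sketch (assumption): the same FP context is reachable from
 * userspace through the regset interface, e.g. PTRACE_GETREGSET with
 * NT_PRFPREG.  The iovec length should be a multiple of sizeof(elf_fpreg_t),
 * matching the requirement enforced by fpr_set() above.
 *
 *	#include <elf.h>
 *	#include <sys/uio.h>
 *	#include <sys/ptrace.h>
 *
 *	static long get_fp_note(pid_t pid, void *buf, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *		return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 *	}
 */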
/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
static int fp_mode_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}

/*
 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 *
 * We optimize for the case where `count % sizeof(int) == 0', which
 * is supposed to have been guaranteed by the kernel before calling
 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 * that we can safely avoid preinitializing temporaries for partial
 * mode writes.
 */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	BUG_ON(count % sizeof(int));

	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	if (count > 0)
		return 0;

	err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}
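/*
 * Illustrative sketch (assumption): toggling the FP mode through the
 * NT_MIPS_FP_MODE regset, which carries a single int as implemented by
 * fp_mode_get()/fp_mode_set() above.
 *
 *	static long set_fp_mode(pid_t pid, int mode)
 *	{
 *		struct iovec iov = { .iov_base = &mode, .iov_len = sizeof(mode) };
 *
 *		return ptrace(PTRACE_SETREGSET, pid, NT_MIPS_FP_MODE, &iov);
 *	}
 */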
#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA
struct msa_control_regs {
	unsigned int fir;
	unsigned int fcsr;
	unsigned int msair;
	unsigned int msacsr;
};

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to,
			  unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}

static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to,
			      sizeof(target->thread.fpu.fpr[0]));
	}

	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}
static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));
	if (!err) {
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}
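/*
 * Illustrative note (assumption): the NT_MIPS_MSA regset exposed by
 * msa_get()/msa_set() is laid out as NUM_FPU_REGS slots of regset->size
 * bytes (vector registers, padded with 0xff where no live context exists)
 * followed by the msa_control_regs block {fir, fcsr, msair, msacsr}.
 * A tracer reads it with PTRACE_GETREGSET and NT_MIPS_MSA, analogously to
 * the NT_PRFPREG sketch earlier in this file.
 */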
#endif /* CONFIG_CPU_HAS_MSA */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
/*
 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 */
static int dsp32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u32 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u32 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = (s32)dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = (s32)dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT

/*
 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 */
static int dsp64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u64 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u64 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = dspregs[i];
			break;
		}

	return 0;
}

#endif /* CONFIG_64BIT */
/*
 * Determine whether the DSP context is present.
 */
static int dsp_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
}
enum mips_regset {
	REGSET_GPR,
	REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) {					\
	.name = #reg,							\
	.offset = offsetof(struct pt_regs, r)				\
}

#define REG_OFFSET_END {						\
	.name = NULL,							\
	.offset = 0							\
}
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}
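/*
 * Illustrative sketch: how an in-kernel user (e.g. kprobes fetch-arg code)
 * might resolve a register by name, such as "r4" for the first argument
 * register, and then read it from a saved pt_regs.  The offset arithmetic
 * follows directly from regoffset_table[] above.
 *
 *	int off = regs_query_register_offset("r4");
 *
 *	if (off >= 0) {
 *		unsigned long a0 = *(unsigned long *)((char *)regs + off);
 *		// a0 now holds the tracee's $a0 at the time regs was saved
 *	}
 */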
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.regset_get	= gpr32_get,
		.set		= gpr32_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= dsp32_get,
		.set		= dsp32_set,
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.regset_get	= gpr64_get,
		.set		= gpr64_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= dsp64_get,
		.set		= dsp64_set,
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

static const struct user_regset_view user_mipsn32_view = {
	.name		= "mipsn32",
	.e_flags	= EF_MIPS_ABI2,
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif /* CONFIG_MIPS32_N32 */

#endif /* CONFIG_64BIT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs;

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		}
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
#endif
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}
	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;

		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			/* System call number may have been changed */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
#endif
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}
	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;
#endif
	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (ptrace_report_syscall_entry(regs))
			return -1;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = current_thread_info()->syscall;
		sd.arch = syscall_get_arch(current);
		syscall_get_arguments(current, regs, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(current_thread_info()->syscall,
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (current_thread_info()->syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return current_thread_info()->syscall;
}
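/*
 * Illustrative note (assumption): a PTRACE_SYSCALL tracer stopped at entry
 * sees the syscall number in $v0 and the first arguments in $a0..$a3,
 * matching the audit_syscall_entry() call above; a negative return from
 * syscall_trace_enter() makes the caller skip the syscall, with the return
 * value already primed to -ENOSYS by syscall_set_return_value().
 *
 *	// e.g. in a tracer, after waitpid() stops at syscall entry:
 *	//   ptrace(PTRACE_GETREGS, pid, NULL, &g);
 *	//   long nr = g.regs[2];		// $v0: syscall number
 */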
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	user_enter();
}