2 * Ptrace user space interface.
4 * Copyright IBM Corp. 1999, 2010
5 * Author(s): Denis Joseph Barrow
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/sched/task_stack.h>
13 #include <linux/smp.h>
14 #include <linux/errno.h>
15 #include <linux/ptrace.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/audit.h>
19 #include <linux/signal.h>
20 #include <linux/elf.h>
21 #include <linux/regset.h>
22 #include <linux/tracehook.h>
23 #include <linux/seccomp.h>
24 #include <linux/compat.h>
25 #include <trace/syscall.h>
26 #include <asm/segment.h>
28 #include <asm/pgtable.h>
29 #include <asm/pgalloc.h>
30 #include <linux/uaccess.h>
31 #include <asm/unistd.h>
32 #include <asm/switch_to.h>
36 #include "compat_ptrace.h"
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/syscalls.h>
42 void update_cr_regs(struct task_struct
*task
)
44 struct pt_regs
*regs
= task_pt_regs(task
);
45 struct thread_struct
*thread
= &task
->thread
;
46 struct per_regs old
, new;
47 unsigned long cr0_old
, cr0_new
;
48 unsigned long cr2_old
, cr2_new
;
49 int cr0_changed
, cr2_changed
;
51 __ctl_store(cr0_old
, 0, 0);
52 __ctl_store(cr2_old
, 2, 2);
55 /* Take care of the enable/disable of transactional execution. */
57 /* Set or clear transaction execution TXC bit 8. */
58 cr0_new
|= (1UL << 55);
59 if (task
->thread
.per_flags
& PER_FLAG_NO_TE
)
60 cr0_new
&= ~(1UL << 55);
61 /* Set or clear transaction execution TDC bits 62 and 63. */
63 if (task
->thread
.per_flags
& PER_FLAG_TE_ABORT_RAND
) {
64 if (task
->thread
.per_flags
& PER_FLAG_TE_ABORT_RAND_TEND
)
70 /* Take care of enable/disable of guarded storage. */
72 cr2_new
&= ~(1UL << 4);
73 if (task
->thread
.gs_cb
)
74 cr2_new
|= (1UL << 4);
76 /* Load control register 0/2 iff changed */
77 cr0_changed
= cr0_new
!= cr0_old
;
78 cr2_changed
= cr2_new
!= cr2_old
;
80 __ctl_load(cr0_new
, 0, 0);
82 __ctl_load(cr2_new
, 2, 2);
83 /* Copy user specified PER registers */
84 new.control
= thread
->per_user
.control
;
85 new.start
= thread
->per_user
.start
;
86 new.end
= thread
->per_user
.end
;
88 /* merge TIF_SINGLE_STEP into user specified PER registers. */
89 if (test_tsk_thread_flag(task
, TIF_SINGLE_STEP
) ||
90 test_tsk_thread_flag(task
, TIF_UPROBE_SINGLESTEP
)) {
91 if (test_tsk_thread_flag(task
, TIF_BLOCK_STEP
))
92 new.control
|= PER_EVENT_BRANCH
;
94 new.control
|= PER_EVENT_IFETCH
;
95 new.control
|= PER_CONTROL_SUSPENSION
;
96 new.control
|= PER_EVENT_TRANSACTION_END
;
97 if (test_tsk_thread_flag(task
, TIF_UPROBE_SINGLESTEP
))
98 new.control
|= PER_EVENT_IFETCH
;
103 /* Take care of the PER enablement bit in the PSW. */
104 if (!(new.control
& PER_EVENT_MASK
)) {
105 regs
->psw
.mask
&= ~PSW_MASK_PER
;
108 regs
->psw
.mask
|= PSW_MASK_PER
;
109 __ctl_store(old
, 9, 11);
110 if (memcmp(&new, &old
, sizeof(struct per_regs
)) != 0)
111 __ctl_load(new, 9, 11);
114 void user_enable_single_step(struct task_struct
*task
)
116 clear_tsk_thread_flag(task
, TIF_BLOCK_STEP
);
117 set_tsk_thread_flag(task
, TIF_SINGLE_STEP
);
120 void user_disable_single_step(struct task_struct
*task
)
122 clear_tsk_thread_flag(task
, TIF_BLOCK_STEP
);
123 clear_tsk_thread_flag(task
, TIF_SINGLE_STEP
);
126 void user_enable_block_step(struct task_struct
*task
)
128 set_tsk_thread_flag(task
, TIF_SINGLE_STEP
);
129 set_tsk_thread_flag(task
, TIF_BLOCK_STEP
);
133 * Called by kernel/ptrace.c when detaching..
135 * Clear all debugging related fields.
137 void ptrace_disable(struct task_struct
*task
)
139 memset(&task
->thread
.per_user
, 0, sizeof(task
->thread
.per_user
));
140 memset(&task
->thread
.per_event
, 0, sizeof(task
->thread
.per_event
));
141 clear_tsk_thread_flag(task
, TIF_SINGLE_STEP
);
142 clear_pt_regs_flag(task_pt_regs(task
), PIF_PER_TRAP
);
143 task
->thread
.per_flags
= 0;
146 #define __ADDR_MASK 7
148 static inline unsigned long __peek_user_per(struct task_struct
*child
,
151 struct per_struct_kernel
*dummy
= NULL
;
153 if (addr
== (addr_t
) &dummy
->cr9
)
154 /* Control bits of the active per set. */
155 return test_thread_flag(TIF_SINGLE_STEP
) ?
156 PER_EVENT_IFETCH
: child
->thread
.per_user
.control
;
157 else if (addr
== (addr_t
) &dummy
->cr10
)
158 /* Start address of the active per set. */
159 return test_thread_flag(TIF_SINGLE_STEP
) ?
160 0 : child
->thread
.per_user
.start
;
161 else if (addr
== (addr_t
) &dummy
->cr11
)
162 /* End address of the active per set. */
163 return test_thread_flag(TIF_SINGLE_STEP
) ?
164 -1UL : child
->thread
.per_user
.end
;
165 else if (addr
== (addr_t
) &dummy
->bits
)
166 /* Single-step bit. */
167 return test_thread_flag(TIF_SINGLE_STEP
) ?
168 (1UL << (BITS_PER_LONG
- 1)) : 0;
169 else if (addr
== (addr_t
) &dummy
->starting_addr
)
170 /* Start address of the user specified per set. */
171 return child
->thread
.per_user
.start
;
172 else if (addr
== (addr_t
) &dummy
->ending_addr
)
173 /* End address of the user specified per set. */
174 return child
->thread
.per_user
.end
;
175 else if (addr
== (addr_t
) &dummy
->perc_atmid
)
176 /* PER code, ATMID and AI of the last PER trap */
177 return (unsigned long)
178 child
->thread
.per_event
.cause
<< (BITS_PER_LONG
- 16);
179 else if (addr
== (addr_t
) &dummy
->address
)
180 /* Address of the last PER trap */
181 return child
->thread
.per_event
.address
;
182 else if (addr
== (addr_t
) &dummy
->access_id
)
183 /* Access id of the last PER trap */
184 return (unsigned long)
185 child
->thread
.per_event
.paid
<< (BITS_PER_LONG
- 8);
190 * Read the word at offset addr from the user area of a process. The
191 * trouble here is that the information is littered over different
192 * locations. The process registers are found on the kernel stack,
193 * the floating point stuff and the trace settings are stored in
194 * the task structure. In addition the different structures in
195 * struct user contain pad bytes that should be read as zeroes.
198 static unsigned long __peek_user(struct task_struct
*child
, addr_t addr
)
200 struct user
*dummy
= NULL
;
203 if (addr
< (addr_t
) &dummy
->regs
.acrs
) {
205 * psw and gprs are stored on the stack
207 tmp
= *(addr_t
*)((addr_t
) &task_pt_regs(child
)->psw
+ addr
);
208 if (addr
== (addr_t
) &dummy
->regs
.psw
.mask
) {
209 /* Return a clean psw mask. */
210 tmp
&= PSW_MASK_USER
| PSW_MASK_RI
;
211 tmp
|= PSW_USER_BITS
;
214 } else if (addr
< (addr_t
) &dummy
->regs
.orig_gpr2
) {
216 * access registers are stored in the thread structure
218 offset
= addr
- (addr_t
) &dummy
->regs
.acrs
;
220 * Very special case: old & broken 64 bit gdb reading
221 * from acrs[15]. Result is a 64 bit value. Read the
222 * 32 bit acrs[15] value and shift it by 32. Sick...
224 if (addr
== (addr_t
) &dummy
->regs
.acrs
[15])
225 tmp
= ((unsigned long) child
->thread
.acrs
[15]) << 32;
227 tmp
= *(addr_t
*)((addr_t
) &child
->thread
.acrs
+ offset
);
229 } else if (addr
== (addr_t
) &dummy
->regs
.orig_gpr2
) {
231 * orig_gpr2 is stored on the kernel stack
233 tmp
= (addr_t
) task_pt_regs(child
)->orig_gpr2
;
235 } else if (addr
< (addr_t
) &dummy
->regs
.fp_regs
) {
237 * prevent reads of padding hole between
238 * orig_gpr2 and fp_regs on s390.
242 } else if (addr
== (addr_t
) &dummy
->regs
.fp_regs
.fpc
) {
244 * floating point control reg. is in the thread structure
246 tmp
= child
->thread
.fpu
.fpc
;
247 tmp
<<= BITS_PER_LONG
- 32;
249 } else if (addr
< (addr_t
) (&dummy
->regs
.fp_regs
+ 1)) {
251 * floating point regs. are either in child->thread.fpu
252 * or the child->thread.fpu.vxrs array
254 offset
= addr
- (addr_t
) &dummy
->regs
.fp_regs
.fprs
;
257 ((addr_t
) child
->thread
.fpu
.vxrs
+ 2*offset
);
260 ((addr_t
) child
->thread
.fpu
.fprs
+ offset
);
262 } else if (addr
< (addr_t
) (&dummy
->regs
.per_info
+ 1)) {
264 * Handle access to the per_info structure.
266 addr
-= (addr_t
) &dummy
->regs
.per_info
;
267 tmp
= __peek_user_per(child
, addr
);
276 peek_user(struct task_struct
*child
, addr_t addr
, addr_t data
)
281 * Stupid gdb peeks/pokes the access registers in 64 bit with
282 * an alignment of 4. Programmers from hell...
285 if (addr
>= (addr_t
) &((struct user
*) NULL
)->regs
.acrs
&&
286 addr
< (addr_t
) &((struct user
*) NULL
)->regs
.orig_gpr2
)
288 if ((addr
& mask
) || addr
> sizeof(struct user
) - __ADDR_MASK
)
291 tmp
= __peek_user(child
, addr
);
292 return put_user(tmp
, (addr_t __user
*) data
);
295 static inline void __poke_user_per(struct task_struct
*child
,
296 addr_t addr
, addr_t data
)
298 struct per_struct_kernel
*dummy
= NULL
;
301 * There are only three fields in the per_info struct that the
302 * debugger user can write to.
303 * 1) cr9: the debugger wants to set a new PER event mask
304 * 2) starting_addr: the debugger wants to set a new starting
305 * address to use with the PER event mask.
306 * 3) ending_addr: the debugger wants to set a new ending
307 * address to use with the PER event mask.
308 * The user specified PER event mask and the start and end
309 * addresses are used only if single stepping is not in effect.
310 * Writes to any other field in per_info are ignored.
312 if (addr
== (addr_t
) &dummy
->cr9
)
313 /* PER event mask of the user specified per set. */
314 child
->thread
.per_user
.control
=
315 data
& (PER_EVENT_MASK
| PER_CONTROL_MASK
);
316 else if (addr
== (addr_t
) &dummy
->starting_addr
)
317 /* Starting address of the user specified per set. */
318 child
->thread
.per_user
.start
= data
;
319 else if (addr
== (addr_t
) &dummy
->ending_addr
)
320 /* Ending address of the user specified per set. */
321 child
->thread
.per_user
.end
= data
;
325 * Write a word to the user area of a process at location addr. This
326 * operation does have an additional problem compared to peek_user.
327 * Stores to the program status word and on the floating point
328 * control register needs to get checked for validity.
330 static int __poke_user(struct task_struct
*child
, addr_t addr
, addr_t data
)
332 struct user
*dummy
= NULL
;
335 if (addr
< (addr_t
) &dummy
->regs
.acrs
) {
337 * psw and gprs are stored on the stack
339 if (addr
== (addr_t
) &dummy
->regs
.psw
.mask
) {
340 unsigned long mask
= PSW_MASK_USER
;
342 mask
|= is_ri_task(child
) ? PSW_MASK_RI
: 0;
343 if ((data
^ PSW_USER_BITS
) & ~mask
)
344 /* Invalid psw mask. */
346 if ((data
& PSW_MASK_ASC
) == PSW_ASC_HOME
)
347 /* Invalid address-space-control bits */
349 if ((data
& PSW_MASK_EA
) && !(data
& PSW_MASK_BA
))
350 /* Invalid addressing mode bits */
353 *(addr_t
*)((addr_t
) &task_pt_regs(child
)->psw
+ addr
) = data
;
355 } else if (addr
< (addr_t
) (&dummy
->regs
.orig_gpr2
)) {
357 * access registers are stored in the thread structure
359 offset
= addr
- (addr_t
) &dummy
->regs
.acrs
;
361 * Very special case: old & broken 64 bit gdb writing
362 * to acrs[15] with a 64 bit value. Ignore the lower
363 * half of the value and write the upper 32 bit to
366 if (addr
== (addr_t
) &dummy
->regs
.acrs
[15])
367 child
->thread
.acrs
[15] = (unsigned int) (data
>> 32);
369 *(addr_t
*)((addr_t
) &child
->thread
.acrs
+ offset
) = data
;
371 } else if (addr
== (addr_t
) &dummy
->regs
.orig_gpr2
) {
373 * orig_gpr2 is stored on the kernel stack
375 task_pt_regs(child
)->orig_gpr2
= data
;
377 } else if (addr
< (addr_t
) &dummy
->regs
.fp_regs
) {
379 * prevent writes of padding hole between
380 * orig_gpr2 and fp_regs on s390.
384 } else if (addr
== (addr_t
) &dummy
->regs
.fp_regs
.fpc
) {
386 * floating point control reg. is in the thread structure
388 if ((unsigned int) data
!= 0 ||
389 test_fp_ctl(data
>> (BITS_PER_LONG
- 32)))
391 child
->thread
.fpu
.fpc
= data
>> (BITS_PER_LONG
- 32);
393 } else if (addr
< (addr_t
) (&dummy
->regs
.fp_regs
+ 1)) {
395 * floating point regs. are either in child->thread.fpu
396 * or the child->thread.fpu.vxrs array
398 offset
= addr
- (addr_t
) &dummy
->regs
.fp_regs
.fprs
;
401 child
->thread
.fpu
.vxrs
+ 2*offset
) = data
;
404 child
->thread
.fpu
.fprs
+ offset
) = data
;
406 } else if (addr
< (addr_t
) (&dummy
->regs
.per_info
+ 1)) {
408 * Handle access to the per_info structure.
410 addr
-= (addr_t
) &dummy
->regs
.per_info
;
411 __poke_user_per(child
, addr
, data
);
418 static int poke_user(struct task_struct
*child
, addr_t addr
, addr_t data
)
423 * Stupid gdb peeks/pokes the access registers in 64 bit with
424 * an alignment of 4. Programmers from hell indeed...
427 if (addr
>= (addr_t
) &((struct user
*) NULL
)->regs
.acrs
&&
428 addr
< (addr_t
) &((struct user
*) NULL
)->regs
.orig_gpr2
)
430 if ((addr
& mask
) || addr
> sizeof(struct user
) - __ADDR_MASK
)
433 return __poke_user(child
, addr
, data
);
436 long arch_ptrace(struct task_struct
*child
, long request
,
437 unsigned long addr
, unsigned long data
)
444 /* read the word at location addr in the USER area. */
445 return peek_user(child
, addr
, data
);
448 /* write the word at location addr in the USER area */
449 return poke_user(child
, addr
, data
);
451 case PTRACE_PEEKUSR_AREA
:
452 case PTRACE_POKEUSR_AREA
:
453 if (copy_from_user(&parea
, (void __force __user
*) addr
,
456 addr
= parea
.kernel_addr
;
457 data
= parea
.process_addr
;
459 while (copied
< parea
.len
) {
460 if (request
== PTRACE_PEEKUSR_AREA
)
461 ret
= peek_user(child
, addr
, data
);
465 (addr_t __force __user
*) data
))
467 ret
= poke_user(child
, addr
, utmp
);
471 addr
+= sizeof(unsigned long);
472 data
+= sizeof(unsigned long);
473 copied
+= sizeof(unsigned long);
476 case PTRACE_GET_LAST_BREAK
:
477 put_user(child
->thread
.last_break
,
478 (unsigned long __user
*) data
);
480 case PTRACE_ENABLE_TE
:
483 child
->thread
.per_flags
&= ~PER_FLAG_NO_TE
;
485 case PTRACE_DISABLE_TE
:
488 child
->thread
.per_flags
|= PER_FLAG_NO_TE
;
489 child
->thread
.per_flags
&= ~PER_FLAG_TE_ABORT_RAND
;
491 case PTRACE_TE_ABORT_RAND
:
492 if (!MACHINE_HAS_TE
|| (child
->thread
.per_flags
& PER_FLAG_NO_TE
))
496 child
->thread
.per_flags
&= ~PER_FLAG_TE_ABORT_RAND
;
499 child
->thread
.per_flags
|= PER_FLAG_TE_ABORT_RAND
;
500 child
->thread
.per_flags
|= PER_FLAG_TE_ABORT_RAND_TEND
;
503 child
->thread
.per_flags
|= PER_FLAG_TE_ABORT_RAND
;
504 child
->thread
.per_flags
&= ~PER_FLAG_TE_ABORT_RAND_TEND
;
511 return ptrace_request(child
, request
, addr
, data
);
517 * Now the fun part starts... a 31 bit program running in the
518 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
519 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
520 * to handle, the difference to the 64 bit versions of the requests
521 * is that the access is done in multiples of 4 byte instead of
522 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
523 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
524 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
525 * is a 31 bit program too, the content of struct user can be
526 * emulated. A 31 bit program peeking into the struct user of
527 * a 64 bit program is a no-no.
531 * Same as peek_user_per but for a 31 bit program.
533 static inline __u32
__peek_user_per_compat(struct task_struct
*child
,
536 struct compat_per_struct_kernel
*dummy32
= NULL
;
538 if (addr
== (addr_t
) &dummy32
->cr9
)
539 /* Control bits of the active per set. */
540 return (__u32
) test_thread_flag(TIF_SINGLE_STEP
) ?
541 PER_EVENT_IFETCH
: child
->thread
.per_user
.control
;
542 else if (addr
== (addr_t
) &dummy32
->cr10
)
543 /* Start address of the active per set. */
544 return (__u32
) test_thread_flag(TIF_SINGLE_STEP
) ?
545 0 : child
->thread
.per_user
.start
;
546 else if (addr
== (addr_t
) &dummy32
->cr11
)
547 /* End address of the active per set. */
548 return test_thread_flag(TIF_SINGLE_STEP
) ?
549 PSW32_ADDR_INSN
: child
->thread
.per_user
.end
;
550 else if (addr
== (addr_t
) &dummy32
->bits
)
551 /* Single-step bit. */
552 return (__u32
) test_thread_flag(TIF_SINGLE_STEP
) ?
554 else if (addr
== (addr_t
) &dummy32
->starting_addr
)
555 /* Start address of the user specified per set. */
556 return (__u32
) child
->thread
.per_user
.start
;
557 else if (addr
== (addr_t
) &dummy32
->ending_addr
)
558 /* End address of the user specified per set. */
559 return (__u32
) child
->thread
.per_user
.end
;
560 else if (addr
== (addr_t
) &dummy32
->perc_atmid
)
561 /* PER code, ATMID and AI of the last PER trap */
562 return (__u32
) child
->thread
.per_event
.cause
<< 16;
563 else if (addr
== (addr_t
) &dummy32
->address
)
564 /* Address of the last PER trap */
565 return (__u32
) child
->thread
.per_event
.address
;
566 else if (addr
== (addr_t
) &dummy32
->access_id
)
567 /* Access id of the last PER trap */
568 return (__u32
) child
->thread
.per_event
.paid
<< 24;
573 * Same as peek_user but for a 31 bit program.
575 static u32
__peek_user_compat(struct task_struct
*child
, addr_t addr
)
577 struct compat_user
*dummy32
= NULL
;
581 if (addr
< (addr_t
) &dummy32
->regs
.acrs
) {
582 struct pt_regs
*regs
= task_pt_regs(child
);
584 * psw and gprs are stored on the stack
586 if (addr
== (addr_t
) &dummy32
->regs
.psw
.mask
) {
587 /* Fake a 31 bit psw mask. */
588 tmp
= (__u32
)(regs
->psw
.mask
>> 32);
589 tmp
&= PSW32_MASK_USER
| PSW32_MASK_RI
;
590 tmp
|= PSW32_USER_BITS
;
591 } else if (addr
== (addr_t
) &dummy32
->regs
.psw
.addr
) {
592 /* Fake a 31 bit psw address. */
593 tmp
= (__u32
) regs
->psw
.addr
|
594 (__u32
)(regs
->psw
.mask
& PSW_MASK_BA
);
597 tmp
= *(__u32
*)((addr_t
) ®s
->psw
+ addr
*2 + 4);
599 } else if (addr
< (addr_t
) (&dummy32
->regs
.orig_gpr2
)) {
601 * access registers are stored in the thread structure
603 offset
= addr
- (addr_t
) &dummy32
->regs
.acrs
;
604 tmp
= *(__u32
*)((addr_t
) &child
->thread
.acrs
+ offset
);
606 } else if (addr
== (addr_t
) (&dummy32
->regs
.orig_gpr2
)) {
608 * orig_gpr2 is stored on the kernel stack
610 tmp
= *(__u32
*)((addr_t
) &task_pt_regs(child
)->orig_gpr2
+ 4);
612 } else if (addr
< (addr_t
) &dummy32
->regs
.fp_regs
) {
614 * prevent reads of padding hole between
615 * orig_gpr2 and fp_regs on s390.
619 } else if (addr
== (addr_t
) &dummy32
->regs
.fp_regs
.fpc
) {
621 * floating point control reg. is in the thread structure
623 tmp
= child
->thread
.fpu
.fpc
;
625 } else if (addr
< (addr_t
) (&dummy32
->regs
.fp_regs
+ 1)) {
627 * floating point regs. are either in child->thread.fpu
628 * or the child->thread.fpu.vxrs array
630 offset
= addr
- (addr_t
) &dummy32
->regs
.fp_regs
.fprs
;
633 ((addr_t
) child
->thread
.fpu
.vxrs
+ 2*offset
);
636 ((addr_t
) child
->thread
.fpu
.fprs
+ offset
);
638 } else if (addr
< (addr_t
) (&dummy32
->regs
.per_info
+ 1)) {
640 * Handle access to the per_info structure.
642 addr
-= (addr_t
) &dummy32
->regs
.per_info
;
643 tmp
= __peek_user_per_compat(child
, addr
);
651 static int peek_user_compat(struct task_struct
*child
,
652 addr_t addr
, addr_t data
)
656 if (!is_compat_task() || (addr
& 3) || addr
> sizeof(struct user
) - 3)
659 tmp
= __peek_user_compat(child
, addr
);
660 return put_user(tmp
, (__u32 __user
*) data
);
664 * Same as poke_user_per but for a 31 bit program.
666 static inline void __poke_user_per_compat(struct task_struct
*child
,
667 addr_t addr
, __u32 data
)
669 struct compat_per_struct_kernel
*dummy32
= NULL
;
671 if (addr
== (addr_t
) &dummy32
->cr9
)
672 /* PER event mask of the user specified per set. */
673 child
->thread
.per_user
.control
=
674 data
& (PER_EVENT_MASK
| PER_CONTROL_MASK
);
675 else if (addr
== (addr_t
) &dummy32
->starting_addr
)
676 /* Starting address of the user specified per set. */
677 child
->thread
.per_user
.start
= data
;
678 else if (addr
== (addr_t
) &dummy32
->ending_addr
)
679 /* Ending address of the user specified per set. */
680 child
->thread
.per_user
.end
= data
;
684 * Same as poke_user but for a 31 bit program.
686 static int __poke_user_compat(struct task_struct
*child
,
687 addr_t addr
, addr_t data
)
689 struct compat_user
*dummy32
= NULL
;
690 __u32 tmp
= (__u32
) data
;
693 if (addr
< (addr_t
) &dummy32
->regs
.acrs
) {
694 struct pt_regs
*regs
= task_pt_regs(child
);
696 * psw, gprs, acrs and orig_gpr2 are stored on the stack
698 if (addr
== (addr_t
) &dummy32
->regs
.psw
.mask
) {
699 __u32 mask
= PSW32_MASK_USER
;
701 mask
|= is_ri_task(child
) ? PSW32_MASK_RI
: 0;
702 /* Build a 64 bit psw mask from 31 bit mask. */
703 if ((tmp
^ PSW32_USER_BITS
) & ~mask
)
704 /* Invalid psw mask. */
706 if ((data
& PSW32_MASK_ASC
) == PSW32_ASC_HOME
)
707 /* Invalid address-space-control bits */
709 regs
->psw
.mask
= (regs
->psw
.mask
& ~PSW_MASK_USER
) |
710 (regs
->psw
.mask
& PSW_MASK_BA
) |
711 (__u64
)(tmp
& mask
) << 32;
712 } else if (addr
== (addr_t
) &dummy32
->regs
.psw
.addr
) {
713 /* Build a 64 bit psw address from 31 bit address. */
714 regs
->psw
.addr
= (__u64
) tmp
& PSW32_ADDR_INSN
;
715 /* Transfer 31 bit amode bit to psw mask. */
716 regs
->psw
.mask
= (regs
->psw
.mask
& ~PSW_MASK_BA
) |
717 (__u64
)(tmp
& PSW32_ADDR_AMODE
);
720 *(__u32
*)((addr_t
) ®s
->psw
+ addr
*2 + 4) = tmp
;
722 } else if (addr
< (addr_t
) (&dummy32
->regs
.orig_gpr2
)) {
724 * access registers are stored in the thread structure
726 offset
= addr
- (addr_t
) &dummy32
->regs
.acrs
;
727 *(__u32
*)((addr_t
) &child
->thread
.acrs
+ offset
) = tmp
;
729 } else if (addr
== (addr_t
) (&dummy32
->regs
.orig_gpr2
)) {
731 * orig_gpr2 is stored on the kernel stack
733 *(__u32
*)((addr_t
) &task_pt_regs(child
)->orig_gpr2
+ 4) = tmp
;
735 } else if (addr
< (addr_t
) &dummy32
->regs
.fp_regs
) {
737 * prevent writess of padding hole between
738 * orig_gpr2 and fp_regs on s390.
742 } else if (addr
== (addr_t
) &dummy32
->regs
.fp_regs
.fpc
) {
744 * floating point control reg. is in the thread structure
746 if (test_fp_ctl(tmp
))
748 child
->thread
.fpu
.fpc
= data
;
750 } else if (addr
< (addr_t
) (&dummy32
->regs
.fp_regs
+ 1)) {
752 * floating point regs. are either in child->thread.fpu
753 * or the child->thread.fpu.vxrs array
755 offset
= addr
- (addr_t
) &dummy32
->regs
.fp_regs
.fprs
;
758 child
->thread
.fpu
.vxrs
+ 2*offset
) = tmp
;
761 child
->thread
.fpu
.fprs
+ offset
) = tmp
;
763 } else if (addr
< (addr_t
) (&dummy32
->regs
.per_info
+ 1)) {
765 * Handle access to the per_info structure.
767 addr
-= (addr_t
) &dummy32
->regs
.per_info
;
768 __poke_user_per_compat(child
, addr
, data
);
774 static int poke_user_compat(struct task_struct
*child
,
775 addr_t addr
, addr_t data
)
777 if (!is_compat_task() || (addr
& 3) ||
778 addr
> sizeof(struct compat_user
) - 3)
781 return __poke_user_compat(child
, addr
, data
);
784 long compat_arch_ptrace(struct task_struct
*child
, compat_long_t request
,
785 compat_ulong_t caddr
, compat_ulong_t cdata
)
787 unsigned long addr
= caddr
;
788 unsigned long data
= cdata
;
789 compat_ptrace_area parea
;
794 /* read the word at location addr in the USER area. */
795 return peek_user_compat(child
, addr
, data
);
798 /* write the word at location addr in the USER area */
799 return poke_user_compat(child
, addr
, data
);
801 case PTRACE_PEEKUSR_AREA
:
802 case PTRACE_POKEUSR_AREA
:
803 if (copy_from_user(&parea
, (void __force __user
*) addr
,
806 addr
= parea
.kernel_addr
;
807 data
= parea
.process_addr
;
809 while (copied
< parea
.len
) {
810 if (request
== PTRACE_PEEKUSR_AREA
)
811 ret
= peek_user_compat(child
, addr
, data
);
815 (__u32 __force __user
*) data
))
817 ret
= poke_user_compat(child
, addr
, utmp
);
821 addr
+= sizeof(unsigned int);
822 data
+= sizeof(unsigned int);
823 copied
+= sizeof(unsigned int);
826 case PTRACE_GET_LAST_BREAK
:
827 put_user(child
->thread
.last_break
,
828 (unsigned int __user
*) data
);
831 return compat_ptrace_request(child
, request
, addr
, data
);
835 asmlinkage
long do_syscall_trace_enter(struct pt_regs
*regs
)
837 unsigned long mask
= -1UL;
840 * The sysc_tracesys code in entry.S stored the system
841 * call number to gprs[2].
843 if (test_thread_flag(TIF_SYSCALL_TRACE
) &&
844 (tracehook_report_syscall_entry(regs
) ||
845 regs
->gprs
[2] >= NR_syscalls
)) {
847 * Tracing decided this syscall should not happen or the
848 * debugger stored an invalid system call number. Skip
849 * the system call and the system call restart handling.
851 clear_pt_regs_flag(regs
, PIF_SYSCALL
);
855 /* Do the secure computing check after ptrace. */
856 if (secure_computing(NULL
)) {
857 /* seccomp failures shouldn't expose any additional code. */
861 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
862 trace_sys_enter(regs
, regs
->gprs
[2]);
864 if (is_compat_task())
867 audit_syscall_entry(regs
->gprs
[2], regs
->orig_gpr2
& mask
,
868 regs
->gprs
[3] &mask
, regs
->gprs
[4] &mask
,
869 regs
->gprs
[5] &mask
);
871 return regs
->gprs
[2];
874 asmlinkage
void do_syscall_trace_exit(struct pt_regs
*regs
)
876 audit_syscall_exit(regs
);
878 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT
)))
879 trace_sys_exit(regs
, regs
->gprs
[2]);
881 if (test_thread_flag(TIF_SYSCALL_TRACE
))
882 tracehook_report_syscall_exit(regs
, 0);
886 * user_regset definitions.
889 static int s390_regs_get(struct task_struct
*target
,
890 const struct user_regset
*regset
,
891 unsigned int pos
, unsigned int count
,
892 void *kbuf
, void __user
*ubuf
)
894 if (target
== current
)
895 save_access_regs(target
->thread
.acrs
);
898 unsigned long *k
= kbuf
;
900 *k
++ = __peek_user(target
, pos
);
905 unsigned long __user
*u
= ubuf
;
907 if (__put_user(__peek_user(target
, pos
), u
++))
916 static int s390_regs_set(struct task_struct
*target
,
917 const struct user_regset
*regset
,
918 unsigned int pos
, unsigned int count
,
919 const void *kbuf
, const void __user
*ubuf
)
923 if (target
== current
)
924 save_access_regs(target
->thread
.acrs
);
927 const unsigned long *k
= kbuf
;
928 while (count
> 0 && !rc
) {
929 rc
= __poke_user(target
, pos
, *k
++);
934 const unsigned long __user
*u
= ubuf
;
935 while (count
> 0 && !rc
) {
937 rc
= __get_user(word
, u
++);
940 rc
= __poke_user(target
, pos
, word
);
946 if (rc
== 0 && target
== current
)
947 restore_access_regs(target
->thread
.acrs
);
952 static int s390_fpregs_get(struct task_struct
*target
,
953 const struct user_regset
*regset
, unsigned int pos
,
954 unsigned int count
, void *kbuf
, void __user
*ubuf
)
956 _s390_fp_regs fp_regs
;
958 if (target
== current
)
961 fp_regs
.fpc
= target
->thread
.fpu
.fpc
;
962 fpregs_store(&fp_regs
, &target
->thread
.fpu
);
964 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
968 static int s390_fpregs_set(struct task_struct
*target
,
969 const struct user_regset
*regset
, unsigned int pos
,
970 unsigned int count
, const void *kbuf
,
971 const void __user
*ubuf
)
974 freg_t fprs
[__NUM_FPRS
];
976 if (target
== current
)
980 convert_vx_to_fp(fprs
, target
->thread
.fpu
.vxrs
);
982 memcpy(&fprs
, target
->thread
.fpu
.fprs
, sizeof(fprs
));
984 /* If setting FPC, must validate it first. */
985 if (count
> 0 && pos
< offsetof(s390_fp_regs
, fprs
)) {
986 u32 ufpc
[2] = { target
->thread
.fpu
.fpc
, 0 };
987 rc
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &ufpc
,
988 0, offsetof(s390_fp_regs
, fprs
));
991 if (ufpc
[1] != 0 || test_fp_ctl(ufpc
[0]))
993 target
->thread
.fpu
.fpc
= ufpc
[0];
996 if (rc
== 0 && count
> 0)
997 rc
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
998 fprs
, offsetof(s390_fp_regs
, fprs
), -1);
1003 convert_fp_to_vx(target
->thread
.fpu
.vxrs
, fprs
);
1005 memcpy(target
->thread
.fpu
.fprs
, &fprs
, sizeof(fprs
));
1010 static int s390_last_break_get(struct task_struct
*target
,
1011 const struct user_regset
*regset
,
1012 unsigned int pos
, unsigned int count
,
1013 void *kbuf
, void __user
*ubuf
)
1017 unsigned long *k
= kbuf
;
1018 *k
= target
->thread
.last_break
;
1020 unsigned long __user
*u
= ubuf
;
1021 if (__put_user(target
->thread
.last_break
, u
))
1028 static int s390_last_break_set(struct task_struct
*target
,
1029 const struct user_regset
*regset
,
1030 unsigned int pos
, unsigned int count
,
1031 const void *kbuf
, const void __user
*ubuf
)
1036 static int s390_tdb_get(struct task_struct
*target
,
1037 const struct user_regset
*regset
,
1038 unsigned int pos
, unsigned int count
,
1039 void *kbuf
, void __user
*ubuf
)
1041 struct pt_regs
*regs
= task_pt_regs(target
);
1042 unsigned char *data
;
1044 if (!(regs
->int_code
& 0x200))
1046 data
= target
->thread
.trap_tdb
;
1047 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, data
, 0, 256);
1050 static int s390_tdb_set(struct task_struct
*target
,
1051 const struct user_regset
*regset
,
1052 unsigned int pos
, unsigned int count
,
1053 const void *kbuf
, const void __user
*ubuf
)
1058 static int s390_vxrs_low_get(struct task_struct
*target
,
1059 const struct user_regset
*regset
,
1060 unsigned int pos
, unsigned int count
,
1061 void *kbuf
, void __user
*ubuf
)
1063 __u64 vxrs
[__NUM_VXRS_LOW
];
1066 if (!MACHINE_HAS_VX
)
1068 if (target
== current
)
1070 for (i
= 0; i
< __NUM_VXRS_LOW
; i
++)
1071 vxrs
[i
] = *((__u64
*)(target
->thread
.fpu
.vxrs
+ i
) + 1);
1072 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, vxrs
, 0, -1);
1075 static int s390_vxrs_low_set(struct task_struct
*target
,
1076 const struct user_regset
*regset
,
1077 unsigned int pos
, unsigned int count
,
1078 const void *kbuf
, const void __user
*ubuf
)
1080 __u64 vxrs
[__NUM_VXRS_LOW
];
1083 if (!MACHINE_HAS_VX
)
1085 if (target
== current
)
1088 for (i
= 0; i
< __NUM_VXRS_LOW
; i
++)
1089 vxrs
[i
] = *((__u64
*)(target
->thread
.fpu
.vxrs
+ i
) + 1);
1091 rc
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, vxrs
, 0, -1);
1093 for (i
= 0; i
< __NUM_VXRS_LOW
; i
++)
1094 *((__u64
*)(target
->thread
.fpu
.vxrs
+ i
) + 1) = vxrs
[i
];
1099 static int s390_vxrs_high_get(struct task_struct
*target
,
1100 const struct user_regset
*regset
,
1101 unsigned int pos
, unsigned int count
,
1102 void *kbuf
, void __user
*ubuf
)
1104 __vector128 vxrs
[__NUM_VXRS_HIGH
];
1106 if (!MACHINE_HAS_VX
)
1108 if (target
== current
)
1110 memcpy(vxrs
, target
->thread
.fpu
.vxrs
+ __NUM_VXRS_LOW
, sizeof(vxrs
));
1112 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
, vxrs
, 0, -1);
1115 static int s390_vxrs_high_set(struct task_struct
*target
,
1116 const struct user_regset
*regset
,
1117 unsigned int pos
, unsigned int count
,
1118 const void *kbuf
, const void __user
*ubuf
)
1122 if (!MACHINE_HAS_VX
)
1124 if (target
== current
)
1127 rc
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1128 target
->thread
.fpu
.vxrs
+ __NUM_VXRS_LOW
, 0, -1);
1132 static int s390_system_call_get(struct task_struct
*target
,
1133 const struct user_regset
*regset
,
1134 unsigned int pos
, unsigned int count
,
1135 void *kbuf
, void __user
*ubuf
)
1137 unsigned int *data
= &target
->thread
.system_call
;
1138 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1139 data
, 0, sizeof(unsigned int));
1142 static int s390_system_call_set(struct task_struct
*target
,
1143 const struct user_regset
*regset
,
1144 unsigned int pos
, unsigned int count
,
1145 const void *kbuf
, const void __user
*ubuf
)
1147 unsigned int *data
= &target
->thread
.system_call
;
1148 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1149 data
, 0, sizeof(unsigned int));
1152 static int s390_gs_cb_get(struct task_struct
*target
,
1153 const struct user_regset
*regset
,
1154 unsigned int pos
, unsigned int count
,
1155 void *kbuf
, void __user
*ubuf
)
1157 struct gs_cb
*data
= target
->thread
.gs_cb
;
1159 if (!MACHINE_HAS_GS
)
1163 if (target
== current
)
1165 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1166 data
, 0, sizeof(struct gs_cb
));
1169 static int s390_gs_cb_set(struct task_struct
*target
,
1170 const struct user_regset
*regset
,
1171 unsigned int pos
, unsigned int count
,
1172 const void *kbuf
, const void __user
*ubuf
)
1174 struct gs_cb
*data
= target
->thread
.gs_cb
;
1177 if (!MACHINE_HAS_GS
)
1180 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
1184 target
->thread
.gs_cb
= data
;
1185 if (target
== current
)
1186 __ctl_set_bit(2, 4);
1187 } else if (target
== current
) {
1190 rc
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1191 data
, 0, sizeof(struct gs_cb
));
1192 if (target
== current
)
1193 restore_gs_cb(data
);
1197 static int s390_gs_bc_get(struct task_struct
*target
,
1198 const struct user_regset
*regset
,
1199 unsigned int pos
, unsigned int count
,
1200 void *kbuf
, void __user
*ubuf
)
1202 struct gs_cb
*data
= target
->thread
.gs_bc_cb
;
1204 if (!MACHINE_HAS_GS
)
1208 return user_regset_copyout(&pos
, &count
, &kbuf
, &ubuf
,
1209 data
, 0, sizeof(struct gs_cb
));
1212 static int s390_gs_bc_set(struct task_struct
*target
,
1213 const struct user_regset
*regset
,
1214 unsigned int pos
, unsigned int count
,
1215 const void *kbuf
, const void __user
*ubuf
)
1217 struct gs_cb
*data
= target
->thread
.gs_bc_cb
;
1219 if (!MACHINE_HAS_GS
)
1222 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
1225 target
->thread
.gs_bc_cb
= data
;
1227 return user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1228 data
, 0, sizeof(struct gs_cb
));
1231 static const struct user_regset s390_regsets
[] = {
1233 .core_note_type
= NT_PRSTATUS
,
1234 .n
= sizeof(s390_regs
) / sizeof(long),
1235 .size
= sizeof(long),
1236 .align
= sizeof(long),
1237 .get
= s390_regs_get
,
1238 .set
= s390_regs_set
,
1241 .core_note_type
= NT_PRFPREG
,
1242 .n
= sizeof(s390_fp_regs
) / sizeof(long),
1243 .size
= sizeof(long),
1244 .align
= sizeof(long),
1245 .get
= s390_fpregs_get
,
1246 .set
= s390_fpregs_set
,
1249 .core_note_type
= NT_S390_SYSTEM_CALL
,
1251 .size
= sizeof(unsigned int),
1252 .align
= sizeof(unsigned int),
1253 .get
= s390_system_call_get
,
1254 .set
= s390_system_call_set
,
1257 .core_note_type
= NT_S390_LAST_BREAK
,
1259 .size
= sizeof(long),
1260 .align
= sizeof(long),
1261 .get
= s390_last_break_get
,
1262 .set
= s390_last_break_set
,
1265 .core_note_type
= NT_S390_TDB
,
1269 .get
= s390_tdb_get
,
1270 .set
= s390_tdb_set
,
1273 .core_note_type
= NT_S390_VXRS_LOW
,
1274 .n
= __NUM_VXRS_LOW
,
1275 .size
= sizeof(__u64
),
1276 .align
= sizeof(__u64
),
1277 .get
= s390_vxrs_low_get
,
1278 .set
= s390_vxrs_low_set
,
1281 .core_note_type
= NT_S390_VXRS_HIGH
,
1282 .n
= __NUM_VXRS_HIGH
,
1283 .size
= sizeof(__vector128
),
1284 .align
= sizeof(__vector128
),
1285 .get
= s390_vxrs_high_get
,
1286 .set
= s390_vxrs_high_set
,
1289 .core_note_type
= NT_S390_GS_CB
,
1290 .n
= sizeof(struct gs_cb
) / sizeof(__u64
),
1291 .size
= sizeof(__u64
),
1292 .align
= sizeof(__u64
),
1293 .get
= s390_gs_cb_get
,
1294 .set
= s390_gs_cb_set
,
1297 .core_note_type
= NT_S390_GS_BC
,
1298 .n
= sizeof(struct gs_cb
) / sizeof(__u64
),
1299 .size
= sizeof(__u64
),
1300 .align
= sizeof(__u64
),
1301 .get
= s390_gs_bc_get
,
1302 .set
= s390_gs_bc_set
,
1306 static const struct user_regset_view user_s390_view
= {
1307 .name
= UTS_MACHINE
,
1308 .e_machine
= EM_S390
,
1309 .regsets
= s390_regsets
,
1310 .n
= ARRAY_SIZE(s390_regsets
)
1313 #ifdef CONFIG_COMPAT
1314 static int s390_compat_regs_get(struct task_struct
*target
,
1315 const struct user_regset
*regset
,
1316 unsigned int pos
, unsigned int count
,
1317 void *kbuf
, void __user
*ubuf
)
1319 if (target
== current
)
1320 save_access_regs(target
->thread
.acrs
);
1323 compat_ulong_t
*k
= kbuf
;
1325 *k
++ = __peek_user_compat(target
, pos
);
1326 count
-= sizeof(*k
);
1330 compat_ulong_t __user
*u
= ubuf
;
1332 if (__put_user(__peek_user_compat(target
, pos
), u
++))
1334 count
-= sizeof(*u
);
1341 static int s390_compat_regs_set(struct task_struct
*target
,
1342 const struct user_regset
*regset
,
1343 unsigned int pos
, unsigned int count
,
1344 const void *kbuf
, const void __user
*ubuf
)
1348 if (target
== current
)
1349 save_access_regs(target
->thread
.acrs
);
1352 const compat_ulong_t
*k
= kbuf
;
1353 while (count
> 0 && !rc
) {
1354 rc
= __poke_user_compat(target
, pos
, *k
++);
1355 count
-= sizeof(*k
);
1359 const compat_ulong_t __user
*u
= ubuf
;
1360 while (count
> 0 && !rc
) {
1361 compat_ulong_t word
;
1362 rc
= __get_user(word
, u
++);
1365 rc
= __poke_user_compat(target
, pos
, word
);
1366 count
-= sizeof(*u
);
1371 if (rc
== 0 && target
== current
)
1372 restore_access_regs(target
->thread
.acrs
);
1377 static int s390_compat_regs_high_get(struct task_struct
*target
,
1378 const struct user_regset
*regset
,
1379 unsigned int pos
, unsigned int count
,
1380 void *kbuf
, void __user
*ubuf
)
1382 compat_ulong_t
*gprs_high
;
1384 gprs_high
= (compat_ulong_t
*)
1385 &task_pt_regs(target
)->gprs
[pos
/ sizeof(compat_ulong_t
)];
1387 compat_ulong_t
*k
= kbuf
;
1391 count
-= sizeof(*k
);
1394 compat_ulong_t __user
*u
= ubuf
;
1396 if (__put_user(*gprs_high
, u
++))
1399 count
-= sizeof(*u
);
1405 static int s390_compat_regs_high_set(struct task_struct
*target
,
1406 const struct user_regset
*regset
,
1407 unsigned int pos
, unsigned int count
,
1408 const void *kbuf
, const void __user
*ubuf
)
1410 compat_ulong_t
*gprs_high
;
1413 gprs_high
= (compat_ulong_t
*)
1414 &task_pt_regs(target
)->gprs
[pos
/ sizeof(compat_ulong_t
)];
1416 const compat_ulong_t
*k
= kbuf
;
1420 count
-= sizeof(*k
);
1423 const compat_ulong_t __user
*u
= ubuf
;
1424 while (count
> 0 && !rc
) {
1426 rc
= __get_user(word
, u
++);
1431 count
-= sizeof(*u
);
1438 static int s390_compat_last_break_get(struct task_struct
*target
,
1439 const struct user_regset
*regset
,
1440 unsigned int pos
, unsigned int count
,
1441 void *kbuf
, void __user
*ubuf
)
1443 compat_ulong_t last_break
;
1446 last_break
= target
->thread
.last_break
;
1448 unsigned long *k
= kbuf
;
1451 unsigned long __user
*u
= ubuf
;
1452 if (__put_user(last_break
, u
))
1459 static int s390_compat_last_break_set(struct task_struct
*target
,
1460 const struct user_regset
*regset
,
1461 unsigned int pos
, unsigned int count
,
1462 const void *kbuf
, const void __user
*ubuf
)
1467 static const struct user_regset s390_compat_regsets
[] = {
1469 .core_note_type
= NT_PRSTATUS
,
1470 .n
= sizeof(s390_compat_regs
) / sizeof(compat_long_t
),
1471 .size
= sizeof(compat_long_t
),
1472 .align
= sizeof(compat_long_t
),
1473 .get
= s390_compat_regs_get
,
1474 .set
= s390_compat_regs_set
,
1477 .core_note_type
= NT_PRFPREG
,
1478 .n
= sizeof(s390_fp_regs
) / sizeof(compat_long_t
),
1479 .size
= sizeof(compat_long_t
),
1480 .align
= sizeof(compat_long_t
),
1481 .get
= s390_fpregs_get
,
1482 .set
= s390_fpregs_set
,
1485 .core_note_type
= NT_S390_SYSTEM_CALL
,
1487 .size
= sizeof(compat_uint_t
),
1488 .align
= sizeof(compat_uint_t
),
1489 .get
= s390_system_call_get
,
1490 .set
= s390_system_call_set
,
1493 .core_note_type
= NT_S390_LAST_BREAK
,
1495 .size
= sizeof(long),
1496 .align
= sizeof(long),
1497 .get
= s390_compat_last_break_get
,
1498 .set
= s390_compat_last_break_set
,
1501 .core_note_type
= NT_S390_TDB
,
1505 .get
= s390_tdb_get
,
1506 .set
= s390_tdb_set
,
1509 .core_note_type
= NT_S390_VXRS_LOW
,
1510 .n
= __NUM_VXRS_LOW
,
1511 .size
= sizeof(__u64
),
1512 .align
= sizeof(__u64
),
1513 .get
= s390_vxrs_low_get
,
1514 .set
= s390_vxrs_low_set
,
1517 .core_note_type
= NT_S390_VXRS_HIGH
,
1518 .n
= __NUM_VXRS_HIGH
,
1519 .size
= sizeof(__vector128
),
1520 .align
= sizeof(__vector128
),
1521 .get
= s390_vxrs_high_get
,
1522 .set
= s390_vxrs_high_set
,
1525 .core_note_type
= NT_S390_HIGH_GPRS
,
1526 .n
= sizeof(s390_compat_regs_high
) / sizeof(compat_long_t
),
1527 .size
= sizeof(compat_long_t
),
1528 .align
= sizeof(compat_long_t
),
1529 .get
= s390_compat_regs_high_get
,
1530 .set
= s390_compat_regs_high_set
,
1533 .core_note_type
= NT_S390_GS_CB
,
1534 .n
= sizeof(struct gs_cb
) / sizeof(__u64
),
1535 .size
= sizeof(__u64
),
1536 .align
= sizeof(__u64
),
1537 .get
= s390_gs_cb_get
,
1538 .set
= s390_gs_cb_set
,
1542 static const struct user_regset_view user_s390_compat_view
= {
1544 .e_machine
= EM_S390
,
1545 .regsets
= s390_compat_regsets
,
1546 .n
= ARRAY_SIZE(s390_compat_regsets
)
1550 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
1552 #ifdef CONFIG_COMPAT
1553 if (test_tsk_thread_flag(task
, TIF_31BIT
))
1554 return &user_s390_compat_view
;
1556 return &user_s390_view
;
1559 static const char *gpr_names
[NUM_GPRS
] = {
1560 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
1561 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
1564 unsigned long regs_get_register(struct pt_regs
*regs
, unsigned int offset
)
1566 if (offset
>= NUM_GPRS
)
1568 return regs
->gprs
[offset
];
1571 int regs_query_register_offset(const char *name
)
1573 unsigned long offset
;
1575 if (!name
|| *name
!= 'r')
1577 if (kstrtoul(name
+ 1, 10, &offset
))
1579 if (offset
>= NUM_GPRS
)
1584 const char *regs_query_register_name(unsigned int offset
)
1586 if (offset
>= NUM_GPRS
)
1588 return gpr_names
[offset
];
1591 static int regs_within_kernel_stack(struct pt_regs
*regs
, unsigned long addr
)
1593 unsigned long ksp
= kernel_stack_pointer(regs
);
1595 return (addr
& ~(THREAD_SIZE
- 1)) == (ksp
& ~(THREAD_SIZE
- 1));
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack
 * which is specified by @regs. If the @n th entry is NOT in the kernel
 * stack, this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}