// SPDX-License-Identifier: GPL-2.0
/*
 * Ptrace user space interface.
 *
 * Copyright IBM Corp. 1999, 2010
 * Author(s): Denis Joseph Barrow
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new.tdc = 1;
			else
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
	    test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
		if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
			new.control |= PER_EVENT_BRANCH;
		else
			new.control |= PER_EVENT_IFETCH;
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
			new.control |= PER_EVENT_IFETCH;
		new.start = 0;
		new.end = -1UL;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
}

void user_enable_block_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	set_tsk_thread_flag(task, TIF_BLOCK_STEP);
}
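
/*
 * These helpers are reached from the generic ptrace requests; a sketch
 * of the tracer side (user space, pid assumed to be a stopped tracee):
 *
 *	#include <sys/ptrace.h>
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);	// -> user_enable_single_step()
 *	ptrace(PTRACE_SINGLEBLOCK, pid, 0, 0);	// -> user_enable_block_step()
 *
 * Single-step raises a PER instruction-fetch event on every instruction,
 * block-step a PER successful-branch event, as merged in update_cr_regs().
 */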
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
	task->thread.per_flags = 0;
}
#define __ADDR_MASK 7

static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			-1UL : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			/* Return a clean psw mask. */
			tmp &= PSW_MASK_USER | PSW_MASK_RI;
			tmp |= PSW_USER_BITS;
		}

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;
		tmp <<= BITS_PER_LONG - 32;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(addr_t *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
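
/*
 * User-space view of the peek path (sketch): a 64 bit debugger reads
 * one aligned word of struct user with PTRACE_PEEKUSR. PT_GPR2 is the
 * register offset constant from asm/ptrace.h; pid is assumed to be a
 * stopped tracee.
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	long gpr2 = ptrace(PTRACE_PEEKUSR, pid, (void *) PT_GPR2, NULL);
 *	if (gpr2 == -1 && errno)
 *		perror("PTRACE_PEEKUSR");	// misaligned or out of range -> EIO
 */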
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
static void fixup_int_code(struct task_struct *child, addr_t data)
{
	struct pt_regs *regs = task_pt_regs(child);
	int ilc = regs->int_code >> 16;
	u16 insn;

	if (ilc > 6)
		return;

	if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
			&insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
		return;

	/* double check that tracee stopped on svc instruction */
	if ((insn >> 8) != 0xa)
		return;

	regs->int_code = 0x20000 | (data & 0xffff);
}
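
/*
 * Why the fixup is needed: at a syscall stop the syscall number lives
 * both in gpr 2 and in the low half of regs->int_code. If a tracer
 * rewrites gpr 2, the two would go out of sync without this helper.
 * A hypothetical tracer-side trigger (pid stopped at syscall entry;
 * PT_GPR2 from asm/ptrace.h):
 *
 *	ptrace(PTRACE_POKEUSR, pid, (void *) PT_GPR2, (void *) new_nr);
 *
 * __poke_user() below calls fixup_int_code() for exactly this offset.
 */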
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask) {
			unsigned long mask = PSW_MASK_USER;

			mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
			if ((data ^ PSW_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
				/* Invalid addressing mode bits */
				return -EINVAL;
		}

		if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
		    addr == offsetof(struct user, regs.gprs[2]))
			fixup_int_code(child, data);
		*(addr_t *)((addr_t) &regs->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if ((unsigned int) data != 0 ||
		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
			return -EINVAL;
		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(addr_t *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = data;
		else
			*(addr_t *)((addr_t)
				child->thread.fpu.fprs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
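
/*
 * User-space sketch of what the PER plumbing above enables: a debugger
 * reads the per_info block out of struct user, arms a storage-alteration
 * watch and writes it back with PTRACE_POKEUSR_AREA. Type and field
 * names (per_struct, ptrace_area, the bit name) follow the s390 user
 * ABI headers; treat the details as illustrative, not authoritative.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/user.h>
 *	#include <stddef.h>
 *
 *	per_struct per;
 *	ptrace_area area = {
 *		.len	      = sizeof(per),
 *		.kernel_addr  = offsetof(struct user, regs.per_info),
 *		.process_addr = (unsigned long) &per,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &area, NULL);
 *	per.control_regs.bits.em_storage_alteration = 1;
 *	per.starting_addr = watch_start;
 *	per.ending_addr	  = watch_end;
 *	ptrace(PTRACE_POKEUSR_AREA, pid, &area, NULL);
 */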
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);
	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);
	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		return ptrace_request(child, request, addr, data);
	}
}
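
/*
 * PTRACE_GET_LAST_BREAK sketch (user space): the request stores the
 * tracee's last breaking-event address, i.e. the source of the most
 * recent branch, through the data pointer; illustration only:
 *
 *	#include <sys/ptrace.h>
 *	#include <stdio.h>
 *
 *	unsigned long last_break;
 *	if (ptrace(PTRACE_GET_LAST_BREAK, pid, NULL, &last_break) == 0)
 *		printf("last breaking event: %#lx\n", last_break);
 */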
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */
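
/*
 * Concretely: the PTRACE_PEEKUSR_AREA loop in compat_arch_ptrace()
 * below advances in 4 byte steps where arch_ptrace() uses 8 byte
 * steps. A 31 bit tracer would describe a one-word transfer roughly
 * like this (user-space sketch; the 31 bit ABI's ptrace_area layout
 * is mirrored by compat_ptrace_area here, and PT_GPR2 is the struct
 * user offset from the 31 bit asm/ptrace.h):
 *
 *	ptrace_area parea = {
 *		.len	      = 4,
 *		.kernel_addr  = PT_GPR2,
 *		.process_addr = (unsigned long) &word,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &parea, NULL);
 */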
/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
			tmp |= PSW32_USER_BITS;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32 *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		tmp = child->thread.fpu.fpc;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
		else
			tmp = *(__u32 *)
			       ((addr_t) child->thread.fpu.fprs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}
/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			__u32 mask = PSW32_MASK_USER;

			mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp ^ PSW32_USER_BITS) & ~mask)
				/* Invalid psw mask. */
				return -EINVAL;
			if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
				/* Invalid address-space-control bits */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & mask) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
			    addr == offsetof(struct compat_user, regs.gprs[2]))
				fixup_int_code(child, data);
			/* gpr 0-15 */
			*(__u32 *)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32 *)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32 *)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
		/*
		 * floating point control reg. is in the thread structure
		 */
		if (test_fp_ctl(tmp))
			return -EINVAL;
		child->thread.fpu.fpc = data;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are either in child->thread.fpu
		 * or the child->thread.fpu.vxrs array
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
		if (MACHINE_HAS_VX)
			*(__u32 *)((addr_t)
				child->thread.fpu.vxrs + 2*offset) = tmp;
		else
			*(__u32 *)((addr_t)
				child->thread.fpu.fprs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);
	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);
	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(child->thread.last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	unsigned long mask = -1UL;
	long ret = -1;

	if (is_compat_task())
		mask = 0xffffffff;

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/*
		 * Tracing decided this syscall should not happen. Skip
		 * the system call and the system call restart handling.
		 */
		goto skip;
	}

#ifdef CONFIG_SECCOMP
	/* Do the secure computing check after ptrace. */
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		struct seccomp_data sd;

		if (is_compat_task()) {
			sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
			sd.arch = AUDIT_ARCH_S390;
		} else {
			sd.instruction_pointer = regs->psw.addr;
			sd.arch = AUDIT_ARCH_S390X;
		}

		sd.nr = regs->int_code & 0xffff;
		sd.args[0] = regs->orig_gpr2 & mask;
		sd.args[1] = regs->gprs[3] & mask;
		sd.args[2] = regs->gprs[4] & mask;
		sd.args[3] = regs->gprs[5] & mask;
		sd.args[4] = regs->gprs[6] & mask;
		sd.args[5] = regs->gprs[7] & mask;

		if (__secure_computing(&sd) == -1)
			goto skip;
	}
#endif /* CONFIG_SECCOMP */

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->int_code & 0xffff);

	audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
			    regs->gprs[3] & mask, regs->gprs[4] & mask,
			    regs->gprs[5] & mask);

	if ((signed long)regs->gprs[2] >= NR_syscalls) {
		regs->gprs[2] = -ENOSYS;
		ret = -ENOSYS;
	}
	return regs->gprs[2];
skip:
	clear_pt_regs_flag(regs, PIF_SYSCALL);
	return ret;
}
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
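
/*
 * The two hooks above implement the entry/exit halves of the classic
 * PTRACE_SYSCALL stop pair. A minimal tracer loop (user-space sketch,
 * error handling omitted):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	for (;;) {
 *		ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *		waitpid(pid, &status, 0);
 *		if (WIFEXITED(status))
 *			break;
 *		// syscall-enter and syscall-exit stops alternate here
 *	}
 */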
/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf to)
{
	unsigned pos;

	if (target == current)
		save_access_regs(target->thread.acrs);

	for (pos = 0; pos < sizeof(s390_regs); pos += sizeof(long))
		membuf_store(&to, __peek_user(target, pos));
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset,
			   struct membuf to)
{
	_s390_fp_regs fp_regs;

	if (target == current)
		save_fpu_regs();

	fp_regs.fpc = target->thread.fpu.fpc;
	fpregs_store(&fp_regs, &target->thread.fpu);

	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;
	freg_t fprs[__NUM_FPRS];

	if (target == current)
		save_fpu_regs();

	if (MACHINE_HAS_VX)
		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
	else
		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
			return -EINVAL;
		target->thread.fpu.fpc = ufpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					fprs, offsetof(s390_fp_regs, fprs), -1);
	if (rc)
		return rc;

	if (MACHINE_HAS_VX)
		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
	else
		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));

	return rc;
}
static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       struct membuf to)
{
	return membuf_store(&to, target->thread.last_break);
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}

static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	return membuf_write(&to, target->thread.trap_tdb, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}
static int s390_vxrs_low_get(struct task_struct *target,
			     const struct user_regset *regset,
			     struct membuf to)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
	return membuf_write(&to, vxrs, sizeof(vxrs));
}

static int s390_vxrs_low_set(struct task_struct *target,
			     const struct user_regset *regset,
			     unsigned int pos, unsigned int count,
			     const void *kbuf, const void __user *ubuf)
{
	__u64 vxrs[__NUM_VXRS_LOW];
	int i, rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	for (i = 0; i < __NUM_VXRS_LOW; i++)
		vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
	if (rc == 0)
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];

	return rc;
}

static int s390_vxrs_high_get(struct task_struct *target,
			      const struct user_regset *regset,
			      struct membuf to)
{
	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();
	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
			    __NUM_VXRS_HIGH * sizeof(__vector128));
}

static int s390_vxrs_high_set(struct task_struct *target,
			      const struct user_regset *regset,
			      unsigned int pos, unsigned int count,
			      const void *kbuf, const void __user *ubuf)
{
	int rc;

	if (!MACHINE_HAS_VX)
		return -ENODEV;
	if (target == current)
		save_fpu_regs();

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
	return rc;
}
static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	return membuf_store(&to, target->thread.system_call);
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &target->thread.system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}
static int s390_gs_cb_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	if (target == current)
		save_gs_cb(data);
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}

static int s390_gs_bc_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data)
		return -ENODATA;
	return membuf_write(&to, data, sizeof(struct gs_cb));
}

static int s390_gs_bc_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_bc_cb;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		target->thread.gs_bc_cb = data;
	}
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(struct gs_cb));
}
static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
	return (cb->rca & 0x1f) == 0 &&
		(cb->roa & 0xfff) == 0 &&
		(cb->rla & 0xfff) == 0xfff &&
		cb->s == 1 &&
		cb->k == 1 &&
		cb->h == 0 &&
		cb->reserved1 == 0 &&
		cb->ps == 1 &&
		cb->qs == 0 &&
		cb->pc == 1 &&
		cb->qc == 0 &&
		cb->reserved2 == 0 &&
		cb->reserved3 == 0 &&
		cb->reserved4 == 0 &&
		cb->reserved5 == 0 &&
		cb->reserved6 == 0 &&
		cb->reserved7 == 0 &&
		cb->reserved8 == 0 &&
		cb->rla >= cb->roa &&
		cb->rca >= cb->roa &&
		cb->rca <= cb->rla + 1 &&
		cb->m < 3;
}
static int s390_runtime_instr_get(struct task_struct *target,
				  const struct user_regset *regset,
				  struct membuf to)
{
	struct runtime_instr_cb *data = target->thread.ri_cb;

	if (!test_facility(64))
		return -ENODEV;
	if (!data)
		return -ENODATA;

	return membuf_write(&to, data, sizeof(struct runtime_instr_cb));
}

static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}
	/*
	 * Override access key in any case, since user space should
	 * not be able to set it, nor should it care about it.
	 */
	ri_cb.key = PAGE_DEFAULT_KEY >> 4;
	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}
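
/*
 * The runtime-instrumentation control block is exported as the
 * NT_S390_RI_CB regset (see the table below). Reading it from a tracer
 * looks roughly like this (user-space sketch; struct runtime_instr_cb
 * comes from the asm/runtime_instr.h user header):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	struct runtime_instr_cb cb;
 *	struct iovec iov = { .iov_base = &cb, .iov_len = sizeof(cb) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_S390_RI_CB, &iov);
 */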
static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = "s390x",
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				struct membuf to)
{
	unsigned n;

	if (target == current)
		save_access_regs(target->thread.acrs);

	for (n = 0; n < sizeof(s390_compat_regs); n += sizeof(compat_ulong_t))
		membuf_store(&to, __peek_user_compat(target, n));
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     struct membuf to)
{
	compat_ulong_t *gprs_high;
	int i;

	gprs_high = (compat_ulong_t *)task_pt_regs(target)->gprs;
	for (i = 0; i < NUM_GPRS; i++, gprs_high += 2)
		membuf_store(&to, *gprs_high);
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      struct membuf to)
{
	compat_ulong_t last_break = target->thread.last_break;

	return membuf_store(&to, (unsigned long)last_break);
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
static const struct user_regset s390_compat_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.regset_get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.regset_get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.regset_get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.regset_get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.regset_get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.regset_get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif /* CONFIG_COMPAT */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
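
/*
 * The regset tables above serve both PTRACE_GETREGSET/PTRACE_SETREGSET
 * and the ELF core dump notes. Sketch of a 64 bit tracer fetching the
 * low halves of the vector registers (illustrative only):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <elf.h>
 *
 *	unsigned long long vxrs_low[16];	// __NUM_VXRS_LOW entries
 *	struct iovec iov = {
 *		.iov_base = vxrs_low,
 *		.iov_len  = sizeof(vxrs_low),
 *	};
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_S390_VXRS_LOW, &iov) == 0)
 *		;	// vxrs_low[i] holds bits 64-127 of vector reg i
 */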
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}
int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (kstrtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}
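
/*
 * These lookups back the kprobes/ftrace fetch-arg syntax ("%r2" style
 * arguments): name -> offset -> value. Kernel-side usage sketch with a
 * hypothetical caller that already holds a struct pt_regs *regs:
 *
 *	int off = regs_query_register_offset("r2");	// -> 2
 *	if (off >= 0)
 *		pr_info("%s = %lx\n", regs_query_register_name(off),
 *			regs_get_register(regs, off));
 */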
static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}
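
/*
 * Usage sketch: kprobe event arguments such as "$stack3" resolve via
 * this helper. n counts in longs from the tracee's kernel stack
 * pointer, and an access outside the THREAD_SIZE aligned stack reads
 * back as 0:
 *
 *	unsigned long slot3 = regs_get_kernel_stack_nth(regs, 3);
 *	// == *(unsigned long *)(kernel_stack_pointer(regs) + 3 * sizeof(long))
 *	// when in range, 0 otherwise
 */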