/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999, 2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_TDB,
	REGSET_SYSTEM_CALL,
	REGSET_GENERAL_EXTENDED,
};
void update_cr_regs(struct task_struct *task)
{
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;

#ifdef CONFIG_64BIT
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		unsigned long cr[3], cr_new[3];

		__ctl_store(cr, 0, 2);
		cr_new[1] = cr[1];
		/* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr_new[0] = cr[0] & ~(3UL << 54);
		else
			cr_new[0] = cr[0] | (3UL << 54);
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr_new[2] = cr[2] & ~3UL;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr_new[2] |= 1UL;
			else
				cr_new[2] |= 2UL;
		}
		if (memcmp(&cr_new, &cr, sizeof(cr)))
			__ctl_load(cr_new, 0, 2);
	}
#endif
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
	new.end = thread->per_user.end;

	/* merge TIF_SINGLE_STEP into user specified PER registers. */
	if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
		new.control |= PER_EVENT_IFETCH;
#ifdef CONFIG_64BIT
		new.control |= PER_CONTROL_SUSPENSION;
		new.control |= PER_EVENT_TRANSACTION_END;
#endif
		new.start = 0;
		new.end = PSW_ADDR_INSN;
	}

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new.control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(new, 9, 11);
}
void user_enable_single_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_cr_regs(task);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_cr_regs(task);
}
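
/*
 * Illustrative sketch, not part of the original file: the two helpers above
 * are invoked by the generic ptrace core.  A tracer that issues
 * PTRACE_SINGLESTEP ends up in user_enable_single_step(), which sets
 * TIF_SINGLE_STEP and lets update_cr_regs() reprogram the PER control
 * registers.  From user space the usual sequence is simply:
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *
 * and the child stops with SIGTRAP after executing one instruction.
 */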
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
	task->thread.per_flags = 0;
}
#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}
/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Return a clean psw mask. */
			tmp = psw_user_bits | (tmp & PSW_MASK_USER);

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}
static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
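
/*
 * Illustrative sketch, not part of the original file: per the comment above,
 * a debugger can only influence cr9, starting_addr and ending_addr.  Assuming
 * the per_struct layout exported through struct user in <asm/ptrace.h>, a
 * hypothetical tracer could move the PER range one word at a time:
 *
 *	ptrace(PTRACE_POKEUSR, pid,
 *	       offsetof(struct user, regs.per_info.starting_addr), new_start);
 *	ptrace(PTRACE_POKEUSR, pid,
 *	       offsetof(struct user, regs.per_info.ending_addr), new_end);
 *
 * The new range only takes effect while single stepping is not active, since
 * update_cr_regs() widens the range to the whole address space otherwise.
 */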
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
		    ((data & ~PSW_MASK_USER) != psw_user_bits ||
		     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
			/* Invalid psw mask. */
			return -EINVAL;
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;

		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}
static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned long __user *) data);
		return 0;
	case PTRACE_ENABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags &= ~PER_FLAG_NO_TE;
		return 0;
	case PTRACE_DISABLE_TE:
		if (!MACHINE_HAS_TE)
			return -EIO;
		child->thread.per_flags |= PER_FLAG_NO_TE;
		child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
		return 0;
	case PTRACE_TE_ABORT_RAND:
		if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
			return -EIO;
		switch (data) {
		case 0UL:
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
			break;
		case 1UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		case 2UL:
			child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
			child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	default:
		/* Removing high order bit from addr (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		return ptrace_request(child, request, addr, data);
	}
}
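
/*
 * Illustrative sketch, not part of the original file: PTRACE_PEEKUSR_AREA
 * copies a whole chunk of the user area with a single call instead of one
 * word per ptrace() round trip.  The tracer passes a ptrace_area descriptor
 * whose fields are consumed above (len, kernel_addr, process_addr), e.g.:
 *
 *	unsigned long gprs[16];			(hypothetical buffer)
 *	ptrace_area parea = {
 *		.len          = sizeof(gprs),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (unsigned long) gprs,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, (long) &parea, 0);
 */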
#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 byte instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */
/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}
/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(regs->psw.mask >> 32);
			tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) regs->psw.addr |
				(__u32)(regs->psw.mask & PSW_MASK_BA);
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32 *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}
static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}
/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}
/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		struct pt_regs *regs = task_pt_regs(child);
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
				/* Invalid psw mask. */
				return -EINVAL;
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
				(regs->psw.mask & PSW_MASK_BA) |
				(__u64)(tmp & PSW32_MASK_USER) << 32;
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
			/* Transfer 31 bit amode bit to psw mask. */
			regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
				(__u64)(tmp & PSW32_ADDR_AMODE);
		} else {
			/* gpr 0-15 */
			*(__u32 *)((addr_t) &regs->psw + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32 *)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32 *)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}
static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	if (secure_computing(regs->gprs[2])) {
		/* seccomp failures shouldn't expose any additional code. */
		ret = -1;
		goto out;
	}

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		clear_thread_flag(TIF_SYSCALL);
		ret = -1;
	}
out:
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	audit_syscall_entry(is_compat_task() ?
				AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
			    regs->gprs[2], regs->orig_gpr2,
			    regs->gprs[3], regs->gprs[4],
			    regs->gprs[5]);

	return ret ?: regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}
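
/*
 * Illustrative sketch, not part of the original file: a tracer using
 * PTRACE_SYSCALL sees a stop around each of the two hooks above.  As the
 * comment in do_syscall_trace_enter() notes, the system call number lives
 * in gpr 2 (and the first argument in orig_gpr2), so after the stop it can
 * be read with, assuming the usual struct user offsets:
 *
 *	ptrace(PTRACE_SYSCALL, pid, 0, 0);
 *	waitpid(pid, &status, 0);
 *	long nr = ptrace(PTRACE_PEEKUSR, pid,
 *			 offsetof(struct user, regs.gprs[2]), 0);
 */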
/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}
static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}
static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);

	return rc;
}
#ifdef CONFIG_64BIT

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = task_thread_info(target)->last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(task_thread_info(target)->last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_last_break_set(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       const void *kbuf, const void __user *ubuf)
{
	return 0;
}
static int s390_tdb_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned char *data;

	if (!(regs->int_code & 0x200))
		return -ENODATA;
	data = target->thread.trap_tdb;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
}

static int s390_tdb_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	return 0;
}

#endif
static int s390_system_call_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(unsigned int));
}

static int s390_system_call_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	unsigned int *data = &task_thread_info(target)->system_call;
	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  data, 0, sizeof(unsigned int));
}
static const struct user_regset s390_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
#endif
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
};
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
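
/*
 * Illustrative sketch, not part of the original file: the regsets above are
 * what PTRACE_GETREGSET/PTRACE_SETREGSET and ELF core dump notes operate on.
 * Reading the NT_S390_SYSTEM_CALL note from a tracer might look like:
 *
 *	unsigned int syscall;			(hypothetical buffer)
 *	struct iovec iov = {
 *		.iov_base = &syscall,
 *		.iov_len  = sizeof(syscall),
 *	};
 *	ptrace(PTRACE_GETREGSET, pid, NT_S390_SYSTEM_CALL, &iov);
 */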
#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}
static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}
static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}
static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
static const struct user_regset s390_compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	[REGSET_TDB] = {
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	[REGSET_SYSTEM_CALL] = {
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}
int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (strict_strtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}
const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}
static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:pt_regs which contains kernel stack pointer.
 * @n:stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}
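
/*
 * Illustrative sketch, not part of the original file: the helpers above back
 * the kprobes/ftrace register fetch interface.  A probe handler that wants
 * the value of %r2 at the probe point could do:
 *
 *	int off = regs_query_register_offset("r2");	(yields 2)
 *	unsigned long val = (off >= 0) ? regs_get_register(regs, off) : 0;
 */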