x86/speculation/mds: Fix documentation typo
[linux/fpc-iii.git] / arch / s390 / kernel / ptrace.c
blob56e0190d6e6518cff6512525431c588e04b9ae3e
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Ptrace user space interface.
5 * Copyright IBM Corp. 1999, 2010
6 * Author(s): Denis Joseph Barrow
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 */
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
12 #include <linux/sched/task_stack.h>
13 #include <linux/mm.h>
14 #include <linux/smp.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/user.h>
18 #include <linux/security.h>
19 #include <linux/audit.h>
20 #include <linux/signal.h>
21 #include <linux/elf.h>
22 #include <linux/regset.h>
23 #include <linux/tracehook.h>
24 #include <linux/seccomp.h>
25 #include <linux/compat.h>
26 #include <trace/syscall.h>
27 #include <asm/segment.h>
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/pgalloc.h>
31 #include <linux/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/switch_to.h>
34 #include "entry.h"
36 #ifdef CONFIG_COMPAT
37 #include "compat_ptrace.h"
38 #endif
40 #define CREATE_TRACE_POINTS
41 #include <trace/events/syscalls.h>
43 void update_cr_regs(struct task_struct *task)
45 struct pt_regs *regs = task_pt_regs(task);
46 struct thread_struct *thread = &task->thread;
47 struct per_regs old, new;
48 unsigned long cr0_old, cr0_new;
49 unsigned long cr2_old, cr2_new;
50 int cr0_changed, cr2_changed;
52 __ctl_store(cr0_old, 0, 0);
53 __ctl_store(cr2_old, 2, 2);
54 cr0_new = cr0_old;
55 cr2_new = cr2_old;
56 /* Take care of the enable/disable of transactional execution. */
57 if (MACHINE_HAS_TE) {
58 /* Set or clear transaction execution TXC bit 8. */
59 cr0_new |= (1UL << 55);
60 if (task->thread.per_flags & PER_FLAG_NO_TE)
61 cr0_new &= ~(1UL << 55);
62 /* Set or clear transaction execution TDC bits 62 and 63. */
63 cr2_new &= ~3UL;
64 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
65 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
66 cr2_new |= 1UL;
67 else
68 cr2_new |= 2UL;
71 /* Take care of enable/disable of guarded storage. */
72 if (MACHINE_HAS_GS) {
73 cr2_new &= ~(1UL << 4);
74 if (task->thread.gs_cb)
75 cr2_new |= (1UL << 4);
77 /* Load control register 0/2 iff changed */
78 cr0_changed = cr0_new != cr0_old;
79 cr2_changed = cr2_new != cr2_old;
80 if (cr0_changed)
81 __ctl_load(cr0_new, 0, 0);
82 if (cr2_changed)
83 __ctl_load(cr2_new, 2, 2);
84 /* Copy user specified PER registers */
85 new.control = thread->per_user.control;
86 new.start = thread->per_user.start;
87 new.end = thread->per_user.end;
89 /* merge TIF_SINGLE_STEP into user specified PER registers. */
90 if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
91 test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
92 if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
93 new.control |= PER_EVENT_BRANCH;
94 else
95 new.control |= PER_EVENT_IFETCH;
96 new.control |= PER_CONTROL_SUSPENSION;
97 new.control |= PER_EVENT_TRANSACTION_END;
98 if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
99 new.control |= PER_EVENT_IFETCH;
100 new.start = 0;
101 new.end = -1UL;
104 /* Take care of the PER enablement bit in the PSW. */
105 if (!(new.control & PER_EVENT_MASK)) {
106 regs->psw.mask &= ~PSW_MASK_PER;
107 return;
109 regs->psw.mask |= PSW_MASK_PER;
110 __ctl_store(old, 9, 11);
111 if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
112 __ctl_load(new, 9, 11);
115 void user_enable_single_step(struct task_struct *task)
117 clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
118 set_tsk_thread_flag(task, TIF_SINGLE_STEP);
121 void user_disable_single_step(struct task_struct *task)
123 clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
124 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
127 void user_enable_block_step(struct task_struct *task)
129 set_tsk_thread_flag(task, TIF_SINGLE_STEP);
130 set_tsk_thread_flag(task, TIF_BLOCK_STEP);
134 * Called by kernel/ptrace.c when detaching..
136 * Clear all debugging related fields.
138 void ptrace_disable(struct task_struct *task)
140 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
141 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
142 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
143 clear_pt_regs_flag(task_pt_regs(task), PIF_PER_TRAP);
144 task->thread.per_flags = 0;
147 #define __ADDR_MASK 7
149 static inline unsigned long __peek_user_per(struct task_struct *child,
150 addr_t addr)
152 struct per_struct_kernel *dummy = NULL;
154 if (addr == (addr_t) &dummy->cr9)
155 /* Control bits of the active per set. */
156 return test_thread_flag(TIF_SINGLE_STEP) ?
157 PER_EVENT_IFETCH : child->thread.per_user.control;
158 else if (addr == (addr_t) &dummy->cr10)
159 /* Start address of the active per set. */
160 return test_thread_flag(TIF_SINGLE_STEP) ?
161 0 : child->thread.per_user.start;
162 else if (addr == (addr_t) &dummy->cr11)
163 /* End address of the active per set. */
164 return test_thread_flag(TIF_SINGLE_STEP) ?
165 -1UL : child->thread.per_user.end;
166 else if (addr == (addr_t) &dummy->bits)
167 /* Single-step bit. */
168 return test_thread_flag(TIF_SINGLE_STEP) ?
169 (1UL << (BITS_PER_LONG - 1)) : 0;
170 else if (addr == (addr_t) &dummy->starting_addr)
171 /* Start address of the user specified per set. */
172 return child->thread.per_user.start;
173 else if (addr == (addr_t) &dummy->ending_addr)
174 /* End address of the user specified per set. */
175 return child->thread.per_user.end;
176 else if (addr == (addr_t) &dummy->perc_atmid)
177 /* PER code, ATMID and AI of the last PER trap */
178 return (unsigned long)
179 child->thread.per_event.cause << (BITS_PER_LONG - 16);
180 else if (addr == (addr_t) &dummy->address)
181 /* Address of the last PER trap */
182 return child->thread.per_event.address;
183 else if (addr == (addr_t) &dummy->access_id)
184 /* Access id of the last PER trap */
185 return (unsigned long)
186 child->thread.per_event.paid << (BITS_PER_LONG - 8);
187 return 0;
191 * Read the word at offset addr from the user area of a process. The
192 * trouble here is that the information is littered over different
193 * locations. The process registers are found on the kernel stack,
194 * the floating point stuff and the trace settings are stored in
195 * the task structure. In addition the different structures in
196 * struct user contain pad bytes that should be read as zeroes.
197 * Lovely...
199 static unsigned long __peek_user(struct task_struct *child, addr_t addr)
201 struct user *dummy = NULL;
202 addr_t offset, tmp;
204 if (addr < (addr_t) &dummy->regs.acrs) {
206 * psw and gprs are stored on the stack
208 tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
209 if (addr == (addr_t) &dummy->regs.psw.mask) {
210 /* Return a clean psw mask. */
211 tmp &= PSW_MASK_USER | PSW_MASK_RI;
212 tmp |= PSW_USER_BITS;
215 } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
217 * access registers are stored in the thread structure
219 offset = addr - (addr_t) &dummy->regs.acrs;
221 * Very special case: old & broken 64 bit gdb reading
222 * from acrs[15]. Result is a 64 bit value. Read the
223 * 32 bit acrs[15] value and shift it by 32. Sick...
225 if (addr == (addr_t) &dummy->regs.acrs[15])
226 tmp = ((unsigned long) child->thread.acrs[15]) << 32;
227 else
228 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
230 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
232 * orig_gpr2 is stored on the kernel stack
234 tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
236 } else if (addr < (addr_t) &dummy->regs.fp_regs) {
238 * prevent reads of padding hole between
239 * orig_gpr2 and fp_regs on s390.
241 tmp = 0;
243 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
245 * floating point control reg. is in the thread structure
247 tmp = child->thread.fpu.fpc;
248 tmp <<= BITS_PER_LONG - 32;
250 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
252 * floating point regs. are either in child->thread.fpu
253 * or the child->thread.fpu.vxrs array
255 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
256 if (MACHINE_HAS_VX)
257 tmp = *(addr_t *)
258 ((addr_t) child->thread.fpu.vxrs + 2*offset);
259 else
260 tmp = *(addr_t *)
261 ((addr_t) child->thread.fpu.fprs + offset);
263 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
265 * Handle access to the per_info structure.
267 addr -= (addr_t) &dummy->regs.per_info;
268 tmp = __peek_user_per(child, addr);
270 } else
271 tmp = 0;
273 return tmp;
276 static int
277 peek_user(struct task_struct *child, addr_t addr, addr_t data)
279 addr_t tmp, mask;
282 * Stupid gdb peeks/pokes the access registers in 64 bit with
283 * an alignment of 4. Programmers from hell...
285 mask = __ADDR_MASK;
286 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
287 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
288 mask = 3;
289 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
290 return -EIO;
292 tmp = __peek_user(child, addr);
293 return put_user(tmp, (addr_t __user *) data);
296 static inline void __poke_user_per(struct task_struct *child,
297 addr_t addr, addr_t data)
299 struct per_struct_kernel *dummy = NULL;
302 * There are only three fields in the per_info struct that the
303 * debugger user can write to.
304 * 1) cr9: the debugger wants to set a new PER event mask
305 * 2) starting_addr: the debugger wants to set a new starting
306 * address to use with the PER event mask.
307 * 3) ending_addr: the debugger wants to set a new ending
308 * address to use with the PER event mask.
309 * The user specified PER event mask and the start and end
310 * addresses are used only if single stepping is not in effect.
311 * Writes to any other field in per_info are ignored.
313 if (addr == (addr_t) &dummy->cr9)
314 /* PER event mask of the user specified per set. */
315 child->thread.per_user.control =
316 data & (PER_EVENT_MASK | PER_CONTROL_MASK);
317 else if (addr == (addr_t) &dummy->starting_addr)
318 /* Starting address of the user specified per set. */
319 child->thread.per_user.start = data;
320 else if (addr == (addr_t) &dummy->ending_addr)
321 /* Ending address of the user specified per set. */
322 child->thread.per_user.end = data;
326 * Write a word to the user area of a process at location addr. This
327 * operation does have an additional problem compared to peek_user.
328 * Stores to the program status word and on the floating point
329 * control register needs to get checked for validity.
331 static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
333 struct user *dummy = NULL;
334 addr_t offset;
336 if (addr < (addr_t) &dummy->regs.acrs) {
338 * psw and gprs are stored on the stack
340 if (addr == (addr_t) &dummy->regs.psw.mask) {
341 unsigned long mask = PSW_MASK_USER;
343 mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
344 if ((data ^ PSW_USER_BITS) & ~mask)
345 /* Invalid psw mask. */
346 return -EINVAL;
347 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
348 /* Invalid address-space-control bits */
349 return -EINVAL;
350 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
351 /* Invalid addressing mode bits */
352 return -EINVAL;
354 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
356 } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
358 * access registers are stored in the thread structure
360 offset = addr - (addr_t) &dummy->regs.acrs;
362 * Very special case: old & broken 64 bit gdb writing
363 * to acrs[15] with a 64 bit value. Ignore the lower
364 * half of the value and write the upper 32 bit to
365 * acrs[15]. Sick...
367 if (addr == (addr_t) &dummy->regs.acrs[15])
368 child->thread.acrs[15] = (unsigned int) (data >> 32);
369 else
370 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
372 } else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
374 * orig_gpr2 is stored on the kernel stack
376 task_pt_regs(child)->orig_gpr2 = data;
378 } else if (addr < (addr_t) &dummy->regs.fp_regs) {
380 * prevent writes of padding hole between
381 * orig_gpr2 and fp_regs on s390.
383 return 0;
385 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
387 * floating point control reg. is in the thread structure
389 if ((unsigned int) data != 0 ||
390 test_fp_ctl(data >> (BITS_PER_LONG - 32)))
391 return -EINVAL;
392 child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
394 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
396 * floating point regs. are either in child->thread.fpu
397 * or the child->thread.fpu.vxrs array
399 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
400 if (MACHINE_HAS_VX)
401 *(addr_t *)((addr_t)
402 child->thread.fpu.vxrs + 2*offset) = data;
403 else
404 *(addr_t *)((addr_t)
405 child->thread.fpu.fprs + offset) = data;
407 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
409 * Handle access to the per_info structure.
411 addr -= (addr_t) &dummy->regs.per_info;
412 __poke_user_per(child, addr, data);
416 return 0;
419 static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
421 addr_t mask;
424 * Stupid gdb peeks/pokes the access registers in 64 bit with
425 * an alignment of 4. Programmers from hell indeed...
427 mask = __ADDR_MASK;
428 if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
429 addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
430 mask = 3;
431 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
432 return -EIO;
434 return __poke_user(child, addr, data);
437 long arch_ptrace(struct task_struct *child, long request,
438 unsigned long addr, unsigned long data)
440 ptrace_area parea;
441 int copied, ret;
443 switch (request) {
444 case PTRACE_PEEKUSR:
445 /* read the word at location addr in the USER area. */
446 return peek_user(child, addr, data);
448 case PTRACE_POKEUSR:
449 /* write the word at location addr in the USER area */
450 return poke_user(child, addr, data);
452 case PTRACE_PEEKUSR_AREA:
453 case PTRACE_POKEUSR_AREA:
454 if (copy_from_user(&parea, (void __force __user *) addr,
455 sizeof(parea)))
456 return -EFAULT;
457 addr = parea.kernel_addr;
458 data = parea.process_addr;
459 copied = 0;
460 while (copied < parea.len) {
461 if (request == PTRACE_PEEKUSR_AREA)
462 ret = peek_user(child, addr, data);
463 else {
464 addr_t utmp;
465 if (get_user(utmp,
466 (addr_t __force __user *) data))
467 return -EFAULT;
468 ret = poke_user(child, addr, utmp);
470 if (ret)
471 return ret;
472 addr += sizeof(unsigned long);
473 data += sizeof(unsigned long);
474 copied += sizeof(unsigned long);
476 return 0;
477 case PTRACE_GET_LAST_BREAK:
478 put_user(child->thread.last_break,
479 (unsigned long __user *) data);
480 return 0;
481 case PTRACE_ENABLE_TE:
482 if (!MACHINE_HAS_TE)
483 return -EIO;
484 child->thread.per_flags &= ~PER_FLAG_NO_TE;
485 return 0;
486 case PTRACE_DISABLE_TE:
487 if (!MACHINE_HAS_TE)
488 return -EIO;
489 child->thread.per_flags |= PER_FLAG_NO_TE;
490 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
491 return 0;
492 case PTRACE_TE_ABORT_RAND:
493 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
494 return -EIO;
495 switch (data) {
496 case 0UL:
497 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
498 break;
499 case 1UL:
500 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
501 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
502 break;
503 case 2UL:
504 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
505 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
506 break;
507 default:
508 return -EINVAL;
510 return 0;
511 default:
512 return ptrace_request(child, request, addr, data);
516 #ifdef CONFIG_COMPAT
518 * Now the fun part starts... a 31 bit program running in the
519 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
520 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
521 * to handle, the difference to the 64 bit versions of the requests
522 * is that the access is done in multiples of 4 byte instead of
523 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
524 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
525 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
526 * is a 31 bit program too, the content of struct user can be
527 * emulated. A 31 bit program peeking into the struct user of
528 * a 64 bit program is a no-no.
532 * Same as peek_user_per but for a 31 bit program.
534 static inline __u32 __peek_user_per_compat(struct task_struct *child,
535 addr_t addr)
537 struct compat_per_struct_kernel *dummy32 = NULL;
539 if (addr == (addr_t) &dummy32->cr9)
540 /* Control bits of the active per set. */
541 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
542 PER_EVENT_IFETCH : child->thread.per_user.control;
543 else if (addr == (addr_t) &dummy32->cr10)
544 /* Start address of the active per set. */
545 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
546 0 : child->thread.per_user.start;
547 else if (addr == (addr_t) &dummy32->cr11)
548 /* End address of the active per set. */
549 return test_thread_flag(TIF_SINGLE_STEP) ?
550 PSW32_ADDR_INSN : child->thread.per_user.end;
551 else if (addr == (addr_t) &dummy32->bits)
552 /* Single-step bit. */
553 return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
554 0x80000000 : 0;
555 else if (addr == (addr_t) &dummy32->starting_addr)
556 /* Start address of the user specified per set. */
557 return (__u32) child->thread.per_user.start;
558 else if (addr == (addr_t) &dummy32->ending_addr)
559 /* End address of the user specified per set. */
560 return (__u32) child->thread.per_user.end;
561 else if (addr == (addr_t) &dummy32->perc_atmid)
562 /* PER code, ATMID and AI of the last PER trap */
563 return (__u32) child->thread.per_event.cause << 16;
564 else if (addr == (addr_t) &dummy32->address)
565 /* Address of the last PER trap */
566 return (__u32) child->thread.per_event.address;
567 else if (addr == (addr_t) &dummy32->access_id)
568 /* Access id of the last PER trap */
569 return (__u32) child->thread.per_event.paid << 24;
570 return 0;
574 * Same as peek_user but for a 31 bit program.
576 static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
578 struct compat_user *dummy32 = NULL;
579 addr_t offset;
580 __u32 tmp;
582 if (addr < (addr_t) &dummy32->regs.acrs) {
583 struct pt_regs *regs = task_pt_regs(child);
585 * psw and gprs are stored on the stack
587 if (addr == (addr_t) &dummy32->regs.psw.mask) {
588 /* Fake a 31 bit psw mask. */
589 tmp = (__u32)(regs->psw.mask >> 32);
590 tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
591 tmp |= PSW32_USER_BITS;
592 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
593 /* Fake a 31 bit psw address. */
594 tmp = (__u32) regs->psw.addr |
595 (__u32)(regs->psw.mask & PSW_MASK_BA);
596 } else {
597 /* gpr 0-15 */
598 tmp = *(__u32 *)((addr_t) &regs->psw + addr*2 + 4);
600 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
602 * access registers are stored in the thread structure
604 offset = addr - (addr_t) &dummy32->regs.acrs;
605 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
607 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
609 * orig_gpr2 is stored on the kernel stack
611 tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
613 } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
615 * prevent reads of padding hole between
616 * orig_gpr2 and fp_regs on s390.
618 tmp = 0;
620 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
622 * floating point control reg. is in the thread structure
624 tmp = child->thread.fpu.fpc;
626 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
628 * floating point regs. are either in child->thread.fpu
629 * or the child->thread.fpu.vxrs array
631 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
632 if (MACHINE_HAS_VX)
633 tmp = *(__u32 *)
634 ((addr_t) child->thread.fpu.vxrs + 2*offset);
635 else
636 tmp = *(__u32 *)
637 ((addr_t) child->thread.fpu.fprs + offset);
639 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
641 * Handle access to the per_info structure.
643 addr -= (addr_t) &dummy32->regs.per_info;
644 tmp = __peek_user_per_compat(child, addr);
646 } else
647 tmp = 0;
649 return tmp;
652 static int peek_user_compat(struct task_struct *child,
653 addr_t addr, addr_t data)
655 __u32 tmp;
657 if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
658 return -EIO;
660 tmp = __peek_user_compat(child, addr);
661 return put_user(tmp, (__u32 __user *) data);
665 * Same as poke_user_per but for a 31 bit program.
667 static inline void __poke_user_per_compat(struct task_struct *child,
668 addr_t addr, __u32 data)
670 struct compat_per_struct_kernel *dummy32 = NULL;
672 if (addr == (addr_t) &dummy32->cr9)
673 /* PER event mask of the user specified per set. */
674 child->thread.per_user.control =
675 data & (PER_EVENT_MASK | PER_CONTROL_MASK);
676 else if (addr == (addr_t) &dummy32->starting_addr)
677 /* Starting address of the user specified per set. */
678 child->thread.per_user.start = data;
679 else if (addr == (addr_t) &dummy32->ending_addr)
680 /* Ending address of the user specified per set. */
681 child->thread.per_user.end = data;
685 * Same as poke_user but for a 31 bit program.
687 static int __poke_user_compat(struct task_struct *child,
688 addr_t addr, addr_t data)
690 struct compat_user *dummy32 = NULL;
691 __u32 tmp = (__u32) data;
692 addr_t offset;
694 if (addr < (addr_t) &dummy32->regs.acrs) {
695 struct pt_regs *regs = task_pt_regs(child);
697 * psw, gprs, acrs and orig_gpr2 are stored on the stack
699 if (addr == (addr_t) &dummy32->regs.psw.mask) {
700 __u32 mask = PSW32_MASK_USER;
702 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
703 /* Build a 64 bit psw mask from 31 bit mask. */
704 if ((tmp ^ PSW32_USER_BITS) & ~mask)
705 /* Invalid psw mask. */
706 return -EINVAL;
707 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
708 /* Invalid address-space-control bits */
709 return -EINVAL;
710 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
711 (regs->psw.mask & PSW_MASK_BA) |
712 (__u64)(tmp & mask) << 32;
713 } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
714 /* Build a 64 bit psw address from 31 bit address. */
715 regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
716 /* Transfer 31 bit amode bit to psw mask. */
717 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
718 (__u64)(tmp & PSW32_ADDR_AMODE);
719 } else {
720 /* gpr 0-15 */
721 *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
723 } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
725 * access registers are stored in the thread structure
727 offset = addr - (addr_t) &dummy32->regs.acrs;
728 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
730 } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
732 * orig_gpr2 is stored on the kernel stack
734 *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
736 } else if (addr < (addr_t) &dummy32->regs.fp_regs) {
738 * prevent writess of padding hole between
739 * orig_gpr2 and fp_regs on s390.
741 return 0;
743 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
745 * floating point control reg. is in the thread structure
747 if (test_fp_ctl(tmp))
748 return -EINVAL;
749 child->thread.fpu.fpc = data;
751 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
753 * floating point regs. are either in child->thread.fpu
754 * or the child->thread.fpu.vxrs array
756 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
757 if (MACHINE_HAS_VX)
758 *(__u32 *)((addr_t)
759 child->thread.fpu.vxrs + 2*offset) = tmp;
760 else
761 *(__u32 *)((addr_t)
762 child->thread.fpu.fprs + offset) = tmp;
764 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
766 * Handle access to the per_info structure.
768 addr -= (addr_t) &dummy32->regs.per_info;
769 __poke_user_per_compat(child, addr, data);
772 return 0;
775 static int poke_user_compat(struct task_struct *child,
776 addr_t addr, addr_t data)
778 if (!is_compat_task() || (addr & 3) ||
779 addr > sizeof(struct compat_user) - 3)
780 return -EIO;
782 return __poke_user_compat(child, addr, data);
785 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
786 compat_ulong_t caddr, compat_ulong_t cdata)
788 unsigned long addr = caddr;
789 unsigned long data = cdata;
790 compat_ptrace_area parea;
791 int copied, ret;
793 switch (request) {
794 case PTRACE_PEEKUSR:
795 /* read the word at location addr in the USER area. */
796 return peek_user_compat(child, addr, data);
798 case PTRACE_POKEUSR:
799 /* write the word at location addr in the USER area */
800 return poke_user_compat(child, addr, data);
802 case PTRACE_PEEKUSR_AREA:
803 case PTRACE_POKEUSR_AREA:
804 if (copy_from_user(&parea, (void __force __user *) addr,
805 sizeof(parea)))
806 return -EFAULT;
807 addr = parea.kernel_addr;
808 data = parea.process_addr;
809 copied = 0;
810 while (copied < parea.len) {
811 if (request == PTRACE_PEEKUSR_AREA)
812 ret = peek_user_compat(child, addr, data);
813 else {
814 __u32 utmp;
815 if (get_user(utmp,
816 (__u32 __force __user *) data))
817 return -EFAULT;
818 ret = poke_user_compat(child, addr, utmp);
820 if (ret)
821 return ret;
822 addr += sizeof(unsigned int);
823 data += sizeof(unsigned int);
824 copied += sizeof(unsigned int);
826 return 0;
827 case PTRACE_GET_LAST_BREAK:
828 put_user(child->thread.last_break,
829 (unsigned int __user *) data);
830 return 0;
832 return compat_ptrace_request(child, request, addr, data);
834 #endif
836 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
838 unsigned long mask = -1UL;
841 * The sysc_tracesys code in entry.S stored the system
842 * call number to gprs[2].
844 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
845 (tracehook_report_syscall_entry(regs) ||
846 regs->gprs[2] >= NR_syscalls)) {
848 * Tracing decided this syscall should not happen or the
849 * debugger stored an invalid system call number. Skip
850 * the system call and the system call restart handling.
852 clear_pt_regs_flag(regs, PIF_SYSCALL);
853 return -1;
856 /* Do the secure computing check after ptrace. */
857 if (secure_computing(NULL)) {
858 /* seccomp failures shouldn't expose any additional code. */
859 return -1;
862 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
863 trace_sys_enter(regs, regs->gprs[2]);
865 if (is_compat_task())
866 mask = 0xffffffff;
868 audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
869 regs->gprs[3] &mask, regs->gprs[4] &mask,
870 regs->gprs[5] &mask);
872 return regs->gprs[2];
875 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
877 audit_syscall_exit(regs);
879 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
880 trace_sys_exit(regs, regs->gprs[2]);
882 if (test_thread_flag(TIF_SYSCALL_TRACE))
883 tracehook_report_syscall_exit(regs, 0);
887 * user_regset definitions.
890 static int s390_regs_get(struct task_struct *target,
891 const struct user_regset *regset,
892 unsigned int pos, unsigned int count,
893 void *kbuf, void __user *ubuf)
895 if (target == current)
896 save_access_regs(target->thread.acrs);
898 if (kbuf) {
899 unsigned long *k = kbuf;
900 while (count > 0) {
901 *k++ = __peek_user(target, pos);
902 count -= sizeof(*k);
903 pos += sizeof(*k);
905 } else {
906 unsigned long __user *u = ubuf;
907 while (count > 0) {
908 if (__put_user(__peek_user(target, pos), u++))
909 return -EFAULT;
910 count -= sizeof(*u);
911 pos += sizeof(*u);
914 return 0;
917 static int s390_regs_set(struct task_struct *target,
918 const struct user_regset *regset,
919 unsigned int pos, unsigned int count,
920 const void *kbuf, const void __user *ubuf)
922 int rc = 0;
924 if (target == current)
925 save_access_regs(target->thread.acrs);
927 if (kbuf) {
928 const unsigned long *k = kbuf;
929 while (count > 0 && !rc) {
930 rc = __poke_user(target, pos, *k++);
931 count -= sizeof(*k);
932 pos += sizeof(*k);
934 } else {
935 const unsigned long __user *u = ubuf;
936 while (count > 0 && !rc) {
937 unsigned long word;
938 rc = __get_user(word, u++);
939 if (rc)
940 break;
941 rc = __poke_user(target, pos, word);
942 count -= sizeof(*u);
943 pos += sizeof(*u);
947 if (rc == 0 && target == current)
948 restore_access_regs(target->thread.acrs);
950 return rc;
953 static int s390_fpregs_get(struct task_struct *target,
954 const struct user_regset *regset, unsigned int pos,
955 unsigned int count, void *kbuf, void __user *ubuf)
957 _s390_fp_regs fp_regs;
959 if (target == current)
960 save_fpu_regs();
962 fp_regs.fpc = target->thread.fpu.fpc;
963 fpregs_store(&fp_regs, &target->thread.fpu);
965 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
966 &fp_regs, 0, -1);
969 static int s390_fpregs_set(struct task_struct *target,
970 const struct user_regset *regset, unsigned int pos,
971 unsigned int count, const void *kbuf,
972 const void __user *ubuf)
974 int rc = 0;
975 freg_t fprs[__NUM_FPRS];
977 if (target == current)
978 save_fpu_regs();
980 if (MACHINE_HAS_VX)
981 convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
982 else
983 memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
985 /* If setting FPC, must validate it first. */
986 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
987 u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
988 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
989 0, offsetof(s390_fp_regs, fprs));
990 if (rc)
991 return rc;
992 if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
993 return -EINVAL;
994 target->thread.fpu.fpc = ufpc[0];
997 if (rc == 0 && count > 0)
998 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
999 fprs, offsetof(s390_fp_regs, fprs), -1);
1000 if (rc)
1001 return rc;
1003 if (MACHINE_HAS_VX)
1004 convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
1005 else
1006 memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
1008 return rc;
1011 static int s390_last_break_get(struct task_struct *target,
1012 const struct user_regset *regset,
1013 unsigned int pos, unsigned int count,
1014 void *kbuf, void __user *ubuf)
1016 if (count > 0) {
1017 if (kbuf) {
1018 unsigned long *k = kbuf;
1019 *k = target->thread.last_break;
1020 } else {
1021 unsigned long __user *u = ubuf;
1022 if (__put_user(target->thread.last_break, u))
1023 return -EFAULT;
1026 return 0;
1029 static int s390_last_break_set(struct task_struct *target,
1030 const struct user_regset *regset,
1031 unsigned int pos, unsigned int count,
1032 const void *kbuf, const void __user *ubuf)
1034 return 0;
1037 static int s390_tdb_get(struct task_struct *target,
1038 const struct user_regset *regset,
1039 unsigned int pos, unsigned int count,
1040 void *kbuf, void __user *ubuf)
1042 struct pt_regs *regs = task_pt_regs(target);
1043 unsigned char *data;
1045 if (!(regs->int_code & 0x200))
1046 return -ENODATA;
1047 data = target->thread.trap_tdb;
1048 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, data, 0, 256);
1051 static int s390_tdb_set(struct task_struct *target,
1052 const struct user_regset *regset,
1053 unsigned int pos, unsigned int count,
1054 const void *kbuf, const void __user *ubuf)
1056 return 0;
1059 static int s390_vxrs_low_get(struct task_struct *target,
1060 const struct user_regset *regset,
1061 unsigned int pos, unsigned int count,
1062 void *kbuf, void __user *ubuf)
1064 __u64 vxrs[__NUM_VXRS_LOW];
1065 int i;
1067 if (!MACHINE_HAS_VX)
1068 return -ENODEV;
1069 if (target == current)
1070 save_fpu_regs();
1071 for (i = 0; i < __NUM_VXRS_LOW; i++)
1072 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1073 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1076 static int s390_vxrs_low_set(struct task_struct *target,
1077 const struct user_regset *regset,
1078 unsigned int pos, unsigned int count,
1079 const void *kbuf, const void __user *ubuf)
1081 __u64 vxrs[__NUM_VXRS_LOW];
1082 int i, rc;
1084 if (!MACHINE_HAS_VX)
1085 return -ENODEV;
1086 if (target == current)
1087 save_fpu_regs();
1089 for (i = 0; i < __NUM_VXRS_LOW; i++)
1090 vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
1092 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1093 if (rc == 0)
1094 for (i = 0; i < __NUM_VXRS_LOW; i++)
1095 *((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
1097 return rc;
1100 static int s390_vxrs_high_get(struct task_struct *target,
1101 const struct user_regset *regset,
1102 unsigned int pos, unsigned int count,
1103 void *kbuf, void __user *ubuf)
1105 __vector128 vxrs[__NUM_VXRS_HIGH];
1107 if (!MACHINE_HAS_VX)
1108 return -ENODEV;
1109 if (target == current)
1110 save_fpu_regs();
1111 memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
1113 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
1116 static int s390_vxrs_high_set(struct task_struct *target,
1117 const struct user_regset *regset,
1118 unsigned int pos, unsigned int count,
1119 const void *kbuf, const void __user *ubuf)
1121 int rc;
1123 if (!MACHINE_HAS_VX)
1124 return -ENODEV;
1125 if (target == current)
1126 save_fpu_regs();
1128 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1129 target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
1130 return rc;
1133 static int s390_system_call_get(struct task_struct *target,
1134 const struct user_regset *regset,
1135 unsigned int pos, unsigned int count,
1136 void *kbuf, void __user *ubuf)
1138 unsigned int *data = &target->thread.system_call;
1139 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1140 data, 0, sizeof(unsigned int));
1143 static int s390_system_call_set(struct task_struct *target,
1144 const struct user_regset *regset,
1145 unsigned int pos, unsigned int count,
1146 const void *kbuf, const void __user *ubuf)
1148 unsigned int *data = &target->thread.system_call;
1149 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1150 data, 0, sizeof(unsigned int));
1153 static int s390_gs_cb_get(struct task_struct *target,
1154 const struct user_regset *regset,
1155 unsigned int pos, unsigned int count,
1156 void *kbuf, void __user *ubuf)
1158 struct gs_cb *data = target->thread.gs_cb;
1160 if (!MACHINE_HAS_GS)
1161 return -ENODEV;
1162 if (!data)
1163 return -ENODATA;
1164 if (target == current)
1165 save_gs_cb(data);
1166 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1167 data, 0, sizeof(struct gs_cb));
/*
 * Write the guarded-storage control block.  If the target has no block
 * yet, one is allocated lazily; on copy-in failure the fresh allocation
 * is released again.  The final install runs with preemption disabled so
 * the thread state and (for 'current') the live registers stay in sync.
 */
static int s390_gs_cb_set(struct task_struct *target,
			  const struct user_regset *regset,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!target->thread.gs_cb) {
		/* First write: allocate storage before taking over. */
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}
	if (!target->thread.gs_cb)
		/* NOTE(review): initial gsd value for a fresh block —
		 * presumably an architecture-defined characteristic;
		 * confirm against the guarded-storage facility spec. */
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		/* Copy-in failed: drop the unused allocation. */
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		/* Enable guarded storage (CR2 bit 4) and load the block. */
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}
1209 static int s390_gs_bc_get(struct task_struct *target,
1210 const struct user_regset *regset,
1211 unsigned int pos, unsigned int count,
1212 void *kbuf, void __user *ubuf)
1214 struct gs_cb *data = target->thread.gs_bc_cb;
1216 if (!MACHINE_HAS_GS)
1217 return -ENODEV;
1218 if (!data)
1219 return -ENODATA;
1220 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1221 data, 0, sizeof(struct gs_cb));
1224 static int s390_gs_bc_set(struct task_struct *target,
1225 const struct user_regset *regset,
1226 unsigned int pos, unsigned int count,
1227 const void *kbuf, const void __user *ubuf)
1229 struct gs_cb *data = target->thread.gs_bc_cb;
1231 if (!MACHINE_HAS_GS)
1232 return -ENODEV;
1233 if (!data) {
1234 data = kzalloc(sizeof(*data), GFP_KERNEL);
1235 if (!data)
1236 return -ENOMEM;
1237 target->thread.gs_bc_cb = data;
1239 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1240 data, 0, sizeof(struct gs_cb));
/*
 * Register sets exported for 64-bit tasks via PTRACE_GETREGSET /
 * PTRACE_SETREGSET and as ELF core dump notes.
 */
static const struct user_regset s390_regsets[] = {
	{
		/* General purpose registers, PSW, access registers. */
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	{
		/* Floating point registers and fp control register. */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		/* Interrupted system call number. */
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(unsigned int),
		.align = sizeof(unsigned int),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		/* Last breaking-event address (read-only). */
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
		.set = s390_last_break_set,
	},
	{
		/* Transaction diagnostic block (256 bytes, read-only). */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		/* Lower halves of vector registers 0-15. */
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		/* Vector registers 16-31. */
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		/* Guarded-storage control block. */
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		/* Guarded-storage broadcast control block. */
		.core_note_type = NT_S390_GS_BC,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
};
/* regset view for native 64-bit tasks. */
static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
1325 #ifdef CONFIG_COMPAT
1326 static int s390_compat_regs_get(struct task_struct *target,
1327 const struct user_regset *regset,
1328 unsigned int pos, unsigned int count,
1329 void *kbuf, void __user *ubuf)
1331 if (target == current)
1332 save_access_regs(target->thread.acrs);
1334 if (kbuf) {
1335 compat_ulong_t *k = kbuf;
1336 while (count > 0) {
1337 *k++ = __peek_user_compat(target, pos);
1338 count -= sizeof(*k);
1339 pos += sizeof(*k);
1341 } else {
1342 compat_ulong_t __user *u = ubuf;
1343 while (count > 0) {
1344 if (__put_user(__peek_user_compat(target, pos), u++))
1345 return -EFAULT;
1346 count -= sizeof(*u);
1347 pos += sizeof(*u);
1350 return 0;
1353 static int s390_compat_regs_set(struct task_struct *target,
1354 const struct user_regset *regset,
1355 unsigned int pos, unsigned int count,
1356 const void *kbuf, const void __user *ubuf)
1358 int rc = 0;
1360 if (target == current)
1361 save_access_regs(target->thread.acrs);
1363 if (kbuf) {
1364 const compat_ulong_t *k = kbuf;
1365 while (count > 0 && !rc) {
1366 rc = __poke_user_compat(target, pos, *k++);
1367 count -= sizeof(*k);
1368 pos += sizeof(*k);
1370 } else {
1371 const compat_ulong_t __user *u = ubuf;
1372 while (count > 0 && !rc) {
1373 compat_ulong_t word;
1374 rc = __get_user(word, u++);
1375 if (rc)
1376 break;
1377 rc = __poke_user_compat(target, pos, word);
1378 count -= sizeof(*u);
1379 pos += sizeof(*u);
1383 if (rc == 0 && target == current)
1384 restore_access_regs(target->thread.acrs);
1386 return rc;
1389 static int s390_compat_regs_high_get(struct task_struct *target,
1390 const struct user_regset *regset,
1391 unsigned int pos, unsigned int count,
1392 void *kbuf, void __user *ubuf)
1394 compat_ulong_t *gprs_high;
1396 gprs_high = (compat_ulong_t *)
1397 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1398 if (kbuf) {
1399 compat_ulong_t *k = kbuf;
1400 while (count > 0) {
1401 *k++ = *gprs_high;
1402 gprs_high += 2;
1403 count -= sizeof(*k);
1405 } else {
1406 compat_ulong_t __user *u = ubuf;
1407 while (count > 0) {
1408 if (__put_user(*gprs_high, u++))
1409 return -EFAULT;
1410 gprs_high += 2;
1411 count -= sizeof(*u);
1414 return 0;
1417 static int s390_compat_regs_high_set(struct task_struct *target,
1418 const struct user_regset *regset,
1419 unsigned int pos, unsigned int count,
1420 const void *kbuf, const void __user *ubuf)
1422 compat_ulong_t *gprs_high;
1423 int rc = 0;
1425 gprs_high = (compat_ulong_t *)
1426 &task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
1427 if (kbuf) {
1428 const compat_ulong_t *k = kbuf;
1429 while (count > 0) {
1430 *gprs_high = *k++;
1431 *gprs_high += 2;
1432 count -= sizeof(*k);
1434 } else {
1435 const compat_ulong_t __user *u = ubuf;
1436 while (count > 0 && !rc) {
1437 unsigned long word;
1438 rc = __get_user(word, u++);
1439 if (rc)
1440 break;
1441 *gprs_high = word;
1442 *gprs_high += 2;
1443 count -= sizeof(*u);
1447 return rc;
1450 static int s390_compat_last_break_get(struct task_struct *target,
1451 const struct user_regset *regset,
1452 unsigned int pos, unsigned int count,
1453 void *kbuf, void __user *ubuf)
1455 compat_ulong_t last_break;
1457 if (count > 0) {
1458 last_break = target->thread.last_break;
1459 if (kbuf) {
1460 unsigned long *k = kbuf;
1461 *k = last_break;
1462 } else {
1463 unsigned long __user *u = ubuf;
1464 if (__put_user(last_break, u))
1465 return -EFAULT;
1468 return 0;
/*
 * Compat counterpart of s390_last_break_set(): the register set is
 * read-only, writes are accepted and ignored.
 */
static int s390_compat_last_break_set(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      const void *kbuf, const void __user *ubuf)
{
	return 0;
}
/*
 * Register sets exported for 31-bit (compat) tasks.  Mirrors
 * s390_regsets with compat-sized entries plus NT_S390_HIGH_GPRS for the
 * upper halves of the 64-bit registers.
 */
static const struct user_regset s390_compat_regsets[] = {
	{
		/* General purpose registers, PSW, access registers. */
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	{
		/* Floating point registers and fp control register. */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	{
		/* Interrupted system call number. */
		.core_note_type = NT_S390_SYSTEM_CALL,
		.n = 1,
		.size = sizeof(compat_uint_t),
		.align = sizeof(compat_uint_t),
		.get = s390_system_call_get,
		.set = s390_system_call_set,
	},
	{
		/* Last breaking-event address (read-only). */
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
		.set = s390_compat_last_break_set,
	},
	{
		/* Transaction diagnostic block (256 bytes, read-only). */
		.core_note_type = NT_S390_TDB,
		.n = 1,
		.size = 256,
		.align = 1,
		.get = s390_tdb_get,
		.set = s390_tdb_set,
	},
	{
		/* Lower halves of vector registers 0-15. */
		.core_note_type = NT_S390_VXRS_LOW,
		.n = __NUM_VXRS_LOW,
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_vxrs_low_get,
		.set = s390_vxrs_low_set,
	},
	{
		/* Vector registers 16-31. */
		.core_note_type = NT_S390_VXRS_HIGH,
		.n = __NUM_VXRS_HIGH,
		.size = sizeof(__vector128),
		.align = sizeof(__vector128),
		.get = s390_vxrs_high_get,
		.set = s390_vxrs_high_set,
	},
	{
		/* Upper halves of the 64-bit general registers. */
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
	{
		/* Guarded-storage control block. */
		.core_note_type = NT_S390_GS_CB,
		.n = sizeof(struct gs_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
};
/* regset view for 31-bit (compat) tasks. */
static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
1560 #endif
/*
 * Select the regset view matching the task's addressing mode: the compat
 * view for 31-bit tasks, the native view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
/* Symbolic names for the 16 general purpose registers, indexed by number. */
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};
1576 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
1578 if (offset >= NUM_GPRS)
1579 return 0;
1580 return regs->gprs[offset];
1583 int regs_query_register_offset(const char *name)
1585 unsigned long offset;
1587 if (!name || *name != 'r')
1588 return -EINVAL;
1589 if (kstrtoul(name + 1, 10, &offset))
1590 return -EINVAL;
1591 if (offset >= NUM_GPRS)
1592 return -EINVAL;
1593 return offset;
1596 const char *regs_query_register_name(unsigned int offset)
1598 if (offset >= NUM_GPRS)
1599 return NULL;
1600 return gpr_names[offset];
1603 static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
1605 unsigned long ksp = kernel_stack_pointer(regs);
1607 return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs: pt_regs which contains kernel stack pointer.
 * @n: stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr = kernel_stack_pointer(regs) + n * sizeof(long);

	/* Never dereference an address outside the task's kernel stack. */
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}