/*
 *  arch/s390/kernel/ptrace.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file README.legal in the main directory of
 *  this archive for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <trace/syscall.h>
#include <asm/compat.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_GENERAL_EXTENDED,
};
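
/*
 * Propagate the single-step and watch settings kept in thread.per_info
 * into the PER (Program Event Recording) control register image and,
 * for the current task, into the real control registers 9-11.
 */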
static void
FixPerRegisters(struct task_struct *task)
{
	struct pt_regs *regs;
	per_struct *per_info;
	per_cr_words cr_words;

	regs = task_pt_regs(task);
	per_info = (per_struct *) &task->thread.per_info;
	per_info->control_regs.bits.em_instruction_fetch =
		per_info->single_step | per_info->instruction_fetch;

	if (per_info->single_step) {
		per_info->control_regs.bits.starting_addr = 0;
#ifdef CONFIG_COMPAT
		if (is_compat_task())
			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
		else
#endif
			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
	} else {
		per_info->control_regs.bits.starting_addr =
			per_info->starting_addr;
		per_info->control_regs.bits.ending_addr =
			per_info->ending_addr;
	}

	/*
	 * if any of the control reg tracing bits are on
	 * we switch on per in the psw
	 */
	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
		regs->psw.mask |= PSW_MASK_PER;
	else
		regs->psw.mask &= ~PSW_MASK_PER;

	if (per_info->control_regs.bits.em_storage_alteration)
		per_info->control_regs.bits.storage_alt_space_ctl = 1;
	else
		per_info->control_regs.bits.storage_alt_space_ctl = 0;

	if (task == current) {
		__ctl_store(cr_words, 9, 11);
		if (memcmp(&cr_words, &per_info->control_regs.words,
			   sizeof(cr_words)) != 0)
			__ctl_load(per_info->control_regs.words, 9, 11);
	}
}
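
/*
 * Single stepping is implemented with PER instruction-fetch events
 * covering the whole user address range; there is no separate
 * hardware single-step flag on s390.
 */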
void user_enable_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 1;
	FixPerRegisters(task);
}

void user_disable_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 0;
	FixPerRegisters(task);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void
ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
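
/*
 * Alignment mask for user-area accesses: peeks and pokes must be
 * aligned to the ABI word size (4 bytes on 31 bit, 8 bytes on 64 bit),
 * except for the access register range handled in peek_user/poke_user.
 */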

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Remove per bit from user psw. */
			tmp &= ~PSW_MASK_PER;

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register need to get checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
		    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
#endif
		    data != PSW_MASK_MERGE(psw_user_bits, data))
			/* Invalid psw mask. */
			return -EINVAL;
#ifndef CONFIG_64BIT
		if (addr == (addr_t) &dummy->regs.psw.addr)
			/* I'd like to reject addresses without the
			   high order bit but older gdb's rely on it */
			data |= PSW_ADDR_AMODE;
#endif
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;

	}

	FixPerRegisters(child);
	return 0;
}

static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}
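
/*
 * PTRACE_PEEKUSR_AREA and PTRACE_POKEUSR_AREA are s390 specific
 * requests that transfer a whole range of the user area in one go,
 * described by a ptrace_area structure (kernel_addr, process_addr,
 * len) passed in the addr argument.
 */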
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned long __user *) data);
		return 0;
	default:
		/* Removing high order bit from addr (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		return ptrace_request(child, request, addr, data);
	}
}

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 byte instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly part is PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct user32 *dummy32 = NULL;
	per_struct32 *dummy_per32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
			tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) task_pt_regs(child)->psw.addr |
				PSW32_ADDR_AMODE31;
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
					 addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.per_info;
		/* This is magic. See per_struct and per_struct32. */
		if ((offset >= (addr_t) &dummy_per32->control_regs &&
		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
		    (offset >= (addr_t) &dummy_per32->starting_addr &&
		     offset <= (addr_t) &dummy_per32->ending_addr) ||
		    offset == (addr_t) &dummy_per32->lowcore.words.address)
			offset = offset*2 + 4;
		else
			offset = offset*2;
		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct user32 *dummy32 = NULL;
	per_struct32 *dummy_per32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
				/* Invalid psw mask. */
				return -EINVAL;
			task_pt_regs(child)->psw.mask =
				PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			task_pt_regs(child)->psw.addr =
				(__u64) tmp & PSW32_ADDR_INSN;
		} else {
			/* gpr 0-15 */
			*(__u32*)((addr_t) &task_pt_regs(child)->psw
				  + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure.
		 */
		offset = addr - (addr_t) &dummy32->regs.per_info;
		/*
		 * This is magic. See per_struct and per_struct32.
		 * By incident the offsets in per_struct are exactly
		 * twice the offsets in per_struct32 for all fields.
		 * The 8 byte fields need special handling though,
		 * because the second half (bytes 4-7) is needed and
		 * not the first half.
		 */
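		/*
		 * In other words: a 4 byte field at offset X in
		 * per_struct32 is found at offset 2*X in per_struct,
		 * while for the 8 byte fields listed below only the
		 * second word (bytes 4-7) carries the 31 bit value,
		 * hence the additional "+ 4".
		 */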
		if ((offset >= (addr_t) &dummy_per32->control_regs &&
		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
		    (offset >= (addr_t) &dummy_per32->starting_addr &&
		     offset <= (addr_t) &dummy_per32->ending_addr) ||
		    offset == (addr_t) &dummy_per32->lowcore.words.address)
			offset = offset*2 + 4;
		else
			offset = offset*2;
		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;

	}

	FixPerRegisters(child);
	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	ptrace_area_emu31 parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif
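
/*
 * Called from the sysc_tracesys path in entry.S. Returns the system
 * call number to execute, or -1 if the system call is to be skipped.
 */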
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	secure_computing(regs->gprs[2]);

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		regs->svcnr = 0;
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(is_compat_task() ?
					AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
				    regs->gprs[2], regs->orig_gpr2,
				    regs->gprs[3], regs->gprs[4],
				    regs->gprs[5]);
	return ret ?: regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
				   regs->gprs[2]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */
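
/*
 * The regsets below are used by the generic regset code, for example
 * to fill the ELF note sections of a core dump. The get/set callbacks
 * reuse __peek_user/__poke_user so that the layout matches the
 * PTRACE_PEEKUSR/PTRACE_POKEUSR view of the user area.
 */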

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);

	return rc;
}

#ifdef CONFIG_64BIT

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = task_thread_info(target)->last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(task_thread_info(target)->last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

#endif

static const struct user_regset s390_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
	},
#endif
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}
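
/*
 * The "high gprs" regset exposes the upper halves of the 64 bit
 * general purpose registers to 31 bit tasks. Each 64 bit gpr in
 * pt_regs consists of two 32 bit words with the high word first,
 * so the walk below advances by two compat_ulong_t per register.
 */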
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
	},
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}
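
/*
 * Register name/offset helpers and kernel stack accessors, used by
 * the generic register and stack access API (e.g. the kprobe based
 * event tracer) to fetch register values by name.
 */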
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (strict_strtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}