/*
 *  arch/s390/kernel/ptrace.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License. See the file README.legal in the main directory of
 *  this archive for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <trace/syscall.h>
#include <asm/compat.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
};

static void
FixPerRegisters(struct task_struct *task)
{
	struct pt_regs *regs;
	per_struct *per_info;

	regs = task_pt_regs(task);
	per_info = (per_struct *) &task->thread.per_info;
	per_info->control_regs.bits.em_instruction_fetch =
		per_info->single_step | per_info->instruction_fetch;

	if (per_info->single_step) {
		per_info->control_regs.bits.starting_addr = 0;
#ifdef CONFIG_COMPAT
		if (is_compat_task())
			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
		else
#endif
			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
	} else {
		per_info->control_regs.bits.starting_addr =
			per_info->starting_addr;
		per_info->control_regs.bits.ending_addr =
			per_info->ending_addr;
	}
	/*
	 * if any of the control reg tracing bits are on
	 * we switch on per in the psw
	 */
	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
		regs->psw.mask |= PSW_MASK_PER;
	else
		regs->psw.mask &= ~PSW_MASK_PER;

	if (per_info->control_regs.bits.em_storage_alteration)
		per_info->control_regs.bits.storage_alt_space_ctl = 1;
	else
		per_info->control_regs.bits.storage_alt_space_ctl = 0;
}
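
/*
 * Note (grounded in the function above): single stepping on s390 is built
 * on the PER (Program Event Recording) facility. With single_step set,
 * the instruction-fetch event is armed over the whole user address range
 * (0 .. PSW_ADDR_INSN, or 0 .. 0x7fffffff for a compat task), so every
 * instruction fetch raises a PER event and stops the tracee after each
 * instruction.
 */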

void user_enable_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 1;
	FixPerRegisters(task);
}

void user_disable_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 0;
	FixPerRegisters(task);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void
ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif
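
/*
 * __ADDR_MASK enforces natural word alignment on peek_user/poke_user
 * offsets: the low two bits must be clear for 4-byte words on 31 bit,
 * the low three bits for 8-byte words on 64 bit (with the access
 * register range relaxed to 4-byte alignment below, for gdb's sake).
 */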

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Remove per bit from user psw. */
			tmp &= ~PSW_MASK_PER;

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);

	} else
		tmp = 0;

	return tmp;
}
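
/*
 * Illustration of the mapping above (offsets are those of struct user
 * from <asm/user.h>): a PTRACE_PEEKUSR at offsetof(struct user,
 * regs.gprs[2]) is read from the saved pt_regs on the kernel stack, a
 * peek at offsetof(struct user, regs.fp_regs.fprs[0]) comes from
 * child->thread.fp_regs, and a peek into the pad bytes between
 * orig_gpr2 and fp_regs simply returns 0.
 */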

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
		    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
#endif
		    data != PSW_MASK_MERGE(psw_user_bits, data))
			/* Invalid psw mask. */
			return -EINVAL;
#ifndef CONFIG_64BIT
		if (addr == (addr_t) &dummy->regs.psw.addr)
			/* I'd like to reject addresses without the
			   high order bit but older gdb's rely on it */
			data |= PSW_ADDR_AMODE;
#endif
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;

	}

	FixPerRegisters(child);
	return 0;
}

static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* Remove high order bit from address (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		/* read word at location addr. */
		return generic_ptrace_peekdata(child, addr, data);

	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		/* Remove high order bit from address (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		/* write the word at location addr. */
		return generic_ptrace_pokedata(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	}
	return ptrace_request(child, request, addr, data);
}
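
/*
 * A rough usage sketch of how a tracer drives the PTRACE_PEEKUSR_AREA
 * path above, assuming the s390 ptrace_area type and the PT_GPR0
 * user-area offset from <asm/ptrace.h>:
 *
 *	ptrace_area pa;
 *	pa.len          = 16 * sizeof(long);       gprs 0-15
 *	pa.kernel_addr  = PT_GPR0;                 offset into struct user
 *	pa.process_addr = (unsigned long) buf;     buffer in the tracer
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &pa, NULL);
 *
 * The kernel then copies the requested area word by word via peek_user().
 */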

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct user32 *dummy32 = NULL;
	per_struct32 *dummy_per32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
			tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) task_pt_regs(child)->psw.addr |
				PSW32_ADDR_AMODE31;
		} else {
			/* gpr 0-15 */
			tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
					 addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.per_info;
		/* This is magic. See per_struct and per_struct32. */
		if ((offset >= (addr_t) &dummy_per32->control_regs &&
		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
		    (offset >= (addr_t) &dummy_per32->starting_addr &&
		     offset <= (addr_t) &dummy_per32->ending_addr) ||
		    offset == (addr_t) &dummy_per32->lowcore.words.address)
			offset = offset*2 + 4;
		else
			offset = offset*2;
		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);

	} else
		tmp = 0;

	return tmp;
}
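
/*
 * Worked example for the offset doubling above: every field in
 * per_struct32 sits at exactly half the offset of its 64 bit
 * counterpart in per_struct, so a 4 byte field at 31 bit offset X is
 * found at offset 2*X in the thread's per_info. For the 8 byte fields
 * (control regs, starting/ending address, lowcore address) the 31 bit
 * view wants the low half, i.e. bytes 4-7 of the 64 bit field, hence
 * the extra "+ 4".
 */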

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct user32 *dummy32 = NULL;
	per_struct32 *dummy_per32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
				/* Invalid psw mask. */
				return -EINVAL;
			task_pt_regs(child)->psw.mask =
				PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			task_pt_regs(child)->psw.addr =
				(__u64) tmp & PSW32_ADDR_INSN;
		} else {
			/* gpr 0-15 */
			*(__u32*)((addr_t) &task_pt_regs(child)->psw
				  + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure.
		 */
		offset = addr - (addr_t) &dummy32->regs.per_info;
		/*
		 * This is magic. See per_struct and per_struct32.
		 * By coincidence the offsets in per_struct are exactly
		 * twice the offsets in per_struct32 for all fields.
		 * The 8 byte fields need special handling though,
		 * because the second half (bytes 4-7) is needed and
		 * not the first half.
		 */
		if ((offset >= (addr_t) &dummy_per32->control_regs &&
		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
		    (offset >= (addr_t) &dummy_per32->starting_addr &&
		     offset <= (addr_t) &dummy_per32->ending_addr) ||
		    offset == (addr_t) &dummy_per32->lowcore.words.address)
			offset = offset*2 + 4;
		else
			offset = offset*2;
		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;

	}

	FixPerRegisters(child);
	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	ptrace_area_emu31 parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
				   sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret;

	/* Do the secure computing check first. */
	secure_computing(regs->gprs[2]);

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	ret = regs->gprs[2];
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		regs->svcnr = 0;
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_enter(regs);

	if (unlikely(current->audit_context))
		audit_syscall_entry(is_compat_task() ?
					AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
				    regs->gprs[2], regs->orig_gpr2,
				    regs->gprs[3], regs->gprs[4],
				    regs->gprs[5]);
	return ret;
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
				   regs->gprs[2]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE)))
		ftrace_syscall_exit(regs);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */

static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);

	return rc;
}

static const struct user_regset s390_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};
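
/*
 * Note: the .core_note_type fields tie these regsets to the NT_PRSTATUS
 * and NT_PRFPREG notes in ELF core dumps; the getters and setters reuse
 * __peek_user()/__poke_user(), so the register layout seen through the
 * regset interface matches the ptrace user area above.
 */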

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static const struct user_regset s390_compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}