/*
 *  arch/s390x/kernel/linux32.c
 *
 *  S390 version
 *    Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Gerhard Tonn (ton@de.ibm.com)
 *               Thomas Spatzier (tspat@de.ibm.com)
 *
 *  Conversion between 31bit and 64bit native syscalls.
 *
 *  Heavily inspired by the 32-bit Sparc compat code which is
 *  Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/fadvise.h>
#include <linux/ipc.h>
#include <linux/slab.h>

#include <asm/types.h>
#include <asm/uaccess.h>

#include <net/scm.h>
#include <net/sock.h>

#include "compat_linux.h"
long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
                        PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
                        PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
                        PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
                        PSW32_MASK_PSTATE);

/* For this source file, we want overflow handling. */

#undef high2lowuid
#undef high2lowgid
#undef low2highuid
#undef low2highgid
#undef SET_UID16
#undef SET_GID16
#undef NEW_TO_OLD_UID
#undef NEW_TO_OLD_GID
#undef SET_OLDSTAT_UID
#undef SET_OLDSTAT_GID
#undef SET_STAT_UID
#undef SET_STAT_GID

#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid)
#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid)
#define SET_UID16(var, uid)		var = high2lowuid(uid)
#define SET_GID16(var, gid)		var = high2lowgid(gid)
#define NEW_TO_OLD_UID(uid)		high2lowuid(uid)
#define NEW_TO_OLD_GID(gid)		high2lowgid(gid)
#define SET_OLDSTAT_UID(stat, uid)	(stat).st_uid = high2lowuid(uid)
#define SET_OLDSTAT_GID(stat, gid)	(stat).st_gid = high2lowgid(gid)
#define SET_STAT_UID(stat, uid)		(stat).st_uid = high2lowuid(uid)
#define SET_STAT_GID(stat, gid)		(stat).st_gid = high2lowgid(gid)
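
/*
 * Wrappers for the old 16-bit UID/GID system calls.  User-supplied
 * 16-bit values are widened with low2highuid/low2highgid before the
 * native syscall is called; values returned to user space are narrowed
 * with high2lowuid/high2lowgid, substituting overflowuid/overflowgid
 * when they do not fit in 16 bits.
 */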
asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
{
        return sys_chown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
{
        return sys_lchown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
{
        return sys_fchown(fd, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
{
        return sys_setregid(low2highgid(rgid), low2highgid(egid));
}

asmlinkage long sys32_setgid16(u16 gid)
{
        return sys_setgid((gid_t)gid);
}

asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
{
        return sys_setreuid(low2highuid(ruid), low2highuid(euid));
}

asmlinkage long sys32_setuid16(u16 uid)
{
        return sys_setuid((uid_t)uid);
}

asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
{
        return sys_setresuid(low2highuid(ruid), low2highuid(euid),
                             low2highuid(suid));
}

asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
        int retval;

        if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) &&
            !(retval = put_user(high2lowuid(current->cred->euid), euid)))
                retval = put_user(high2lowuid(current->cred->suid), suid);

        return retval;
}

asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
{
        return sys_setresgid(low2highgid(rgid), low2highgid(egid),
                             low2highgid(sgid));
}

asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
        int retval;

        if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) &&
            !(retval = put_user(high2lowgid(current->cred->egid), egid)))
                retval = put_user(high2lowgid(current->cred->sgid), sgid);

        return retval;
}

asmlinkage long sys32_setfsuid16(u16 uid)
{
        return sys_setfsuid((uid_t)uid);
}

asmlinkage long sys32_setfsgid16(u16 gid)
{
        return sys_setfsgid((gid_t)gid);
}
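
/*
 * Helpers to copy a group_info to and from a user-space array of
 * 16-bit group IDs.
 */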
static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
{
        int i;
        u16 group;

        for (i = 0; i < group_info->ngroups; i++) {
                group = (u16)GROUP_AT(group_info, i);
                if (put_user(group, grouplist+i))
                        return -EFAULT;
        }

        return 0;
}

static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
{
        int i;
        u16 group;

        for (i = 0; i < group_info->ngroups; i++) {
                if (get_user(group, grouplist+i))
                        return -EFAULT;
                GROUP_AT(group_info, i) = (gid_t)group;
        }

        return 0;
}

asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
{
        int i;

        if (gidsetsize < 0)
                return -EINVAL;

        get_group_info(current->cred->group_info);
        i = current->cred->group_info->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize) {
                        i = -EINVAL;
                        goto out;
                }
                if (groups16_to_user(grouplist, current->cred->group_info)) {
                        i = -EFAULT;
                        goto out;
                }
        }
out:
        put_group_info(current->cred->group_info);
        return i;
}

asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
{
        struct group_info *group_info;
        int retval;

        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;

        group_info = groups_alloc(gidsetsize);
        if (!group_info)
                return -ENOMEM;
        retval = groups16_from_user(group_info, grouplist);
        if (retval) {
                put_group_info(group_info);
                return retval;
        }

        retval = set_current_groups(group_info);
        put_group_info(group_info);

        return retval;
}

asmlinkage long sys32_getuid16(void)
{
        return high2lowuid(current->cred->uid);
}

asmlinkage long sys32_geteuid16(void)
{
        return high2lowuid(current->cred->euid);
}

asmlinkage long sys32_getgid16(void)
{
        return high2lowgid(current->cred->gid);
}

asmlinkage long sys32_getegid16(void)
{
        return high2lowgid(current->cred->egid);
}

/*
 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
 *
 * This is really horribly ugly.
 */
#ifdef CONFIG_SYSVIPC
asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
{
        if (call >> 16)		/* hack for backward compatibility */
                return -EINVAL;

        call &= 0xffff;

        switch (call) {
        case SEMTIMEDOP:
                return compat_sys_semtimedop(first, compat_ptr(ptr),
                                             second, compat_ptr(third));
        case SEMOP:
                /* struct sembuf is the same on 32 and 64bit :)) */
                return sys_semtimedop(first, compat_ptr(ptr),
                                      second, NULL);
        case SEMGET:
                return sys_semget(first, second, third);
        case SEMCTL:
                return compat_sys_semctl(first, second, third,
                                         compat_ptr(ptr));
        case MSGSND:
                return compat_sys_msgsnd(first, second, third,
                                         compat_ptr(ptr));
        case MSGRCV:
                return compat_sys_msgrcv(first, second, 0, third,
                                         0, compat_ptr(ptr));
        case MSGGET:
                return sys_msgget((key_t) first, second);
        case MSGCTL:
                return compat_sys_msgctl(first, second, compat_ptr(ptr));
        case SHMAT:
                return compat_sys_shmat(first, second, third,
                                        0, compat_ptr(ptr));
        case SHMDT:
                return sys_shmdt(compat_ptr(ptr));
        case SHMGET:
                return sys_shmget(first, (unsigned)second, third);
        case SHMCTL:
                return compat_sys_shmctl(first, second, compat_ptr(ptr));
        }

        return -ENOSYS;
}
#endif
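
/*
 * 64-bit file lengths arrive from 31-bit user space as two 32-bit
 * register halves and are recombined here; a negative high word is
 * rejected as invalid.
 */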
asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
{
        if ((int)high < 0)
                return -EINVAL;
        else
                return sys_truncate(path, (high << 32) | low);
}

asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
{
        if ((int)high < 0)
                return -EINVAL;
        else
                return sys_ftruncate(fd, (high << 32) | low);
}
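
/*
 * Call the native syscall with a kernel struct timespec under
 * KERNEL_DS, then convert the result to the 32-bit compat_timespec
 * layout expected by user space.
 */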
asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
                                struct compat_timespec __user *interval)
{
        struct timespec t;
        int ret;
        mm_segment_t old_fs = get_fs ();

        set_fs (KERNEL_DS);
        ret = sys_sched_rr_get_interval(pid,
                                        (struct timespec __force __user *) &t);
        set_fs (old_fs);
        if (put_compat_timespec(&t, interval))
                return -EFAULT;
        return ret;
}
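
/*
 * The compat sigset consists of 32-bit words; pairs of them are packed
 * into the kernel's 64-bit sigset words before calling the native
 * syscall and unpacked again for the old-set copy-out.
 */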
asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
                                compat_sigset_t __user *oset, size_t sigsetsize)
{
        sigset_t s;
        compat_sigset_t s32;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (set) {
                if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
                        return -EFAULT;
                switch (_NSIG_WORDS) {
                case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
                case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
                case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
                case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
                }
        }
        set_fs (KERNEL_DS);
        ret = sys_rt_sigprocmask(how,
                                 set ? (sigset_t __force __user *) &s : NULL,
                                 oset ? (sigset_t __force __user *) &s : NULL,
                                 sigsetsize);
        set_fs (old_fs);
        if (ret) return ret;
        if (oset) {
                switch (_NSIG_WORDS) {
                case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
                case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
                case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
                case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
                }
                if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
                        return -EFAULT;
        }
        return 0;
}

asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
                                size_t sigsetsize)
{
        sigset_t s;
        compat_sigset_t s32;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs (KERNEL_DS);
        ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
        set_fs (old_fs);
        if (!ret) {
                switch (_NSIG_WORDS) {
                case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
                case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
                case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
                case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
                }
                if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
                        return -EFAULT;
        }
        return ret;
}

asmlinkage long
sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
{
        siginfo_t info;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
        set_fs (KERNEL_DS);
        ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
        set_fs (old_fs);
        return ret;
}

/*
 * sys32_execve() executes a new program after the asm stub has set
 * things up for us. This should basically do what I want it to.
 */
asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv,
                             compat_uptr_t __user *envp)
{
        struct pt_regs *regs = task_pt_regs(current);
        char *filename;
        long rc;

        filename = getname(name);
        rc = PTR_ERR(filename);
        if (IS_ERR(filename))
                return rc;
        rc = compat_do_execve(filename, argv, envp, regs);
        if (rc)
                goto out;
        current->thread.fp_regs.fpc = 0;
        asm volatile("sfpc %0,0" : : "d" (0));
        rc = regs->gprs[2];
out:
        putname(filename);
        return rc;
}
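
/*
 * The 64-bit file position for pread/pwrite/readahead is passed as two
 * 32-bit register halves (poshi/poslo, offhi/offlo) and reassembled
 * here.
 */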
asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
                                size_t count, u32 poshi, u32 poslo)
{
        if ((compat_ssize_t) count < 0)
                return -EINVAL;
        return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
}

asmlinkage long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
                                size_t count, u32 poshi, u32 poslo)
{
        if ((compat_ssize_t) count < 0)
                return -EINVAL;
        return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
}

asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
{
        return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
}
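
/*
 * sendfile wrappers: fetch the 32-bit offset from user space, run the
 * native syscall on a kernel copy under KERNEL_DS and write the
 * updated offset back.
 */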
asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, size_t count)
{
        mm_segment_t old_fs = get_fs();
        int ret;
        off_t of;

        if (offset && get_user(of, offset))
                return -EFAULT;

        set_fs(KERNEL_DS);
        ret = sys_sendfile(out_fd, in_fd,
                           offset ? (off_t __force __user *) &of : NULL, count);
        set_fs(old_fs);

        if (offset && put_user(of, offset))
                return -EFAULT;

        return ret;
}

asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
                                compat_loff_t __user *offset, s32 count)
{
        mm_segment_t old_fs = get_fs();
        int ret;
        loff_t lof;

        if (offset && get_user(lof, offset))
                return -EFAULT;

        set_fs(KERNEL_DS);
        ret = sys_sendfile64(out_fd, in_fd,
                             offset ? (loff_t __force __user *) &lof : NULL,
                             count);
        set_fs(old_fs);

        if (offset && put_user(lof, offset))
                return -EFAULT;

        return ret;
}
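
/* Layout of struct stat64 as seen by 31-bit user space. */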
struct stat64_emu31 {
        unsigned long long	st_dev;
        unsigned int		__pad1;
#define STAT64_HAS_BROKEN_ST_INO	1
        u32			__st_ino;
        unsigned int		st_mode;
        unsigned int		st_nlink;
        u32			st_uid;
        u32			st_gid;
        unsigned long long	st_rdev;
        unsigned int		__pad3;
        long			st_size;
        u32			st_blksize;
        unsigned char		__pad4[4];
        u32			__pad5;		/* future possible st_blocks high bits */
        u32			st_blocks;	/* Number 512-byte blocks allocated. */
        u32			st_atime;
        u32			__pad6;
        u32			st_mtime;
        u32			__pad7;
        u32			st_ctime;
        u32			__pad8;		/* will be high 32 bits of ctime someday */
        unsigned long		st_ino;
};
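
/*
 * Convert a kernel struct kstat into the 31-bit stat64_emu31 layout
 * and copy it to user space; time stamps and block counts are
 * truncated to 32 bits.
 */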
static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
{
        struct stat64_emu31 tmp;

        memset(&tmp, 0, sizeof(tmp));

        tmp.st_dev = huge_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        tmp.__st_ino = (u32)stat->ino;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = (unsigned int)stat->nlink;
        tmp.st_uid = stat->uid;
        tmp.st_gid = stat->gid;
        tmp.st_rdev = huge_encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_blksize = (u32)stat->blksize;
        tmp.st_blocks = (u32)stat->blocks;
        tmp.st_atime = (u32)stat->atime.tv_sec;
        tmp.st_mtime = (u32)stat->mtime.tv_sec;
        tmp.st_ctime = (u32)stat->ctime.tv_sec;

        return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

asmlinkage long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
{
        struct kstat stat;
        int ret = vfs_stat(filename, &stat);
        if (!ret)
                ret = cp_stat64(statbuf, &stat);
        return ret;
}

asmlinkage long sys32_lstat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
{
        struct kstat stat;
        int ret = vfs_lstat(filename, &stat);
        if (!ret)
                ret = cp_stat64(statbuf, &stat);
        return ret;
}

asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf)
{
        struct kstat stat;
        int ret = vfs_fstat(fd, &stat);
        if (!ret)
                ret = cp_stat64(statbuf, &stat);
        return ret;
}

asmlinkage long sys32_fstatat64(unsigned int dfd, const char __user *filename,
                                struct stat64_emu31 __user *statbuf, int flag)
{
        struct kstat stat;
        int error;

        error = vfs_fstatat(dfd, filename, &stat, flag);
        if (error)
                return error;
        return cp_stat64(statbuf, &stat);
}

/*
 * Linux/i386 didn't use to be able to handle more than
 * 4 system call parameters, so these system calls used a memory
 * block for parameter passing..
 */
struct mmap_arg_struct_emu31 {
        compat_ulong_t addr;
        compat_ulong_t len;
        compat_ulong_t prot;
        compat_ulong_t flags;
        compat_ulong_t fd;
        compat_ulong_t offset;
};

asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
{
        struct mmap_arg_struct_emu31 a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        if (a.offset & ~PAGE_MASK)
                return -EINVAL;
        a.addr = (unsigned long) compat_ptr(a.addr);
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                              a.offset >> PAGE_SHIFT);
}
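
/*
 * Unlike old32_mmap() above, the offset handed to sys32_mmap2() is
 * already expressed in page units, so it is passed straight through to
 * sys_mmap_pgoff().
 */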
asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
{
        struct mmap_arg_struct_emu31 a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        a.addr = (unsigned long) compat_ptr(a.addr);
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
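
/*
 * Plain read/write wrappers that only reject counts which would be
 * negative when interpreted as a 31-bit ssize_t.
 */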
asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
{
        if ((compat_ssize_t) count < 0)
                return -EINVAL;

        return sys_read(fd, buf, count);
}

asmlinkage long sys32_write(unsigned int fd, const char __user * buf, size_t count)
{
        if ((compat_ssize_t) count < 0)
                return -EINVAL;

        return sys_write(fd, buf, count);
}

/*
 * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
 * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
 * because the 31 bit values differ from the 64 bit values.
 */
asmlinkage long
sys32_fadvise64(int fd, loff_t offset, size_t len, int advise)
{
        if (advise == 4)
                advise = POSIX_FADV_DONTNEED;
        else if (advise == 5)
                advise = POSIX_FADV_NOREUSE;
        return sys_fadvise64(fd, offset, len, advise);
}

struct fadvise64_64_args {
        int fd;
        long long offset;
        long long len;
        int advice;
};

asmlinkage long
sys32_fadvise64_64(struct fadvise64_64_args __user *args)
{
        struct fadvise64_64_args a;

        if (copy_from_user(&a, args, sizeof(a)))
                return -EFAULT;
        if (a.advice == 4)
                a.advice = POSIX_FADV_DONTNEED;
        else if (a.advice == 5)
                a.advice = POSIX_FADV_NOREUSE;
        return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}