/*
 *  arch/s390x/kernel/linux32.c
 *
 *  Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *             Gerhard Tonn (ton@de.ibm.com)
 *             Thomas Spatzier (tspat@de.ibm.com)
 *
 *  Conversion between 31bit and 64bit native syscalls.
 *
 *  Heavily inspired by the 32-bit Sparc compat code which is
 *  Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/fadvise.h>
#include <linux/ipc.h>
#include <linux/slab.h>

#include <asm/types.h>
#include <asm/uaccess.h>

#include "compat_linux.h"
u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
		      PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
		      PSW32_MASK_PSTATE | PSW32_ASC_HOME;
/* For this source file, we want overflow handling. */

#undef high2lowuid
#undef high2lowgid
#undef low2highuid
#undef low2highgid
#undef SET_UID16
#undef SET_GID16
#undef NEW_TO_OLD_UID
#undef NEW_TO_OLD_GID
#undef SET_OLDSTAT_UID
#undef SET_OLDSTAT_GID
#undef SET_STAT_UID
#undef SET_STAT_GID

#define high2lowuid(uid) ((uid) > 65535 ? (u16)overflowuid : (u16)(uid))
#define high2lowgid(gid) ((gid) > 65535 ? (u16)overflowgid : (u16)(gid))
#define low2highuid(uid) ((uid) == (u16)-1 ? (uid_t)-1 : (uid_t)(uid))
#define low2highgid(gid) ((gid) == (u16)-1 ? (gid_t)-1 : (gid_t)(gid))
#define SET_UID16(var, uid)	var = high2lowuid(uid)
#define SET_GID16(var, gid)	var = high2lowgid(gid)
#define NEW_TO_OLD_UID(uid)	high2lowuid(uid)
#define NEW_TO_OLD_GID(gid)	high2lowgid(gid)
#define SET_OLDSTAT_UID(stat, uid)	(stat).st_uid = high2lowuid(uid)
#define SET_OLDSTAT_GID(stat, gid)	(stat).st_gid = high2lowgid(gid)
#define SET_STAT_UID(stat, uid)		(stat).st_uid = high2lowuid(uid)
#define SET_STAT_GID(stat, gid)		(stat).st_gid = high2lowgid(gid)
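
/*
 * Illustration (not part of the original source): with the default
 * overflowuid/overflowgid of 65534, converting an id to the legacy 16-bit
 * ABI saturates instead of truncating, e.g.
 *
 *	high2lowuid(1000)   -> 1000
 *	high2lowuid(70000)  -> 65534		(overflowuid)
 *	low2highuid(0xffff) -> (uid_t)-1	("unchanged" for chown16)
 */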
asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
{
	return sys_chown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
{
	return sys_lchown(filename, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
{
	return sys_fchown(fd, low2highuid(user), low2highgid(group));
}

asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
{
	return sys_setregid(low2highgid(rgid), low2highgid(egid));
}

asmlinkage long sys32_setgid16(u16 gid)
{
	return sys_setgid((gid_t)gid);
}

asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
{
	return sys_setreuid(low2highuid(ruid), low2highuid(euid));
}

asmlinkage long sys32_setuid16(u16 uid)
{
	return sys_setuid((uid_t)uid);
}
asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
{
	return sys_setresuid(low2highuid(ruid), low2highuid(euid),
			     low2highuid(suid));
}
asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
{
	int retval;

	if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) &&
	    !(retval = put_user(high2lowuid(current->cred->euid), euid)))
		retval = put_user(high2lowuid(current->cred->suid), suid);

	return retval;
}
asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
{
	return sys_setresgid(low2highgid(rgid), low2highgid(egid),
			     low2highgid(sgid));
}
asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
{
	int retval;

	if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) &&
	    !(retval = put_user(high2lowgid(current->cred->egid), egid)))
		retval = put_user(high2lowgid(current->cred->sgid), sgid);

	return retval;
}
asmlinkage long sys32_setfsuid16(u16 uid)
{
	return sys_setfsuid((uid_t)uid);
}

asmlinkage long sys32_setfsgid16(u16 gid)
{
	return sys_setfsgid((gid_t)gid);
}
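
/*
 * The legacy 16-bit group list syscalls pass gids as an array of u16, so the
 * helpers below copy and widen/narrow the entries one at a time.
 */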
static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
{
	int i;
	u16 group;

	for (i = 0; i < group_info->ngroups; i++) {
		group = (u16)GROUP_AT(group_info, i);
		if (put_user(group, grouplist+i))
			return -EFAULT;
	}

	return 0;
}
static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
{
	int i;
	u16 group;

	for (i = 0; i < group_info->ngroups; i++) {
		if (get_user(group, grouplist+i))
			return -EFAULT;
		GROUP_AT(group_info, i) = (gid_t)group;
	}

	return 0;
}
asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
{
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	get_group_info(current->cred->group_info);
	i = current->cred->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, current->cred->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->cred->group_info);
	return i;
}
asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
asmlinkage long sys32_getuid16(void)
{
	return high2lowuid(current->cred->uid);
}

asmlinkage long sys32_geteuid16(void)
{
	return high2lowuid(current->cred->euid);
}

asmlinkage long sys32_getgid16(void)
{
	return high2lowgid(current->cred->gid);
}

asmlinkage long sys32_getegid16(void)
{
	return high2lowgid(current->cred->egid);
}
/*
 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
 *
 * This is really horribly ugly.
 */
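
/*
 * Illustration (not part of the original source): a 31-bit libc typically
 * funnels every SysV IPC request through this single entry point, e.g.
 * semget(key, nsems, flg) arrives roughly as
 *
 *	sys32_ipc(SEMGET, key, nsems, flg, 0);
 *
 * and is dispatched on 'call' to the native or compat handler below.
 */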
#ifdef CONFIG_SYSVIPC
asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
{
	if (call >> 16)		/* hack for backward compatibility */
		return -EINVAL;
	switch (call) {
	case SEMTIMEDOP:
		return compat_sys_semtimedop(first, compat_ptr(ptr),
					     second, compat_ptr(third));
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		return sys_semtimedop(first, compat_ptr(ptr),
				      second, NULL);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL:
		return compat_sys_semctl(first, second, third,
					 compat_ptr(ptr));
	case MSGSND:
		return compat_sys_msgsnd(first, second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		return compat_sys_msgrcv(first, second, 0, third,
					 0, compat_ptr(ptr));
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return compat_sys_msgctl(first, second, compat_ptr(ptr));
	case SHMAT:
		return compat_sys_shmat(first, second, third,
					0, compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		return sys_shmget(first, (unsigned)second, third);
	case SHMCTL:
		return compat_sys_shmctl(first, second, compat_ptr(ptr));
	}

	return -ENOSYS;
}
#endif
asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
{
	if ((int)high < 0)
		return -EINVAL;
	else
		return sys_truncate(path, (high << 32) | low);
}
asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
{
	if ((int)high < 0)
		return -EINVAL;
	else
		return sys_ftruncate(fd, (high << 32) | low);
}
asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
				struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid,
					(struct timespec __force __user *) &t);
	set_fs(old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
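
/*
 * The compat sigset layout differs from the native one: userspace supplies
 * two 32-bit words, which the wrappers below fold into a single 64-bit word
 * (and split again on the way out) around a call to the native syscall under
 * set_fs(KERNEL_DS).
 */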
asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
				compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (copy_from_user(&s32, set, sizeof(compat_sigset_t)))
			return -EFAULT;
		s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	set_fs(KERNEL_DS);
	ret = sys_rt_sigprocmask(how,
				 set ? (sigset_t __force __user *) &s : NULL,
				 oset ? (sigset_t __force __user *) &s : NULL,
				 sigsetsize);
	set_fs(old_fs);
	if (ret)
		return ret;
	if (oset) {
		s32.sig[1] = (s.sig[0] >> 32);
		s32.sig[0] = s.sig[0];
		if (copy_to_user(oset, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
}
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
	set_fs(old_fs);
	if (!ret) {
		s32.sig[1] = (s.sig[0] >> 32);
		s32.sig[0] = s.sig[0];
		if (copy_to_user(set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}
asmlinkage long
sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	set_fs(KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
	set_fs(old_fs);
	return ret;
}
/*
 * sys32_execve() executes a new program after the asm stub has set
 * things up for us. This should basically do what I want it to.
 */
asmlinkage long sys32_execve(const char __user *name, compat_uptr_t __user *argv,
			     compat_uptr_t __user *envp)
{
	struct pt_regs *regs = task_pt_regs(current);
	char *filename;
	long rc;

	filename = getname(name);
	rc = PTR_ERR(filename);
	if (IS_ERR(filename))
		return rc;
	rc = compat_do_execve(filename, argv, envp, regs);
	if (rc)
		goto out;
	current->thread.fp_regs.fpc = 0;
	asm volatile("sfpc %0,0" : : "d" (0));
out:
	putname(filename);
	return rc;
}
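
/*
 * The 31-bit ABI cannot pass a 64-bit file offset in one register, so
 * pread64/pwrite64 (and readahead further down) receive the offset as
 * separate high and low 32-bit halves and reassemble it here.
 */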
asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
				size_t count, u32 poshi, u32 poslo)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
}
asmlinkage long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
				size_t count, u32 poshi, u32 poslo)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
}
asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
{
	return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
}
asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __force __user *) &of : NULL, count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}
asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
				compat_loff_t __user *offset, s32 count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __force __user *) &lof : NULL,
			     count);
	set_fs(old_fs);

	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}
struct stat64_emu31 {
	unsigned long long	st_dev;
	unsigned int		__pad1;
#define STAT64_HAS_BROKEN_ST_INO	1
	u32			__st_ino;
	unsigned int		st_mode;
	unsigned int		st_nlink;
	u32			st_uid;
	u32			st_gid;
	unsigned long long	st_rdev;
	unsigned long long	st_size;
	u32			st_blksize;
	unsigned char		__pad4[4];
	u32			__pad5;		/* future possible st_blocks high bits */
	u32			st_blocks;	/* Number 512-byte blocks allocated. */
	u32			st_atime;
	u32			__pad6;
	u32			st_mtime;
	u32			__pad7;
	u32			st_ctime;
	u32			__pad8;		/* will be high 32 bits of ctime someday */
	unsigned long		st_ino;
};
static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
{
	struct stat64_emu31 tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	tmp.__st_ino = (u32)stat->ino;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = (unsigned int)stat->nlink;
	tmp.st_uid = stat->uid;
	tmp.st_gid = stat->gid;
	tmp.st_rdev = huge_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_blksize = (u32)stat->blksize;
	tmp.st_blocks = (u32)stat->blocks;
	tmp.st_atime = (u32)stat->atime.tv_sec;
	tmp.st_mtime = (u32)stat->mtime.tv_sec;
	tmp.st_ctime = (u32)stat->ctime.tv_sec;

	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
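
/*
 * stat64 family: struct kstat is converted to the 31-bit stat64_emu31 layout
 * above; timestamps are truncated to their 32-bit seconds part and device
 * numbers are encoded with huge_encode_dev().
 */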
asmlinkage long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret = vfs_stat(filename, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}
asmlinkage long sys32_lstat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret = vfs_lstat(filename, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}
asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret = vfs_fstat(fd, &stat);
	if (!ret)
		ret = cp_stat64(statbuf, &stat);
	return ret;
}
asmlinkage long sys32_fstatat64(unsigned int dfd, const char __user *filename,
				struct stat64_emu31 __user *statbuf, int flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_stat64(statbuf, &stat);
}
/*
 * Linux/i386 didn't use to be able to handle more than
 * 4 system call parameters, so these system calls used a memory
 * block for parameter passing.
 */
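
/*
 * Illustration (not part of the original source): a 31-bit caller fills a
 * struct mmap_arg_struct_emu31 in memory and passes only its address to the
 * syscall.  old32_mmap() takes a byte offset that must be page aligned, while
 * sys32_mmap2() interprets the offset field in page-size units.
 */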
struct mmap_arg_struct_emu31 {
	compat_ulong_t addr;
	compat_ulong_t len;
	compat_ulong_t prot;
	compat_ulong_t flags;
	compat_ulong_t fd;
	compat_ulong_t offset;
};
asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
{
	struct mmap_arg_struct_emu31 a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	a.addr = (unsigned long) compat_ptr(a.addr);
	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
{
	struct mmap_arg_struct_emu31 a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	a.addr = (unsigned long) compat_ptr(a.addr);
	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;

	return sys_read(fd, buf, count);
}
asmlinkage long sys32_write(unsigned int fd, const char __user * buf, size_t count)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;

	return sys_write(fd, buf, count);
}
/*
 * 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
 * These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
 * because the 31 bit values differ from the 64 bit values.
 */
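
/*
 * Concretely, the literal values 4 and 5 tested below are the common 31-bit
 * definitions of POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE; the 64-bit s390
 * headers give these symbols different numeric values, hence the translation.
 */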
asmlinkage long
sys32_fadvise64(int fd, loff_t offset, size_t len, int advise)
{
	if (advise == 4)
		advise = POSIX_FADV_DONTNEED;
	else if (advise == 5)
		advise = POSIX_FADV_NOREUSE;
	return sys_fadvise64(fd, offset, len, advise);
}
struct fadvise64_64_args {
	int fd;
	long long offset;
	long long len;
	int advice;
};

asmlinkage long
sys32_fadvise64_64(struct fadvise64_64_args __user *args)
{
	struct fadvise64_64_args a;

	if (copy_from_user(&a, args, sizeof(a)))
		return -EFAULT;
	if (a.advice == 4)
		a.advice = POSIX_FADV_DONTNEED;
	else if (a.advice == 5)
		a.advice = POSIX_FADV_NOREUSE;
	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}