4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
32 #include <sys/types.h>
38 #include <sys/mount.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
49 int __clone2(int (*fn
)(void *), void *child_stack_base
,
50 size_t stack_size
, int flags
, void *arg
, ...);
52 #include <sys/socket.h>
56 #include <sys/times.h>
59 #include <sys/statfs.h>
61 #include <sys/sysinfo.h>
62 #include <sys/utsname.h>
63 //#include <sys/user.h>
64 #include <netinet/ip.h>
65 #include <netinet/tcp.h>
66 #include <linux/wireless.h>
67 #include <linux/icmp.h>
68 #include "qemu-common.h"
73 #include <sys/eventfd.h>
76 #include <sys/epoll.h>
79 #include "qemu/xattr.h"
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/utsname.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
96 #include <linux/mtio.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include "linux_loop.h"
105 #include "cpu-uname.h"
109 #if defined(CONFIG_USE_NPTL)
110 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
111 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
113 /* XXX: Hardcode the above values. */
114 #define CLONE_NPTL_FLAGS2 0
119 //#include <linux/msdos_fs.h>
120 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
121 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
132 #define _syscall0(type,name) \
133 static type name (void) \
135 return syscall(__NR_##name); \
138 #define _syscall1(type,name,type1,arg1) \
139 static type name (type1 arg1) \
141 return syscall(__NR_##name, arg1); \
144 #define _syscall2(type,name,type1,arg1,type2,arg2) \
145 static type name (type1 arg1,type2 arg2) \
147 return syscall(__NR_##name, arg1, arg2); \
150 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
151 static type name (type1 arg1,type2 arg2,type3 arg3) \
153 return syscall(__NR_##name, arg1, arg2, arg3); \
156 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
157 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
159 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
162 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
164 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
166 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
170 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
171 type5,arg5,type6,arg6) \
172 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
175 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
179 #define __NR_sys_uname __NR_uname
180 #define __NR_sys_faccessat __NR_faccessat
181 #define __NR_sys_fchmodat __NR_fchmodat
182 #define __NR_sys_fchownat __NR_fchownat
183 #define __NR_sys_fstatat64 __NR_fstatat64
184 #define __NR_sys_futimesat __NR_futimesat
185 #define __NR_sys_getcwd1 __NR_getcwd
186 #define __NR_sys_getdents __NR_getdents
187 #define __NR_sys_getdents64 __NR_getdents64
188 #define __NR_sys_getpriority __NR_getpriority
189 #define __NR_sys_linkat __NR_linkat
190 #define __NR_sys_mkdirat __NR_mkdirat
191 #define __NR_sys_mknodat __NR_mknodat
192 #define __NR_sys_newfstatat __NR_newfstatat
193 #define __NR_sys_openat __NR_openat
194 #define __NR_sys_readlinkat __NR_readlinkat
195 #define __NR_sys_renameat __NR_renameat
196 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
197 #define __NR_sys_symlinkat __NR_symlinkat
198 #define __NR_sys_syslog __NR_syslog
199 #define __NR_sys_tgkill __NR_tgkill
200 #define __NR_sys_tkill __NR_tkill
201 #define __NR_sys_unlinkat __NR_unlinkat
202 #define __NR_sys_utimensat __NR_utimensat
203 #define __NR_sys_futex __NR_futex
204 #define __NR_sys_inotify_init __NR_inotify_init
205 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
206 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
208 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
210 #define __NR__llseek __NR_lseek
214 _syscall0(int, gettid
)
216 /* This is a replacement for the host gettid() and must return a host
218 static int gettid(void) {
222 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
223 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
224 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
226 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
227 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
228 loff_t
*, res
, uint
, wh
);
230 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
231 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
232 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
233 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
235 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
236 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
238 #ifdef __NR_exit_group
239 _syscall1(int,exit_group
,int,error_code
)
241 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
242 _syscall1(int,set_tid_address
,int *,tidptr
)
244 #if defined(CONFIG_USE_NPTL)
245 #if defined(TARGET_NR_futex) && defined(__NR_futex)
246 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
247 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
250 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
251 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
252 unsigned long *, user_mask_ptr
);
253 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
254 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
255 unsigned long *, user_mask_ptr
);
256 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
292 #define COPY_UTSNAME_FIELD(dest, src) \
294 /* __NEW_UTS_LEN doesn't include terminating null */ \
295 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
296 (dest)[__NEW_UTS_LEN] = '\0'; \
299 static int sys_uname(struct new_utsname
*buf
)
301 struct utsname uts_buf
;
303 if (uname(&uts_buf
) < 0)
307 * Just in case these have some differences, we
308 * translate utsname to new_utsname (which is the
309 * struct linux kernel uses).
312 memset(buf
, 0, sizeof(*buf
));
313 COPY_UTSNAME_FIELD(buf
->sysname
, uts_buf
.sysname
);
314 COPY_UTSNAME_FIELD(buf
->nodename
, uts_buf
.nodename
);
315 COPY_UTSNAME_FIELD(buf
->release
, uts_buf
.release
);
316 COPY_UTSNAME_FIELD(buf
->version
, uts_buf
.version
);
317 COPY_UTSNAME_FIELD(buf
->machine
, uts_buf
.machine
);
319 COPY_UTSNAME_FIELD(buf
->domainname
, uts_buf
.domainname
);
323 #undef COPY_UTSNAME_FIELD
326 static int sys_getcwd1(char *buf
, size_t size
)
328 if (getcwd(buf
, size
) == NULL
) {
329 /* getcwd() sets errno */
332 return strlen(buf
)+1;
337 * Host system seems to have atfile syscall stubs available. We
338 * now enable them one by one as specified by target syscall_nr.h.
341 #ifdef TARGET_NR_faccessat
342 static int sys_faccessat(int dirfd
, const char *pathname
, int mode
)
344 return (faccessat(dirfd
, pathname
, mode
, 0));
347 #ifdef TARGET_NR_fchmodat
348 static int sys_fchmodat(int dirfd
, const char *pathname
, mode_t mode
)
350 return (fchmodat(dirfd
, pathname
, mode
, 0));
353 #if defined(TARGET_NR_fchownat)
354 static int sys_fchownat(int dirfd
, const char *pathname
, uid_t owner
,
355 gid_t group
, int flags
)
357 return (fchownat(dirfd
, pathname
, owner
, group
, flags
));
360 #ifdef __NR_fstatat64
361 static int sys_fstatat64(int dirfd
, const char *pathname
, struct stat
*buf
,
364 return (fstatat(dirfd
, pathname
, buf
, flags
));
367 #ifdef __NR_newfstatat
368 static int sys_newfstatat(int dirfd
, const char *pathname
, struct stat
*buf
,
371 return (fstatat(dirfd
, pathname
, buf
, flags
));
374 #ifdef TARGET_NR_futimesat
375 static int sys_futimesat(int dirfd
, const char *pathname
,
376 const struct timeval times
[2])
378 return (futimesat(dirfd
, pathname
, times
));
381 #ifdef TARGET_NR_linkat
382 static int sys_linkat(int olddirfd
, const char *oldpath
,
383 int newdirfd
, const char *newpath
, int flags
)
385 return (linkat(olddirfd
, oldpath
, newdirfd
, newpath
, flags
));
388 #ifdef TARGET_NR_mkdirat
389 static int sys_mkdirat(int dirfd
, const char *pathname
, mode_t mode
)
391 return (mkdirat(dirfd
, pathname
, mode
));
394 #ifdef TARGET_NR_mknodat
395 static int sys_mknodat(int dirfd
, const char *pathname
, mode_t mode
,
398 return (mknodat(dirfd
, pathname
, mode
, dev
));
401 #ifdef TARGET_NR_openat
402 static int sys_openat(int dirfd
, const char *pathname
, int flags
, mode_t mode
)
405 * open(2) has extra parameter 'mode' when called with
408 if ((flags
& O_CREAT
) != 0) {
409 return (openat(dirfd
, pathname
, flags
, mode
));
411 return (openat(dirfd
, pathname
, flags
));
414 #ifdef TARGET_NR_readlinkat
415 static int sys_readlinkat(int dirfd
, const char *pathname
, char *buf
, size_t bufsiz
)
417 return (readlinkat(dirfd
, pathname
, buf
, bufsiz
));
420 #ifdef TARGET_NR_renameat
421 static int sys_renameat(int olddirfd
, const char *oldpath
,
422 int newdirfd
, const char *newpath
)
424 return (renameat(olddirfd
, oldpath
, newdirfd
, newpath
));
427 #ifdef TARGET_NR_symlinkat
428 static int sys_symlinkat(const char *oldpath
, int newdirfd
, const char *newpath
)
430 return (symlinkat(oldpath
, newdirfd
, newpath
));
433 #ifdef TARGET_NR_unlinkat
434 static int sys_unlinkat(int dirfd
, const char *pathname
, int flags
)
436 return (unlinkat(dirfd
, pathname
, flags
));
439 #else /* !CONFIG_ATFILE */
442 * Try direct syscalls instead
444 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
445 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
447 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
448 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
450 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
451 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
452 uid_t
,owner
,gid_t
,group
,int,flags
)
454 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
455 defined(__NR_fstatat64)
456 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
457 struct stat
*,buf
,int,flags
)
459 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
460 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
461 const struct timeval
*,times
)
463 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
464 defined(__NR_newfstatat)
465 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
466 struct stat
*,buf
,int,flags
)
468 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
469 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
470 int,newdirfd
,const char *,newpath
,int,flags
)
472 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
473 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
475 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
476 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
477 mode_t
,mode
,dev_t
,dev
)
479 #if defined(TARGET_NR_openat) && defined(__NR_openat)
480 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
482 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
483 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
484 char *,buf
,size_t,bufsize
)
486 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
487 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
488 int,newdirfd
,const char *,newpath
)
490 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
491 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
492 int,newdirfd
,const char *,newpath
)
494 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
495 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
498 #endif /* CONFIG_ATFILE */
500 #ifdef CONFIG_UTIMENSAT
501 static int sys_utimensat(int dirfd
, const char *pathname
,
502 const struct timespec times
[2], int flags
)
504 if (pathname
== NULL
)
505 return futimens(dirfd
, times
);
507 return utimensat(dirfd
, pathname
, times
, flags
);
510 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
511 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
512 const struct timespec
*,tsp
,int,flags
)
514 #endif /* CONFIG_UTIMENSAT */
516 #ifdef CONFIG_INOTIFY
517 #include <sys/inotify.h>
519 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
520 static int sys_inotify_init(void)
522 return (inotify_init());
525 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
526 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
528 return (inotify_add_watch(fd
, pathname
, mask
));
531 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
532 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
534 return (inotify_rm_watch(fd
, wd
));
537 #ifdef CONFIG_INOTIFY1
538 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
539 static int sys_inotify_init1(int flags
)
541 return (inotify_init1(flags
));
546 /* Userspace can usually survive runtime without inotify */
547 #undef TARGET_NR_inotify_init
548 #undef TARGET_NR_inotify_init1
549 #undef TARGET_NR_inotify_add_watch
550 #undef TARGET_NR_inotify_rm_watch
551 #endif /* CONFIG_INOTIFY */
553 #if defined(TARGET_NR_ppoll)
555 # define __NR_ppoll -1
557 #define __NR_sys_ppoll __NR_ppoll
558 _syscall5(int, sys_ppoll
, struct pollfd
*, fds
, nfds_t
, nfds
,
559 struct timespec
*, timeout
, const __sigset_t
*, sigmask
,
563 #if defined(TARGET_NR_pselect6)
564 #ifndef __NR_pselect6
565 # define __NR_pselect6 -1
567 #define __NR_sys_pselect6 __NR_pselect6
568 _syscall6(int, sys_pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
,
569 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
);
572 #if defined(TARGET_NR_prlimit64)
573 #ifndef __NR_prlimit64
574 # define __NR_prlimit64 -1
576 #define __NR_sys_prlimit64 __NR_prlimit64
577 /* The glibc rlimit structure may not be that used by the underlying syscall */
578 struct host_rlimit64
{
582 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
583 const struct host_rlimit64
*, new_limit
,
584 struct host_rlimit64
*, old_limit
)
587 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
589 static inline int regpairs_aligned(void *cpu_env
) {
590 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
592 #elif defined(TARGET_MIPS)
593 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
594 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
595 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
596 * of registers which translates to the same as ARM/MIPS, because we start with
598 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
600 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
603 #define ERRNO_TABLE_SIZE 1200
605 /* target_to_host_errno_table[] is initialized from
606 * host_to_target_errno_table[] in syscall_init(). */
607 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
611 * This list is the union of errno values overridden in asm-<arch>/errno.h
612 * minus the errnos that are not actually generic to all archs.
614 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
615 [EIDRM
] = TARGET_EIDRM
,
616 [ECHRNG
] = TARGET_ECHRNG
,
617 [EL2NSYNC
] = TARGET_EL2NSYNC
,
618 [EL3HLT
] = TARGET_EL3HLT
,
619 [EL3RST
] = TARGET_EL3RST
,
620 [ELNRNG
] = TARGET_ELNRNG
,
621 [EUNATCH
] = TARGET_EUNATCH
,
622 [ENOCSI
] = TARGET_ENOCSI
,
623 [EL2HLT
] = TARGET_EL2HLT
,
624 [EDEADLK
] = TARGET_EDEADLK
,
625 [ENOLCK
] = TARGET_ENOLCK
,
626 [EBADE
] = TARGET_EBADE
,
627 [EBADR
] = TARGET_EBADR
,
628 [EXFULL
] = TARGET_EXFULL
,
629 [ENOANO
] = TARGET_ENOANO
,
630 [EBADRQC
] = TARGET_EBADRQC
,
631 [EBADSLT
] = TARGET_EBADSLT
,
632 [EBFONT
] = TARGET_EBFONT
,
633 [ENOSTR
] = TARGET_ENOSTR
,
634 [ENODATA
] = TARGET_ENODATA
,
635 [ETIME
] = TARGET_ETIME
,
636 [ENOSR
] = TARGET_ENOSR
,
637 [ENONET
] = TARGET_ENONET
,
638 [ENOPKG
] = TARGET_ENOPKG
,
639 [EREMOTE
] = TARGET_EREMOTE
,
640 [ENOLINK
] = TARGET_ENOLINK
,
641 [EADV
] = TARGET_EADV
,
642 [ESRMNT
] = TARGET_ESRMNT
,
643 [ECOMM
] = TARGET_ECOMM
,
644 [EPROTO
] = TARGET_EPROTO
,
645 [EDOTDOT
] = TARGET_EDOTDOT
,
646 [EMULTIHOP
] = TARGET_EMULTIHOP
,
647 [EBADMSG
] = TARGET_EBADMSG
,
648 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
649 [EOVERFLOW
] = TARGET_EOVERFLOW
,
650 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
651 [EBADFD
] = TARGET_EBADFD
,
652 [EREMCHG
] = TARGET_EREMCHG
,
653 [ELIBACC
] = TARGET_ELIBACC
,
654 [ELIBBAD
] = TARGET_ELIBBAD
,
655 [ELIBSCN
] = TARGET_ELIBSCN
,
656 [ELIBMAX
] = TARGET_ELIBMAX
,
657 [ELIBEXEC
] = TARGET_ELIBEXEC
,
658 [EILSEQ
] = TARGET_EILSEQ
,
659 [ENOSYS
] = TARGET_ENOSYS
,
660 [ELOOP
] = TARGET_ELOOP
,
661 [ERESTART
] = TARGET_ERESTART
,
662 [ESTRPIPE
] = TARGET_ESTRPIPE
,
663 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
664 [EUSERS
] = TARGET_EUSERS
,
665 [ENOTSOCK
] = TARGET_ENOTSOCK
,
666 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
667 [EMSGSIZE
] = TARGET_EMSGSIZE
,
668 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
669 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
670 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
671 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
672 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
673 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
674 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
675 [EADDRINUSE
] = TARGET_EADDRINUSE
,
676 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
677 [ENETDOWN
] = TARGET_ENETDOWN
,
678 [ENETUNREACH
] = TARGET_ENETUNREACH
,
679 [ENETRESET
] = TARGET_ENETRESET
,
680 [ECONNABORTED
] = TARGET_ECONNABORTED
,
681 [ECONNRESET
] = TARGET_ECONNRESET
,
682 [ENOBUFS
] = TARGET_ENOBUFS
,
683 [EISCONN
] = TARGET_EISCONN
,
684 [ENOTCONN
] = TARGET_ENOTCONN
,
685 [EUCLEAN
] = TARGET_EUCLEAN
,
686 [ENOTNAM
] = TARGET_ENOTNAM
,
687 [ENAVAIL
] = TARGET_ENAVAIL
,
688 [EISNAM
] = TARGET_EISNAM
,
689 [EREMOTEIO
] = TARGET_EREMOTEIO
,
690 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
691 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
692 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
693 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
694 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
695 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
696 [EALREADY
] = TARGET_EALREADY
,
697 [EINPROGRESS
] = TARGET_EINPROGRESS
,
698 [ESTALE
] = TARGET_ESTALE
,
699 [ECANCELED
] = TARGET_ECANCELED
,
700 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
701 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
703 [ENOKEY
] = TARGET_ENOKEY
,
706 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
709 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
712 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
715 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
717 #ifdef ENOTRECOVERABLE
718 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
722 static inline int host_to_target_errno(int err
)
724 if(host_to_target_errno_table
[err
])
725 return host_to_target_errno_table
[err
];
729 static inline int target_to_host_errno(int err
)
731 if (target_to_host_errno_table
[err
])
732 return target_to_host_errno_table
[err
];
736 static inline abi_long
get_errno(abi_long ret
)
739 return -host_to_target_errno(errno
);
744 static inline int is_error(abi_long ret
)
746 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
749 char *target_strerror(int err
)
751 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
754 return strerror(target_to_host_errno(err
));
757 static abi_ulong target_brk
;
758 static abi_ulong target_original_brk
;
759 static abi_ulong brk_page
;
761 void target_set_brk(abi_ulong new_brk
)
763 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
764 brk_page
= HOST_PAGE_ALIGN(target_brk
);
767 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
768 #define DEBUGF_BRK(message, args...)
770 /* do_brk() must return target values and target errnos. */
771 abi_long
do_brk(abi_ulong new_brk
)
773 abi_long mapped_addr
;
776 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
779 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
782 if (new_brk
< target_original_brk
) {
783 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
788 /* If the new brk is less than the highest page reserved to the
789 * target heap allocation, set it and we're almost done... */
790 if (new_brk
<= brk_page
) {
791 /* Heap contents are initialized to zero, as for anonymous
793 if (new_brk
> target_brk
) {
794 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
796 target_brk
= new_brk
;
797 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
801 /* We need to allocate more memory after the brk... Note that
802 * we don't use MAP_FIXED because that will map over the top of
803 * any existing mapping (like the one with the host libc or qemu
804 * itself); instead we treat "mapped but at wrong address" as
805 * a failure and unmap again.
807 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
808 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
809 PROT_READ
|PROT_WRITE
,
810 MAP_ANON
|MAP_PRIVATE
, 0, 0));
812 if (mapped_addr
== brk_page
) {
813 /* Heap contents are initialized to zero, as for anonymous
814 * mapped pages. Technically the new pages are already
815 * initialized to zero since they *are* anonymous mapped
816 * pages, however we have to take care with the contents that
817 * come from the remaining part of the previous page: it may
818 * contains garbage data due to a previous heap usage (grown
820 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
822 target_brk
= new_brk
;
823 brk_page
= HOST_PAGE_ALIGN(target_brk
);
824 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
827 } else if (mapped_addr
!= -1) {
828 /* Mapped but at wrong address, meaning there wasn't actually
829 * enough space for this brk.
831 target_munmap(mapped_addr
, new_alloc_size
);
833 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
836 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
839 #if defined(TARGET_ALPHA)
840 /* We (partially) emulate OSF/1 on Alpha, which requires we
841 return a proper errno, not an unchanged brk value. */
842 return -TARGET_ENOMEM
;
844 /* For everything else, return the previous break. */
848 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
849 abi_ulong target_fds_addr
,
853 abi_ulong b
, *target_fds
;
855 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
856 if (!(target_fds
= lock_user(VERIFY_READ
,
858 sizeof(abi_ulong
) * nw
,
860 return -TARGET_EFAULT
;
864 for (i
= 0; i
< nw
; i
++) {
865 /* grab the abi_ulong */
866 __get_user(b
, &target_fds
[i
]);
867 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
868 /* check the bit inside the abi_ulong */
875 unlock_user(target_fds
, target_fds_addr
, 0);
880 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
881 abi_ulong target_fds_addr
,
884 if (target_fds_addr
) {
885 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
886 return -TARGET_EFAULT
;
894 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
900 abi_ulong
*target_fds
;
902 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
903 if (!(target_fds
= lock_user(VERIFY_WRITE
,
905 sizeof(abi_ulong
) * nw
,
907 return -TARGET_EFAULT
;
910 for (i
= 0; i
< nw
; i
++) {
912 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
913 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
916 __put_user(v
, &target_fds
[i
]);
919 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
924 #if defined(__alpha__)
930 static inline abi_long
host_to_target_clock_t(long ticks
)
932 #if HOST_HZ == TARGET_HZ
935 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
939 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
940 const struct rusage
*rusage
)
942 struct target_rusage
*target_rusage
;
944 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
945 return -TARGET_EFAULT
;
946 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
947 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
948 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
949 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
950 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
951 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
952 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
953 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
954 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
955 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
956 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
957 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
958 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
959 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
960 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
961 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
962 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
963 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
964 unlock_user_struct(target_rusage
, target_addr
, 1);
969 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
971 abi_ulong target_rlim_swap
;
974 target_rlim_swap
= tswapal(target_rlim
);
975 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
976 return RLIM_INFINITY
;
978 result
= target_rlim_swap
;
979 if (target_rlim_swap
!= (rlim_t
)result
)
980 return RLIM_INFINITY
;
985 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
987 abi_ulong target_rlim_swap
;
990 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
991 target_rlim_swap
= TARGET_RLIM_INFINITY
;
993 target_rlim_swap
= rlim
;
994 result
= tswapal(target_rlim_swap
);
999 static inline int target_to_host_resource(int code
)
1002 case TARGET_RLIMIT_AS
:
1004 case TARGET_RLIMIT_CORE
:
1006 case TARGET_RLIMIT_CPU
:
1008 case TARGET_RLIMIT_DATA
:
1010 case TARGET_RLIMIT_FSIZE
:
1011 return RLIMIT_FSIZE
;
1012 case TARGET_RLIMIT_LOCKS
:
1013 return RLIMIT_LOCKS
;
1014 case TARGET_RLIMIT_MEMLOCK
:
1015 return RLIMIT_MEMLOCK
;
1016 case TARGET_RLIMIT_MSGQUEUE
:
1017 return RLIMIT_MSGQUEUE
;
1018 case TARGET_RLIMIT_NICE
:
1020 case TARGET_RLIMIT_NOFILE
:
1021 return RLIMIT_NOFILE
;
1022 case TARGET_RLIMIT_NPROC
:
1023 return RLIMIT_NPROC
;
1024 case TARGET_RLIMIT_RSS
:
1026 case TARGET_RLIMIT_RTPRIO
:
1027 return RLIMIT_RTPRIO
;
1028 case TARGET_RLIMIT_SIGPENDING
:
1029 return RLIMIT_SIGPENDING
;
1030 case TARGET_RLIMIT_STACK
:
1031 return RLIMIT_STACK
;
1037 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1038 abi_ulong target_tv_addr
)
1040 struct target_timeval
*target_tv
;
1042 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1043 return -TARGET_EFAULT
;
1045 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1046 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1048 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1053 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1054 const struct timeval
*tv
)
1056 struct target_timeval
*target_tv
;
1058 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1059 return -TARGET_EFAULT
;
1061 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1062 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1064 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1069 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1072 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1073 abi_ulong target_mq_attr_addr
)
1075 struct target_mq_attr
*target_mq_attr
;
1077 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1078 target_mq_attr_addr
, 1))
1079 return -TARGET_EFAULT
;
1081 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1082 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1083 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1084 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1086 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1091 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1092 const struct mq_attr
*attr
)
1094 struct target_mq_attr
*target_mq_attr
;
1096 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1097 target_mq_attr_addr
, 0))
1098 return -TARGET_EFAULT
;
1100 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1101 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1102 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1103 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1105 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1111 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1112 /* do_select() must return target values and target errnos. */
1113 static abi_long
do_select(int n
,
1114 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1115 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1117 fd_set rfds
, wfds
, efds
;
1118 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1119 struct timeval tv
, *tv_ptr
;
1122 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1126 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1130 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1135 if (target_tv_addr
) {
1136 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1137 return -TARGET_EFAULT
;
1143 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
1145 if (!is_error(ret
)) {
1146 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1147 return -TARGET_EFAULT
;
1148 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1149 return -TARGET_EFAULT
;
1150 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1151 return -TARGET_EFAULT
;
1153 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
1154 return -TARGET_EFAULT
;
1161 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1164 return pipe2(host_pipe
, flags
);
1170 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1171 int flags
, int is_pipe2
)
1175 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1178 return get_errno(ret
);
1180 /* Several targets have special calling conventions for the original
1181 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1183 #if defined(TARGET_ALPHA)
1184 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1185 return host_pipe
[0];
1186 #elif defined(TARGET_MIPS)
1187 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1188 return host_pipe
[0];
1189 #elif defined(TARGET_SH4)
1190 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1191 return host_pipe
[0];
1195 if (put_user_s32(host_pipe
[0], pipedes
)
1196 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1197 return -TARGET_EFAULT
;
1198 return get_errno(ret
);
1201 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1202 abi_ulong target_addr
,
1205 struct target_ip_mreqn
*target_smreqn
;
1207 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1209 return -TARGET_EFAULT
;
1210 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1211 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1212 if (len
== sizeof(struct target_ip_mreqn
))
1213 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1214 unlock_user(target_smreqn
, target_addr
, 0);
1219 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
1220 abi_ulong target_addr
,
1223 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1224 sa_family_t sa_family
;
1225 struct target_sockaddr
*target_saddr
;
1227 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1229 return -TARGET_EFAULT
;
1231 sa_family
= tswap16(target_saddr
->sa_family
);
1233 /* Oops. The caller might send a incomplete sun_path; sun_path
1234 * must be terminated by \0 (see the manual page), but
1235 * unfortunately it is quite common to specify sockaddr_un
1236 * length as "strlen(x->sun_path)" while it should be
1237 * "strlen(...) + 1". We'll fix that here if needed.
1238 * Linux kernel has a similar feature.
1241 if (sa_family
== AF_UNIX
) {
1242 if (len
< unix_maxlen
&& len
> 0) {
1243 char *cp
= (char*)target_saddr
;
1245 if ( cp
[len
-1] && !cp
[len
] )
1248 if (len
> unix_maxlen
)
1252 memcpy(addr
, target_saddr
, len
);
1253 addr
->sa_family
= sa_family
;
1254 unlock_user(target_saddr
, target_addr
, 0);
1259 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1260 struct sockaddr
*addr
,
1263 struct target_sockaddr
*target_saddr
;
1265 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1267 return -TARGET_EFAULT
;
1268 memcpy(target_saddr
, addr
, len
);
1269 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1270 unlock_user(target_saddr
, target_addr
, len
);
1275 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1276 struct target_msghdr
*target_msgh
)
1278 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1279 abi_long msg_controllen
;
1280 abi_ulong target_cmsg_addr
;
1281 struct target_cmsghdr
*target_cmsg
;
1282 socklen_t space
= 0;
1284 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1285 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1287 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1288 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1290 return -TARGET_EFAULT
;
1292 while (cmsg
&& target_cmsg
) {
1293 void *data
= CMSG_DATA(cmsg
);
1294 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1296 int len
= tswapal(target_cmsg
->cmsg_len
)
1297 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1299 space
+= CMSG_SPACE(len
);
1300 if (space
> msgh
->msg_controllen
) {
1301 space
-= CMSG_SPACE(len
);
1302 gemu_log("Host cmsg overflow\n");
1306 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1307 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1308 cmsg
->cmsg_len
= CMSG_LEN(len
);
1310 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1311 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1312 memcpy(data
, target_data
, len
);
1314 int *fd
= (int *)data
;
1315 int *target_fd
= (int *)target_data
;
1316 int i
, numfds
= len
/ sizeof(int);
1318 for (i
= 0; i
< numfds
; i
++)
1319 fd
[i
] = tswap32(target_fd
[i
]);
1322 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1323 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1325 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1327 msgh
->msg_controllen
= space
;
1331 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1332 struct msghdr
*msgh
)
1334 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1335 abi_long msg_controllen
;
1336 abi_ulong target_cmsg_addr
;
1337 struct target_cmsghdr
*target_cmsg
;
1338 socklen_t space
= 0;
1340 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1341 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1343 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1344 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1346 return -TARGET_EFAULT
;
1348 while (cmsg
&& target_cmsg
) {
1349 void *data
= CMSG_DATA(cmsg
);
1350 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1352 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1354 space
+= TARGET_CMSG_SPACE(len
);
1355 if (space
> msg_controllen
) {
1356 space
-= TARGET_CMSG_SPACE(len
);
1357 gemu_log("Target cmsg overflow\n");
1361 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1362 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1363 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(len
));
1365 if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1366 (cmsg
->cmsg_type
== SCM_RIGHTS
)) {
1367 int *fd
= (int *)data
;
1368 int *target_fd
= (int *)target_data
;
1369 int i
, numfds
= len
/ sizeof(int);
1371 for (i
= 0; i
< numfds
; i
++)
1372 target_fd
[i
] = tswap32(fd
[i
]);
1373 } else if ((cmsg
->cmsg_level
== TARGET_SOL_SOCKET
) &&
1374 (cmsg
->cmsg_type
== SO_TIMESTAMP
) &&
1375 (len
== sizeof(struct timeval
))) {
1376 /* copy struct timeval to target */
1377 struct timeval
*tv
= (struct timeval
*)data
;
1378 struct target_timeval
*target_tv
=
1379 (struct target_timeval
*)target_data
;
1381 target_tv
->tv_sec
= tswapal(tv
->tv_sec
);
1382 target_tv
->tv_usec
= tswapal(tv
->tv_usec
);
1384 gemu_log("Unsupported ancillary data: %d/%d\n",
1385 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1386 memcpy(target_data
, data
, len
);
1389 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1390 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1392 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1394 target_msgh
->msg_controllen
= tswapal(space
);
1398 /* do_setsockopt() Must return target values and target errnos. */
1399 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1400 abi_ulong optval_addr
, socklen_t optlen
)
1404 struct ip_mreqn
*ip_mreq
;
1405 struct ip_mreq_source
*ip_mreq_source
;
1409 /* TCP options all take an 'int' value. */
1410 if (optlen
< sizeof(uint32_t))
1411 return -TARGET_EINVAL
;
1413 if (get_user_u32(val
, optval_addr
))
1414 return -TARGET_EFAULT
;
1415 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1422 case IP_ROUTER_ALERT
:
1426 case IP_MTU_DISCOVER
:
1432 case IP_MULTICAST_TTL
:
1433 case IP_MULTICAST_LOOP
:
1435 if (optlen
>= sizeof(uint32_t)) {
1436 if (get_user_u32(val
, optval_addr
))
1437 return -TARGET_EFAULT
;
1438 } else if (optlen
>= 1) {
1439 if (get_user_u8(val
, optval_addr
))
1440 return -TARGET_EFAULT
;
1442 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1444 case IP_ADD_MEMBERSHIP
:
1445 case IP_DROP_MEMBERSHIP
:
1446 if (optlen
< sizeof (struct target_ip_mreq
) ||
1447 optlen
> sizeof (struct target_ip_mreqn
))
1448 return -TARGET_EINVAL
;
1450 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1451 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1452 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1455 case IP_BLOCK_SOURCE
:
1456 case IP_UNBLOCK_SOURCE
:
1457 case IP_ADD_SOURCE_MEMBERSHIP
:
1458 case IP_DROP_SOURCE_MEMBERSHIP
:
1459 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1460 return -TARGET_EINVAL
;
1462 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1463 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1464 unlock_user (ip_mreq_source
, optval_addr
, 0);
1474 /* struct icmp_filter takes an u32 value */
1475 if (optlen
< sizeof(uint32_t)) {
1476 return -TARGET_EINVAL
;
1479 if (get_user_u32(val
, optval_addr
)) {
1480 return -TARGET_EFAULT
;
1482 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
1483 &val
, sizeof(val
)));
1490 case TARGET_SOL_SOCKET
:
1492 /* Options with 'int' argument. */
1493 case TARGET_SO_DEBUG
:
1496 case TARGET_SO_REUSEADDR
:
1497 optname
= SO_REUSEADDR
;
1499 case TARGET_SO_TYPE
:
1502 case TARGET_SO_ERROR
:
1505 case TARGET_SO_DONTROUTE
:
1506 optname
= SO_DONTROUTE
;
1508 case TARGET_SO_BROADCAST
:
1509 optname
= SO_BROADCAST
;
1511 case TARGET_SO_SNDBUF
:
1512 optname
= SO_SNDBUF
;
1514 case TARGET_SO_RCVBUF
:
1515 optname
= SO_RCVBUF
;
1517 case TARGET_SO_KEEPALIVE
:
1518 optname
= SO_KEEPALIVE
;
1520 case TARGET_SO_OOBINLINE
:
1521 optname
= SO_OOBINLINE
;
1523 case TARGET_SO_NO_CHECK
:
1524 optname
= SO_NO_CHECK
;
1526 case TARGET_SO_PRIORITY
:
1527 optname
= SO_PRIORITY
;
1530 case TARGET_SO_BSDCOMPAT
:
1531 optname
= SO_BSDCOMPAT
;
1534 case TARGET_SO_PASSCRED
:
1535 optname
= SO_PASSCRED
;
1537 case TARGET_SO_TIMESTAMP
:
1538 optname
= SO_TIMESTAMP
;
1540 case TARGET_SO_RCVLOWAT
:
1541 optname
= SO_RCVLOWAT
;
1543 case TARGET_SO_RCVTIMEO
:
1544 optname
= SO_RCVTIMEO
;
1546 case TARGET_SO_SNDTIMEO
:
1547 optname
= SO_SNDTIMEO
;
1553 if (optlen
< sizeof(uint32_t))
1554 return -TARGET_EINVAL
;
1556 if (get_user_u32(val
, optval_addr
))
1557 return -TARGET_EFAULT
;
1558 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1562 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
1563 ret
= -TARGET_ENOPROTOOPT
;
1568 /* do_getsockopt() Must return target values and target errnos. */
1569 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1570 abi_ulong optval_addr
, abi_ulong optlen
)
1577 case TARGET_SOL_SOCKET
:
1580 /* These don't just return a single integer */
1581 case TARGET_SO_LINGER
:
1582 case TARGET_SO_RCVTIMEO
:
1583 case TARGET_SO_SNDTIMEO
:
1584 case TARGET_SO_PEERNAME
:
1586 case TARGET_SO_PEERCRED
: {
1589 struct target_ucred
*tcr
;
1591 if (get_user_u32(len
, optlen
)) {
1592 return -TARGET_EFAULT
;
1595 return -TARGET_EINVAL
;
1599 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
1607 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
1608 return -TARGET_EFAULT
;
1610 __put_user(cr
.pid
, &tcr
->pid
);
1611 __put_user(cr
.uid
, &tcr
->uid
);
1612 __put_user(cr
.gid
, &tcr
->gid
);
1613 unlock_user_struct(tcr
, optval_addr
, 1);
1614 if (put_user_u32(len
, optlen
)) {
1615 return -TARGET_EFAULT
;
1619 /* Options with 'int' argument. */
1620 case TARGET_SO_DEBUG
:
1623 case TARGET_SO_REUSEADDR
:
1624 optname
= SO_REUSEADDR
;
1626 case TARGET_SO_TYPE
:
1629 case TARGET_SO_ERROR
:
1632 case TARGET_SO_DONTROUTE
:
1633 optname
= SO_DONTROUTE
;
1635 case TARGET_SO_BROADCAST
:
1636 optname
= SO_BROADCAST
;
1638 case TARGET_SO_SNDBUF
:
1639 optname
= SO_SNDBUF
;
1641 case TARGET_SO_RCVBUF
:
1642 optname
= SO_RCVBUF
;
1644 case TARGET_SO_KEEPALIVE
:
1645 optname
= SO_KEEPALIVE
;
1647 case TARGET_SO_OOBINLINE
:
1648 optname
= SO_OOBINLINE
;
1650 case TARGET_SO_NO_CHECK
:
1651 optname
= SO_NO_CHECK
;
1653 case TARGET_SO_PRIORITY
:
1654 optname
= SO_PRIORITY
;
1657 case TARGET_SO_BSDCOMPAT
:
1658 optname
= SO_BSDCOMPAT
;
1661 case TARGET_SO_PASSCRED
:
1662 optname
= SO_PASSCRED
;
1664 case TARGET_SO_TIMESTAMP
:
1665 optname
= SO_TIMESTAMP
;
1667 case TARGET_SO_RCVLOWAT
:
1668 optname
= SO_RCVLOWAT
;
1675 /* TCP options all take an 'int' value. */
1677 if (get_user_u32(len
, optlen
))
1678 return -TARGET_EFAULT
;
1680 return -TARGET_EINVAL
;
1682 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1688 if (put_user_u32(val
, optval_addr
))
1689 return -TARGET_EFAULT
;
1691 if (put_user_u8(val
, optval_addr
))
1692 return -TARGET_EFAULT
;
1694 if (put_user_u32(len
, optlen
))
1695 return -TARGET_EFAULT
;
1702 case IP_ROUTER_ALERT
:
1706 case IP_MTU_DISCOVER
:
1712 case IP_MULTICAST_TTL
:
1713 case IP_MULTICAST_LOOP
:
1714 if (get_user_u32(len
, optlen
))
1715 return -TARGET_EFAULT
;
1717 return -TARGET_EINVAL
;
1719 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
1722 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1724 if (put_user_u32(len
, optlen
)
1725 || put_user_u8(val
, optval_addr
))
1726 return -TARGET_EFAULT
;
1728 if (len
> sizeof(int))
1730 if (put_user_u32(len
, optlen
)
1731 || put_user_u32(val
, optval_addr
))
1732 return -TARGET_EFAULT
;
1736 ret
= -TARGET_ENOPROTOOPT
;
1742 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1744 ret
= -TARGET_EOPNOTSUPP
;
1750 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
1751 int count
, int copy
)
1753 struct target_iovec
*target_vec
;
1755 abi_ulong total_len
, max_len
;
1762 if (count
> IOV_MAX
) {
1767 vec
= calloc(count
, sizeof(struct iovec
));
1773 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1774 count
* sizeof(struct target_iovec
), 1);
1775 if (target_vec
== NULL
) {
1780 /* ??? If host page size > target page size, this will result in a
1781 value larger than what we can actually support. */
1782 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
1785 for (i
= 0; i
< count
; i
++) {
1786 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1787 abi_long len
= tswapal(target_vec
[i
].iov_len
);
1792 } else if (len
== 0) {
1793 /* Zero length pointer is ignored. */
1794 vec
[i
].iov_base
= 0;
1796 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
1797 if (!vec
[i
].iov_base
) {
1801 if (len
> max_len
- total_len
) {
1802 len
= max_len
- total_len
;
1805 vec
[i
].iov_len
= len
;
1809 unlock_user(target_vec
, target_addr
, 0);
1815 unlock_user(target_vec
, target_addr
, 0);
1819 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1820 int count
, int copy
)
1822 struct target_iovec
*target_vec
;
1825 target_vec
= lock_user(VERIFY_READ
, target_addr
,
1826 count
* sizeof(struct target_iovec
), 1);
1828 for (i
= 0; i
< count
; i
++) {
1829 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
1830 abi_long len
= tswapal(target_vec
[i
].iov_base
);
1834 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1836 unlock_user(target_vec
, target_addr
, 0);
1842 /* do_socket() Must return target values and target errnos. */
1843 static abi_long
do_socket(int domain
, int type
, int protocol
)
1845 #if defined(TARGET_MIPS)
1847 case TARGET_SOCK_DGRAM
:
1850 case TARGET_SOCK_STREAM
:
1853 case TARGET_SOCK_RAW
:
1856 case TARGET_SOCK_RDM
:
1859 case TARGET_SOCK_SEQPACKET
:
1860 type
= SOCK_SEQPACKET
;
1862 case TARGET_SOCK_PACKET
:
1867 if (domain
== PF_NETLINK
)
1868 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1869 return get_errno(socket(domain
, type
, protocol
));
1872 /* do_bind() Must return target values and target errnos. */
1873 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1879 if ((int)addrlen
< 0) {
1880 return -TARGET_EINVAL
;
1883 addr
= alloca(addrlen
+1);
1885 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1889 return get_errno(bind(sockfd
, addr
, addrlen
));
1892 /* do_connect() Must return target values and target errnos. */
1893 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1899 if ((int)addrlen
< 0) {
1900 return -TARGET_EINVAL
;
1903 addr
= alloca(addrlen
);
1905 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1909 return get_errno(connect(sockfd
, addr
, addrlen
));
1912 /* do_sendrecvmsg() Must return target values and target errnos. */
1913 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1914 int flags
, int send
)
1917 struct target_msghdr
*msgp
;
1921 abi_ulong target_vec
;
1924 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1928 return -TARGET_EFAULT
;
1929 if (msgp
->msg_name
) {
1930 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1931 msg
.msg_name
= alloca(msg
.msg_namelen
);
1932 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1938 msg
.msg_name
= NULL
;
1939 msg
.msg_namelen
= 0;
1941 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1942 msg
.msg_control
= alloca(msg
.msg_controllen
);
1943 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1945 count
= tswapal(msgp
->msg_iovlen
);
1946 target_vec
= tswapal(msgp
->msg_iov
);
1947 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
1948 target_vec
, count
, send
);
1950 ret
= -host_to_target_errno(errno
);
1953 msg
.msg_iovlen
= count
;
1957 ret
= target_to_host_cmsg(&msg
, msgp
);
1959 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1961 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1962 if (!is_error(ret
)) {
1964 ret
= host_to_target_cmsg(msgp
, &msg
);
1965 if (!is_error(ret
)) {
1966 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
1967 if (msg
.msg_name
!= NULL
) {
1968 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
1969 msg
.msg_name
, msg
.msg_namelen
);
1981 unlock_iovec(vec
, target_vec
, count
, !send
);
1983 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1987 /* do_accept() Must return target values and target errnos. */
1988 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1989 abi_ulong target_addrlen_addr
)
1995 if (target_addr
== 0)
1996 return get_errno(accept(fd
, NULL
, NULL
));
1998 /* linux returns EINVAL if addrlen pointer is invalid */
1999 if (get_user_u32(addrlen
, target_addrlen_addr
))
2000 return -TARGET_EINVAL
;
2002 if ((int)addrlen
< 0) {
2003 return -TARGET_EINVAL
;
2006 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2007 return -TARGET_EINVAL
;
2009 addr
= alloca(addrlen
);
2011 ret
= get_errno(accept(fd
, addr
, &addrlen
));
2012 if (!is_error(ret
)) {
2013 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2014 if (put_user_u32(addrlen
, target_addrlen_addr
))
2015 ret
= -TARGET_EFAULT
;
2020 /* do_getpeername() Must return target values and target errnos. */
2021 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
2022 abi_ulong target_addrlen_addr
)
2028 if (get_user_u32(addrlen
, target_addrlen_addr
))
2029 return -TARGET_EFAULT
;
2031 if ((int)addrlen
< 0) {
2032 return -TARGET_EINVAL
;
2035 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2036 return -TARGET_EFAULT
;
2038 addr
= alloca(addrlen
);
2040 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
2041 if (!is_error(ret
)) {
2042 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2043 if (put_user_u32(addrlen
, target_addrlen_addr
))
2044 ret
= -TARGET_EFAULT
;
2049 /* do_getsockname() Must return target values and target errnos. */
2050 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
2051 abi_ulong target_addrlen_addr
)
2057 if (get_user_u32(addrlen
, target_addrlen_addr
))
2058 return -TARGET_EFAULT
;
2060 if ((int)addrlen
< 0) {
2061 return -TARGET_EINVAL
;
2064 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
2065 return -TARGET_EFAULT
;
2067 addr
= alloca(addrlen
);
2069 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
2070 if (!is_error(ret
)) {
2071 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2072 if (put_user_u32(addrlen
, target_addrlen_addr
))
2073 ret
= -TARGET_EFAULT
;
2078 /* do_socketpair() Must return target values and target errnos. */
2079 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
2080 abi_ulong target_tab_addr
)
2085 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
2086 if (!is_error(ret
)) {
2087 if (put_user_s32(tab
[0], target_tab_addr
)
2088 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
2089 ret
= -TARGET_EFAULT
;
2094 /* do_sendto() Must return target values and target errnos. */
2095 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
2096 abi_ulong target_addr
, socklen_t addrlen
)
2102 if ((int)addrlen
< 0) {
2103 return -TARGET_EINVAL
;
2106 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
2108 return -TARGET_EFAULT
;
2110 addr
= alloca(addrlen
);
2111 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
2113 unlock_user(host_msg
, msg
, 0);
2116 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
2118 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
2120 unlock_user(host_msg
, msg
, 0);
2124 /* do_recvfrom() Must return target values and target errnos. */
2125 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
2126 abi_ulong target_addr
,
2127 abi_ulong target_addrlen
)
2134 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
2136 return -TARGET_EFAULT
;
2138 if (get_user_u32(addrlen
, target_addrlen
)) {
2139 ret
= -TARGET_EFAULT
;
2142 if ((int)addrlen
< 0) {
2143 ret
= -TARGET_EINVAL
;
2146 addr
= alloca(addrlen
);
2147 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
2149 addr
= NULL
; /* To keep compiler quiet. */
2150 ret
= get_errno(qemu_recv(fd
, host_msg
, len
, flags
));
2152 if (!is_error(ret
)) {
2154 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
2155 if (put_user_u32(addrlen
, target_addrlen
)) {
2156 ret
= -TARGET_EFAULT
;
2160 unlock_user(host_msg
, msg
, len
);
2163 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)

    const int n = sizeof(abi_ulong);

        abi_ulong domain, type, protocol;

        if (get_user_ual(domain, vptr)
            || get_user_ual(type, vptr + n)
            || get_user_ual(protocol, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_socket(domain, type, protocol);

        abi_ulong target_addr;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_bind(sockfd, target_addr, addrlen);
    case SOCKOP_connect:

        abi_ulong target_addr;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_connect(sockfd, target_addr, addrlen);

        abi_ulong sockfd, backlog;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(backlog, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(listen(sockfd, backlog));

        abi_ulong target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_accept(sockfd, target_addr, target_addrlen);
    case SOCKOP_getsockname:

        abi_ulong target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getsockname(sockfd, target_addr, target_addrlen);
    case SOCKOP_getpeername:

        abi_ulong target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getpeername(sockfd, target_addr, target_addrlen);
    case SOCKOP_socketpair:

        abi_ulong domain, type, protocol;

        if (get_user_ual(domain, vptr)
            || get_user_ual(type, vptr + n)
            || get_user_ual(protocol, vptr + 2 * n)
            || get_user_ual(tab, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_socketpair(domain, type, protocol, tab);

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, 0, 0);

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
    case SOCKOP_recvfrom:

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
    case SOCKOP_shutdown:

        abi_ulong sockfd, how;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(how, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(shutdown(sockfd, how));
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:

        abi_ulong target_msg;

        if (get_user_ual(fd, vptr)
            || get_user_ual(target_msg, vptr + n)
            || get_user_ual(flags, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_sendrecvmsg(fd, target_msg, flags,
                             (num == SOCKOP_sendmsg));
    case SOCKOP_setsockopt:

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_setsockopt(sockfd, level, optname, optval, optlen);
    case SOCKOP_getsockopt:

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_getsockopt(sockfd, level, optname, optval, optlen);

        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
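/* Note on do_socketcall() above: the guest passes a single pointer (vptr) to
 * an array of abi_ulong arguments, exactly as the kernel's sys_socketcall()
 * does.  Each SOCKOP_* case therefore re-reads only the arguments it needs
 * with get_user_ual() at vptr + i * n and bails out with -TARGET_EFAULT if
 * any of those guest reads fault, before handing off to the corresponding
 * do_*() helper. */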
#define N_SHM_REGIONS 32

static struct shm_region {
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;

struct target_semid_ds
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)

    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapal(target_ip->__key);
    host_ip->uid = tswapal(target_ip->uid);
    host_ip->gid = tswapal(target_ip->gid);
    host_ip->cuid = tswapal(target_ip->cuid);
    host_ip->cgid = tswapal(target_ip->cgid);
    host_ip->mode = tswap16(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)

    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapal(host_ip->__key);
    target_ip->uid = tswapal(host_ip->uid);
    target_ip->gid = tswapal(host_ip->gid);
    target_ip->cuid = tswapal(host_ip->cuid);
    target_ip->cgid = tswapal(host_ip->cgid);
    target_ip->mode = tswap16(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);

static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)

    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)

    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
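/* Note: the converters above follow the usual lock_user_struct() pattern:
 * VERIFY_READ plus unlock(..., 0) copies the guest structure in and discards
 * it, while VERIFY_WRITE plus unlock(..., 1) copies the converted structure
 * back to guest memory.  tswapal() byte-swaps an abi_long/abi_ulong sized
 * field when target and host endianness differ. */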
struct target_seminfo {

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)

    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);

    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;

union target_semun {

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)

    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 0);

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)

    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 1);
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)

    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;

        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswap32(arg.val);

        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);

        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);

        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);

        ret = get_errno(semctl(semid, semnum, cmd, NULL));
struct target_sembuf {
    unsigned short sem_num;

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,

    struct target_sembuf *target_sembuf;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);

    unlock_user(target_sembuf, target_addr, 0);

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)

    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
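/* Note on do_semop() above: sops is a C99 variable-length array sized directly
 * from the guest-supplied nsops, and each sembuf entry is converted
 * field-by-field because the target layout and byte order may differ from the
 * host's. */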
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;

static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)

    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)

    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
struct target_msginfo {
    unsigned short int msgseg;

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)

    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);

static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)

    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(msgctl(msgid, cmd, NULL));

        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
struct target_msgbuf {

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)

    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    unlock_user_struct(target_mb, msgp, 0);

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,

    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));

        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);

    target_mb->mtype = tswapal(host_mb->mtype);

    unlock_user_struct(target_mb, msgp, 1);
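/* Note on do_msgsnd()/do_msgrcv() above: a host struct msgbuf is a long mtype
 * followed by the message text, which is why the temporary host buffer is
 * allocated as msgsz + sizeof(long).  On the receive side the text is copied
 * back through a separate lock_user() window at msgp + sizeof(abi_ulong),
 * i.e. just past the guest's mtype field. */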
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;

static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)

    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);

static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)

    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
struct target_shminfo {

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)

    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);

struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)

    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)

    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, NULL));
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)

    struct shmid_ds shm_info;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */

        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);

        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            host_raddr = (void *)-1;
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;

static inline abi_long do_shmdt(abi_ulong shmaddr)

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);

    return get_errno(shmdt(g2h(shmaddr)));
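/* Note on do_shmat()/do_shmdt() above: the segment size is obtained with
 * shmctl(IPC_STAT) so that page_set_flags() can mark the attached guest range
 * valid and readable (and writable unless SHM_RDONLY was requested), and the
 * start/size pair is remembered in shm_regions[] so that a later shmdt() can
 * clear those page flags again. */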
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)

    version = call >> 16;

        ret = do_semop(first, ptr, second);

        ret = get_errno(semget(first, second, third));

        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);

        ret = get_errno(msgget(first, second));

        ret = do_msgsnd(first, ptr, second, third);

        ret = do_msgctl(first, second, ptr);

            struct target_ipc_kludge {

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;

            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

            unlock_user_struct(tmp, ptr, 0);

            ret = do_msgrcv(first, ptr, second, fifth, third);

            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;

            ret = -TARGET_EINVAL;

        ret = do_shmdt(ptr);

        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));

        /* IPC_* and SHM_* command values are the same on all linux platforms */
        ret = do_shmctl(first, second, third);

        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
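/* Note on do_ipc() above: as with the kernel's sys_ipc(), the low 16 bits of
 * 'call' select the operation and the upper bits carry the interface version
 * (version = call >> 16).  The old-style MSGRCV variant receives a
 * target_ipc_kludge holding msgp and msgtyp instead of passing them directly,
 * and SHMAT returns the attach address through the pointer in 'third'. */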
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

    unsigned int target_cmd;
    unsigned int host_cmd;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
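/* Note: syscall_types.h is included twice above on purpose.  With the first
 * set of STRUCT()/STRUCT_SPECIAL() definitions it expands to the STRUCT_<name>
 * enumerators; with the second set it expands to the struct_<name>_def[]
 * argtype descriptor arrays that thunk_convert() walks.  As a purely
 * illustrative sketch (the real field lists live in syscall_types.h), an entry
 * such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * would become the enumerator STRUCT_winsize on the first pass and
 *
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 *
 * on the second. */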
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)

    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     */
    int target_size_in, target_size_out;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    int i, extent_size = thunk_type_size(extent_arg_type, 0);

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
            return -TARGET_ENOMEM;
        memcpy(fm, buf_temp, sizeof(struct fiemap));

    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
            ret = -TARGET_EFAULT;
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
            unlock_user(argptr, arg, target_size_out);
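/* Note on the FS_IOC_FIEMAP handler above: the extent array that follows the
 * fixed struct fiemap is what makes the payload variable-sized, so the handler
 * first bounds fm_extent_count against FIEMAP_MAX_EXTENTS, then switches from
 * buf_temp to a malloc'd buffer whenever the full reply would exceed
 * MAX_STRUCT_SIZE, and finally converts the header plus fm_mapped_extents
 * extents back to the guest. */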
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)

    const argtype *arg_type = ie->arg_type;
    struct ifconf *host_ifconf;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    abi_long target_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
            return -TARGET_ENOMEM;
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));

    host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        /* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq ; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        unlock_user(argptr, target_ifc_buf, target_ifc_len);
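/* Note on the SIOCGIFCONF handler above: a target struct ifreq and the host's
 * struct ifreq are not guaranteed to have the same size, so ifc_len is
 * rescaled in both directions (guest units on the way in, host units for the
 * ioctl, then back again) and each ifreq element is thunk-converted
 * individually when the array is copied back to guest memory. */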
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            abi_long cmd, abi_long arg)

    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    const argtype *arg_type = ie->arg_type;
    void *big_buf = NULL;

    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        ret = -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    switch (ie->host_cmd) {
    case DM_LIST_DEVICES:
    case DM_DEV_SUSPEND:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_LIST_VERSIONS:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);

        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            cur_data += spec->next;

        ret = -TARGET_EINVAL;
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_DEV_SUSPEND:
        case DM_TABLE_CLEAR:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
        case DM_LIST_DEVICES:

            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

                uint32_t next = nl->next;
                nl->next = nl_size + (strlen(nl->name) + 1);
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                nl = (void*)nl + next;

        case DM_TABLE_STATUS:

            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;

            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);

        case DM_LIST_VERSIONS:

            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

                uint32_t next = vers->next;
                vers->next = vers_size + (strlen(vers->name) + 1);
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                vers = (void*)vers + next;

            ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
            ret = -TARGET_EFAULT;
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },

/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)

    const IOCTLEntry *ie;
    const argtype *arg_type;
    uint8_t buf_temp[MAX_STRUCT_SIZE];

        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        if (ie->target_cmd == cmd)

    arg_type = ie->arg_type;
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);

    switch(arg_type[0]) {
        ret = get_errno(ioctl(fd, ie->host_cmd));
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
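/* Note on do_ioctl() above: ioctl_entries[] is searched for a matching
 * target_cmd; entries with a do_ioctl callback (the IOCTL_SPECIAL cases such
 * as fiemap, ifconf and the device-mapper ioctls above) handle their own
 * conversion, while the generic path looks at arg_type[] and the
 * IOC_R/IOC_W/IOC_RW access flags to decide whether the argument must be
 * thunk-converted into buf_temp before the host ioctl, copied back to the
 * guest afterwards, or both. */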
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
static void target_to_host_termios (void *dst, const void *src)

    struct host_termios *host = dst;
    const struct target_termios *target = src;

        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];

static void host_to_target_termios (void *dst, const void *src)

    struct target_termios *target = dst;
    const struct host_termios *host = src;

        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
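/* Note: struct_termios_def bundles the two converters above with the size and
 * alignment of both representations, so the generic thunk machinery can
 * convert termios ioctl payloads (TCGETS/TCSETS and friends) without any
 * ioctl-specific code. */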
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
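/* Note: each bitmask_transtbl row pairs a (mask, bits) pattern on the target
 * side with the equivalent (mask, bits) pattern on the host side;
 * target_to_host_bitmask() and host_to_target_bitmask() use these tables to
 * translate flag words such as the termios c_*flag fields or mmap flags one
 * field at a time, which also covers multi-bit fields like CSIZE and the
 * output delay masks. */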
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)

    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);

/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)

    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* allocate the LDT */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
            read_exec_only == 1 &&
            limit_in_pages == 0 &&
            seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
        entry_2 |= (useable << 20);

    /* Install the new entry ... */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)

        ret = read_ldt(ptr, bytecount);
        ret = write_ldt(env, ptr, bytecount, 1);
        ret = write_ldt(env, ptr, bytecount, 0);
        ret = -TARGET_ENOSYS;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)

    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ... */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)

    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
#endif /* TARGET_I386 && TARGET_ABI32 */
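/* Note on do_set_thread_area()/do_get_thread_area() above: entry_1/entry_2
 * are the two 32-bit halves of an x86 segment descriptor, packed and unpacked
 * with the same bit layout the Linux kernel uses for set_thread_area(), and
 * an entry_number of -1 asks for the first free TLS slot in the GDT
 * (TARGET_GDT_ENTRY_TLS_MIN..TARGET_GDT_ENTRY_TLS_MAX). */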
4241 #ifndef TARGET_ABI32
4242 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
4249 case TARGET_ARCH_SET_GS
:
4250 case TARGET_ARCH_SET_FS
:
4251 if (code
== TARGET_ARCH_SET_GS
)
4255 cpu_x86_load_seg(env
, idx
, 0);
4256 env
->segs
[idx
].base
= addr
;
4258 case TARGET_ARCH_GET_GS
:
4259 case TARGET_ARCH_GET_FS
:
4260 if (code
== TARGET_ARCH_GET_GS
)
4264 val
= env
->segs
[idx
].base
;
4265 if (put_user(val
, addr
, abi_ulong
))
4266 ret
= -TARGET_EFAULT
;
4269 ret
= -TARGET_EINVAL
;
4276 #endif /* defined(TARGET_I386) */
4278 #define NEW_STACK_SIZE 0x40000
4280 #if defined(CONFIG_USE_NPTL)
4282 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
4285 pthread_mutex_t mutex
;
4286 pthread_cond_t cond
;
4289 abi_ulong child_tidptr
;
4290 abi_ulong parent_tidptr
;
4294 static void *clone_func(void *arg
)
4296 new_thread_info
*info
= arg
;
4302 ts
= (TaskState
*)thread_env
->opaque
;
4303 info
->tid
= gettid();
4304 env
->host_tid
= info
->tid
;
4306 if (info
->child_tidptr
)
4307 put_user_u32(info
->tid
, info
->child_tidptr
);
4308 if (info
->parent_tidptr
)
4309 put_user_u32(info
->tid
, info
->parent_tidptr
);
4310 /* Enable signals. */
4311 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
4312 /* Signal to the parent that we're ready. */
4313 pthread_mutex_lock(&info
->mutex
);
4314 pthread_cond_broadcast(&info
->cond
);
4315 pthread_mutex_unlock(&info
->mutex
);
4316 /* Wait until the parent has finshed initializing the tls state. */
4317 pthread_mutex_lock(&clone_lock
);
4318 pthread_mutex_unlock(&clone_lock
);
4325 static int clone_func(void *arg
)
4327 CPUArchState
*env
= arg
;
4334 /* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
    CPUArchState *new_env;
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(ENV_GET_CPU(new_env));
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);

        /* Wait for the child to initialize.  */
        pthread_cond_wait(&info.cond, &info.mutex);

        if (flags & CLONE_PARENT_SETTID)
            put_user_u32(ret, parent_tidptr);

        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway.  */
        new_stack = g_malloc0 (NEW_STACK_SIZE);
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
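
/*
 * Note: a guest pthread_create() normally arrives here as clone() with
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID set, so it
 * takes the CLONE_VM branch above and is emulated with a host pthread;
 * a plain fork()/vfork() has none of the CLONE_NPTL_FLAGS2 bits set and
 * falls through to the real host fork() path.
 */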
/* warning: doesn't handle Linux-specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_GETLK:
    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
    case TARGET_F_GETOWN:
    case TARGET_F_SETOWN:
    case TARGET_F_GETSIG:
    case TARGET_F_SETSIG:
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
    case TARGET_F_NOTIFY:
        return -TARGET_EINVAL;
    return -TARGET_EINVAL;
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)

    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));

        ret = get_errno(fcntl(fd, cmd, arg));
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
    if ((int16_t)uid == -1)
static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)
static inline int tswapid(int id)
#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
static inline int high2lowgid(int gid)
static inline int low2highuid(int uid)
static inline int low2highgid(int gid)
static inline int tswapid(int id)
#endif /* USE_UID16 */
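
/*
 * With USE_UID16 the guest ABI carries only 16-bit uids/gids, so the
 * helpers above convert between the 16-bit wire values and the host's
 * 32-bit ids; the special value -1 ("unchanged"/error) has to survive
 * the conversion instead of being widened into a real id, which is why
 * low2highuid()/low2highgid() test (int16_t)-1 explicitly.
 */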
void syscall_init(void)
    const argtype *arg_type;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
    return ((uint64_t)word1 << 32) | word0;
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
    if (regpairs_aligned(cpu_env)) {
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
    if (regpairs_aligned(cpu_env)) {
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
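
/*
 * Example: on a 32-bit guest a 64-bit length such as 0x100000000 is
 * passed as two abi_ulong register halves, and target_ftruncate64()
 * above rebuilds it with target_offset64(arg2, arg3).  Which half is
 * the high word depends on TARGET_WORDS_BIGENDIAN, and
 * regpairs_aligned() skips a register on ABIs that require 64-bit
 * pairs to start on an even register number.
 */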
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);

#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
        struct target_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
            target_to_host_timespec(pts, timeout);
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_CMP_REQUEUE:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   (base_op == FUTEX_CMP_REQUEUE
        return -TARGET_ENOSYS;
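
/*
 * Example: a contended guest pthread_mutex_lock() reaches do_futex() as
 * FUTEX_WAIT on a guest address, which is forwarded to the host futex on
 * g2h(uaddr).  Only the FUTEX_WAIT value is byte-swapped, because it is
 * the one argument the kernel compares against guest memory; wake counts
 * and requeue limits are plain host integers taken from registers.
 */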
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
int get_osversion(void)
    static int osversion;
    struct new_utsname buf;

    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    if (sys_uname(&buf))
    for (i = 0; i < 3; i++) {
        while (*s >= '0' && *s <= '9') {
        tmp = (tmp << 8) + n;
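
/*
 * Example: a release string of "2.6.32" (whether from the host uname or
 * from a user-supplied qemu_uname_release) is packed one byte per
 * component into 0x020620, which is the form the emulated kernel
 * version checks expect.
 */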
static int open_self_maps(void *cpu_env, int fd)
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;

    fp = fopen("/proc/self/maps", "r");

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        if ((fields < 10) || (fields > 11)) {
        if (!strncmp(path, "[stack]", 7)) {
        if (h2g_valid(min) && h2g_valid(max)) {
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d%s%s\n",
                    h2g(min), h2g(max), flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? " " : "", path);

#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
    dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
            (unsigned long long)ts->info->stack_limit,
            (unsigned long long)(ts->info->start_stack +
                                 (TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
            (unsigned long long)0);
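
/*
 * The lines generated above follow the usual /proc/<pid>/maps layout,
 * e.g. "08048000-08054000 r-xp 00000000 08:01 1234 /bin/true", except
 * that the host addresses are translated back to guest addresses with
 * h2g() and, for targets that need it, a synthetic [stack] entry is
 * appended from the image info.
 */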
static int open_self_stat(void *cpu_env, int fd)
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong start_stack = ts->info->start_stack;

    for (i = 0; i < 44; i++) {
            snprintf(buf, sizeof(buf), "%"PRId64" ", val);
        } else if (i == 1) {
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            snprintf(buf, sizeof(buf), "%"PRId64" ", val);
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');

        if (write(fd, buf, len) != len) {

static int open_self_auxv(void *cpu_env, int fd)
    TaskState *ts = ((CPUArchState *)cpu_env)->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;

    /*
     * Auxiliary vector is stored in target process stack.
     * Read in the whole auxv vector and copy it to the file.
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
        r = write(fd, ptr, len);
    lseek(fd, 0, SEEK_SET);
    unlock_user(ptr, auxv, len);
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "/proc/self/maps", open_self_maps },
        { "/proc/self/stat", open_self_stat },
        { "/proc/self/auxv", open_self_auxv },

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (!strncmp(pathname, fake_open->filename,
                     strlen(fake_open->filename))) {

    if (fake_open->filename) {
        char filename[PATH_MAX];

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
        lseek(fd, 0, SEEK_SET);

    return get_errno(open(path(pathname), flags, mode));
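
/*
 * Example: when the guest runs "cat /proc/self/maps", the open() is
 * routed here, matches the fake_open table, and reads back a temporary
 * file filled in by open_self_maps(), so the guest sees its own
 * translated address space rather than the emulator's.
 */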
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
        gemu_log("syscall %d", num);
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            CPUArchState **lastp;

            while (p && p != (CPUArchState *)cpu_env) {
                lastp = &p->next_cpu;
            /* If we didn't find the CPU for this thread then something is
               horribly wrong.  */
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            ts = ((CPUArchState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
            object_delete(OBJECT(ENV_GET_CPU(cpu_env)));
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
    case TARGET_NR_read:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(read(arg1, p, arg3));
        unlock_user(p, arg2, ret);
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(do_open(cpu_env, p,
                                target_to_host_bitmask(arg2, fcntl_flags_tbl),
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_openat(arg1,
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
        unlock_user(p, arg2, 0);
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
            ret = get_errno(waitpid(arg1, &status, arg3));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
            ret = get_errno(waitid(arg1, arg2, &info, arg4));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_link:
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
                ret = -TARGET_EFAULT;
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
                ret = -TARGET_EFAULT;
                ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_execve:
            char **argp, **envp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;

            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))

            argp = alloca((argc + 1) * sizeof(void *));
            envp = alloca((envc + 1) * sizeof(void *));

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))
                total_size += strlen(*q) + 1;
            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))
                total_size += strlen(*q) + 1;

            /* This case will not be caught by the host's execve() if its
               page size is bigger than the target's.  */
            if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
                ret = -TARGET_E2BIG;
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            ret = -TARGET_EFAULT;

            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
            ret = get_errno(time(&host_time));
                && put_user_sal(host_time, arg1))
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_break
    case TARGET_NR_break:
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
    case TARGET_NR_lseek:
        ret = get_errno(lseek(arg1, arg2, arg3));
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        ret = get_errno(getpid());
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        ret = get_errno(getpid());
:
5476 /* need to look at the data field */
5478 p
= lock_user_string(arg1
);
5479 p2
= lock_user_string(arg2
);
5480 p3
= lock_user_string(arg3
);
5481 if (!p
|| !p2
|| !p3
)
5482 ret
= -TARGET_EFAULT
;
5484 /* FIXME - arg5 should be locked, but it isn't clear how to
5485 * do that since it's not guaranteed to be a NULL-terminated
5489 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5491 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5493 unlock_user(p
, arg1
, 0);
5494 unlock_user(p2
, arg2
, 0);
5495 unlock_user(p3
, arg3
, 0);
5498 #ifdef TARGET_NR_umount
5499 case TARGET_NR_umount
:
5500 if (!(p
= lock_user_string(arg1
)))
5502 ret
= get_errno(umount(p
));
5503 unlock_user(p
, arg1
, 0);
5506 #ifdef TARGET_NR_stime /* not on alpha */
5507 case TARGET_NR_stime
:
5510 if (get_user_sal(host_time
, arg1
))
5512 ret
= get_errno(stime(&host_time
));
5516 case TARGET_NR_ptrace
:
5518 #ifdef TARGET_NR_alarm /* not on alpha */
5519 case TARGET_NR_alarm
:
5523 #ifdef TARGET_NR_oldfstat
5524 case TARGET_NR_oldfstat
:
5527 #ifdef TARGET_NR_pause /* not on alpha */
5528 case TARGET_NR_pause
:
5529 ret
= get_errno(pause());
5532 #ifdef TARGET_NR_utime
5533 case TARGET_NR_utime
:
5535 struct utimbuf tbuf
, *host_tbuf
;
5536 struct target_utimbuf
*target_tbuf
;
5538 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5540 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5541 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5542 unlock_user_struct(target_tbuf
, arg2
, 0);
5547 if (!(p
= lock_user_string(arg1
)))
5549 ret
= get_errno(utime(p
, host_tbuf
));
5550 unlock_user(p
, arg1
, 0);
5554 case TARGET_NR_utimes
:
5556 struct timeval
*tvp
, tv
[2];
5558 if (copy_from_user_timeval(&tv
[0], arg2
)
5559 || copy_from_user_timeval(&tv
[1],
5560 arg2
+ sizeof(struct target_timeval
)))
5566 if (!(p
= lock_user_string(arg1
)))
5568 ret
= get_errno(utimes(p
, tvp
));
5569 unlock_user(p
, arg1
, 0);
5572 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5573 case TARGET_NR_futimesat
:
5575 struct timeval
*tvp
, tv
[2];
5577 if (copy_from_user_timeval(&tv
[0], arg3
)
5578 || copy_from_user_timeval(&tv
[1],
5579 arg3
+ sizeof(struct target_timeval
)))
5585 if (!(p
= lock_user_string(arg2
)))
5587 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5588 unlock_user(p
, arg2
, 0);
5592 #ifdef TARGET_NR_stty
5593 case TARGET_NR_stty
:
5596 #ifdef TARGET_NR_gtty
5597 case TARGET_NR_gtty
:
5600 case TARGET_NR_access
:
5601 if (!(p
= lock_user_string(arg1
)))
5603 ret
= get_errno(access(path(p
), arg2
));
5604 unlock_user(p
, arg1
, 0);
5606 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5607 case TARGET_NR_faccessat
:
5608 if (!(p
= lock_user_string(arg2
)))
5610 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5611 unlock_user(p
, arg2
, 0);
5614 #ifdef TARGET_NR_nice /* not on alpha */
5615 case TARGET_NR_nice
:
5616 ret
= get_errno(nice(arg1
));
5619 #ifdef TARGET_NR_ftime
5620 case TARGET_NR_ftime
:
5623 case TARGET_NR_sync
:
5627 case TARGET_NR_kill
:
5628 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5630 case TARGET_NR_rename
:
5633 p
= lock_user_string(arg1
);
5634 p2
= lock_user_string(arg2
);
5636 ret
= -TARGET_EFAULT
;
5638 ret
= get_errno(rename(p
, p2
));
5639 unlock_user(p2
, arg2
, 0);
5640 unlock_user(p
, arg1
, 0);
5643 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5644 case TARGET_NR_renameat
:
5647 p
= lock_user_string(arg2
);
5648 p2
= lock_user_string(arg4
);
5650 ret
= -TARGET_EFAULT
;
5652 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5653 unlock_user(p2
, arg4
, 0);
5654 unlock_user(p
, arg2
, 0);
5658 case TARGET_NR_mkdir
:
5659 if (!(p
= lock_user_string(arg1
)))
5661 ret
= get_errno(mkdir(p
, arg2
));
5662 unlock_user(p
, arg1
, 0);
5664 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5665 case TARGET_NR_mkdirat
:
5666 if (!(p
= lock_user_string(arg2
)))
5668 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5669 unlock_user(p
, arg2
, 0);
5672 case TARGET_NR_rmdir
:
5673 if (!(p
= lock_user_string(arg1
)))
5675 ret
= get_errno(rmdir(p
));
5676 unlock_user(p
, arg1
, 0);
5679 ret
= get_errno(dup(arg1
));
5681 case TARGET_NR_pipe
:
5682 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5684 #ifdef TARGET_NR_pipe2
5685 case TARGET_NR_pipe2
:
5686 ret
= do_pipe(cpu_env
, arg1
,
5687 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
5690 case TARGET_NR_times
:
5692 struct target_tms
*tmsp
;
5694 ret
= get_errno(times(&tms
));
5696 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5699 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5700 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5701 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5702 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5705 ret
= host_to_target_clock_t(ret
);
5708 #ifdef TARGET_NR_prof
5709 case TARGET_NR_prof
:
5712 #ifdef TARGET_NR_signal
5713 case TARGET_NR_signal
:
5716 case TARGET_NR_acct
:
5718 ret
= get_errno(acct(NULL
));
5720 if (!(p
= lock_user_string(arg1
)))
5722 ret
= get_errno(acct(path(p
)));
5723 unlock_user(p
, arg1
, 0);
5726 #ifdef TARGET_NR_umount2 /* not on alpha */
5727 case TARGET_NR_umount2
:
5728 if (!(p
= lock_user_string(arg1
)))
5730 ret
= get_errno(umount2(p
, arg2
));
5731 unlock_user(p
, arg1
, 0);
5734 #ifdef TARGET_NR_lock
5735 case TARGET_NR_lock
:
5738 case TARGET_NR_ioctl
:
5739 ret
= do_ioctl(arg1
, arg2
, arg3
);
5741 case TARGET_NR_fcntl
:
5742 ret
= do_fcntl(arg1
, arg2
, arg3
);
5744 #ifdef TARGET_NR_mpx
5748 case TARGET_NR_setpgid
:
5749 ret
= get_errno(setpgid(arg1
, arg2
));
5751 #ifdef TARGET_NR_ulimit
5752 case TARGET_NR_ulimit
:
5755 #ifdef TARGET_NR_oldolduname
5756 case TARGET_NR_oldolduname
:
5759 case TARGET_NR_umask
:
5760 ret
= get_errno(umask(arg1
));
5762 case TARGET_NR_chroot
:
5763 if (!(p
= lock_user_string(arg1
)))
5765 ret
= get_errno(chroot(p
));
5766 unlock_user(p
, arg1
, 0);
5768 case TARGET_NR_ustat
:
5770 case TARGET_NR_dup2
:
5771 ret
= get_errno(dup2(arg1
, arg2
));
5773 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5774 case TARGET_NR_dup3
:
5775 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5778 #ifdef TARGET_NR_getppid /* not on alpha */
5779 case TARGET_NR_getppid
:
5780 ret
= get_errno(getppid());
5783 case TARGET_NR_getpgrp
:
5784 ret
= get_errno(getpgrp());
5786 case TARGET_NR_setsid
:
5787 ret
= get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_old_sigaction *old_act;
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = 0;
                unlock_user_struct(old_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                unlock_user_struct(old_act, arg3, 1);
#elif defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
                act.sa_restorer = old_act->sa_restorer;
                unlock_user_struct(old_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_restorer = oact.sa_restorer;
                unlock_user_struct(old_act, arg3, 1);
    case TARGET_NR_rt_sigaction:
#if defined(TARGET_ALPHA)
            struct target_sigaction act, oact, *pact = 0;
            struct target_rt_sigaction *rt_act;
            /* ??? arg4 == sizeof(sigset_t).  */
                if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
                act._sa_handler = rt_act->_sa_handler;
                act.sa_mask = rt_act->sa_mask;
                act.sa_flags = rt_act->sa_flags;
                act.sa_restorer = arg5;
                unlock_user_struct(rt_act, arg2, 0);
            ret = get_errno(do_sigaction(arg1, pact, &oact));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
                rt_act->_sa_handler = oact._sa_handler;
                rt_act->sa_mask = oact.sa_mask;
                rt_act->sa_flags = oact.sa_flags;
                unlock_user_struct(rt_act, arg3, 1);
            struct target_sigaction *act;
            struct target_sigaction *oact;
                if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
                if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                    ret = -TARGET_EFAULT;
                    goto rt_sigaction_fail;
            ret = get_errno(do_sigaction(arg1, act, oact));
                unlock_user_struct(act, arg2, 0);
                unlock_user_struct(oact, arg3, 1);
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
            abi_ulong target_set;
            sigprocmask(0, NULL, &cur_set);
            host_to_target_old_sigset(&target_set, &cur_set);
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
            sigset_t set, oset, cur_set;
            abi_ulong target_set = arg1;
            sigprocmask(0, NULL, &cur_set);
            target_to_host_old_sigset(&set, &target_set);
            sigorset(&set, &set, &cur_set);
            sigprocmask(SIG_SETMASK, &set, &oset);
            host_to_target_old_sigset(&target_set, &oset);
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                ret = -TARGET_EINVAL;
            target_to_host_old_sigset(&set, &mask);
            ret = get_errno(sigprocmask(how, &set, &oldset));
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            sigset_t set, oldset, *set_ptr;
            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                ret = -TARGET_EINVAL;
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigprocmask:
            sigset_t set, oldset, *set_ptr;
            case TARGET_SIG_BLOCK:
            case TARGET_SIG_UNBLOCK:
            case TARGET_SIG_SETMASK:
                ret = -TARGET_EINVAL;
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
            ret = get_errno(sigprocmask(how, set_ptr, &oldset));
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigpending:
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&set, &mask);
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigsuspend:
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigtimedwait:
            struct timespec uts, *puts;
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
                target_to_host_timespec(puts, arg3);
            ret = get_errno(sigtimedwait(&set, &uinfo, puts));
            if (!is_error(ret) && arg2) {
                if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
                host_to_target_siginfo(p, &uinfo);
                unlock_user(p, arg2, sizeof(target_siginfo_t));
    case TARGET_NR_rt_sigqueueinfo:
            if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setrlimit:
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            ret = get_errno(setrlimit(resource, &rlim));
    case TARGET_NR_getrlimit:
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
    case TARGET_NR_getrusage:
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                host_to_target_rusage(arg2, &rusage);
    case TARGET_NR_gettimeofday:
            ret = get_errno(gettimeofday(&tv, NULL));
            if (!is_error(ret)) {
                if (copy_to_user_timeval(arg1, &tv))
    case TARGET_NR_settimeofday:
            if (copy_from_user_timeval(&tv, arg1))
            ret = get_errno(settimeofday(&tv, NULL));
#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
    case TARGET_NR_select:
            struct target_sel_arg_struct *sel;
            abi_ulong inp, outp, exp, tvp;
            if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
            nsel = tswapal(sel->n);
            inp = tswapal(sel->inp);
            outp = tswapal(sel->outp);
            exp = tswapal(sel->exp);
            tvp = tswapal(sel->tvp);
            unlock_user_struct(sel, arg1, 0);
            ret = do_select(nsel, inp, outp, exp, tvp);
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
            abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
            fd_set rfds, wfds, efds;
            fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
            struct timespec ts, *ts_ptr;

            /*
             * The 6th arg is actually two args smashed together,
             * so we cannot use the C library.
             */
            abi_ulong arg_sigset, arg_sigsize, *arg7;
            target_sigset_t *target_sigset;

            ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
            ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
            ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);

            /*
             * This takes a timespec, and not a timeval, so we cannot
             * use the do_select() helper ...
             */
                if (target_to_host_timespec(&ts, ts_addr)) {

            /* Extract the two packed args for the sigset */
                sig.size = _NSIG / 8;

                arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
                arg_sigset = tswapal(arg7[0]);
                arg_sigsize = tswapal(arg7[1]);
                unlock_user(arg7, arg6, 0);

                    if (arg_sigsize != sizeof(*target_sigset)) {
                        /* Like the kernel, we enforce correct size sigsets */
                        ret = -TARGET_EINVAL;
                    target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                              sizeof(*target_sigset), 1);
                    if (!target_sigset) {
                    target_to_host_sigset(&set, target_sigset);
                    unlock_user(target_sigset, arg_sigset, 0);

            ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
            if (!is_error(ret)) {
                if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
                if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
                if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
                if (ts_addr && host_to_target_timespec(ts_addr, &ts))
:
6340 p
= lock_user_string(arg1
);
6341 p2
= lock_user_string(arg2
);
6343 ret
= -TARGET_EFAULT
;
6345 ret
= get_errno(symlink(p
, p2
));
6346 unlock_user(p2
, arg2
, 0);
6347 unlock_user(p
, arg1
, 0);
6350 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
6351 case TARGET_NR_symlinkat
:
6354 p
= lock_user_string(arg1
);
6355 p2
= lock_user_string(arg3
);
6357 ret
= -TARGET_EFAULT
;
6359 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
6360 unlock_user(p2
, arg3
, 0);
6361 unlock_user(p
, arg1
, 0);
6365 #ifdef TARGET_NR_oldlstat
6366 case TARGET_NR_oldlstat
:
6369 case TARGET_NR_readlink
:
6372 p
= lock_user_string(arg1
);
6373 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
6375 ret
= -TARGET_EFAULT
;
6377 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
6378 char real
[PATH_MAX
];
6379 temp
= realpath(exec_path
,real
);
6380 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
6381 snprintf((char *)p2
, arg3
, "%s", real
);
6384 ret
= get_errno(readlink(path(p
), p2
, arg3
));
6386 unlock_user(p2
, arg2
, ret
);
6387 unlock_user(p
, arg1
, 0);
6390 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6391 case TARGET_NR_readlinkat
:
6394 p
= lock_user_string(arg2
);
6395 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6397 ret
= -TARGET_EFAULT
;
6399 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6400 unlock_user(p2
, arg3
, ret
);
6401 unlock_user(p
, arg2
, 0);
6405 #ifdef TARGET_NR_uselib
6406 case TARGET_NR_uselib
:
6409 #ifdef TARGET_NR_swapon
6410 case TARGET_NR_swapon
:
6411 if (!(p
= lock_user_string(arg1
)))
6413 ret
= get_errno(swapon(p
, arg2
));
6414 unlock_user(p
, arg1
, 0);
6417 case TARGET_NR_reboot
:
6418 if (!(p
= lock_user_string(arg4
)))
6420 ret
= reboot(arg1
, arg2
, arg3
, p
);
6421 unlock_user(p
, arg4
, 0);
6423 #ifdef TARGET_NR_readdir
6424 case TARGET_NR_readdir
:
6427 #ifdef TARGET_NR_mmap
6428 case TARGET_NR_mmap
:
6429 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6430 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6431 || defined(TARGET_S390X)
6434 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6435 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6443 unlock_user(v
, arg1
, 0);
6444 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6445 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6449 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6450 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6456 #ifdef TARGET_NR_mmap2
6457 case TARGET_NR_mmap2
:
6459 #define MMAP_SHIFT 12
6461 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6462 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6464 arg6
<< MMAP_SHIFT
));
6467 case TARGET_NR_munmap
:
6468 ret
= get_errno(target_munmap(arg1
, arg2
));
6470 case TARGET_NR_mprotect
:
6472 TaskState
*ts
= ((CPUArchState
*)cpu_env
)->opaque
;
6473 /* Special hack to detect libc making the stack executable. */
6474 if ((arg3
& PROT_GROWSDOWN
)
6475 && arg1
>= ts
->info
->stack_limit
6476 && arg1
<= ts
->info
->start_stack
) {
6477 arg3
&= ~PROT_GROWSDOWN
;
6478 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6479 arg1
= ts
->info
->stack_limit
;
6482 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6484 #ifdef TARGET_NR_mremap
6485 case TARGET_NR_mremap
:
6486 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6489 /* ??? msync/mlock/munlock are broken for softmmu. */
6490 #ifdef TARGET_NR_msync
6491 case TARGET_NR_msync
:
6492 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6495 #ifdef TARGET_NR_mlock
6496 case TARGET_NR_mlock
:
6497 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6500 #ifdef TARGET_NR_munlock
6501 case TARGET_NR_munlock
:
6502 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6505 #ifdef TARGET_NR_mlockall
6506 case TARGET_NR_mlockall
:
6507 ret
= get_errno(mlockall(arg1
));
6510 #ifdef TARGET_NR_munlockall
6511 case TARGET_NR_munlockall
:
6512 ret
= get_errno(munlockall());
6515 case TARGET_NR_truncate
:
6516 if (!(p
= lock_user_string(arg1
)))
6518 ret
= get_errno(truncate(p
, arg2
));
6519 unlock_user(p
, arg1
, 0);
6521 case TARGET_NR_ftruncate
:
6522 ret
= get_errno(ftruncate(arg1
, arg2
));
6524 case TARGET_NR_fchmod
:
6525 ret
= get_errno(fchmod(arg1
, arg2
));
6527 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6528 case TARGET_NR_fchmodat
:
6529 if (!(p
= lock_user_string(arg2
)))
6531 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
6532 unlock_user(p
, arg2
, 0);
6535 case TARGET_NR_getpriority
:
6536 /* Note that negative values are valid for getpriority, so we must
6537 differentiate based on errno settings. */
6539 ret
= getpriority(arg1
, arg2
);
6540 if (ret
== -1 && errno
!= 0) {
6541 ret
= -host_to_target_errno(errno
);
6545 /* Return value is the unbiased priority. Signal no error. */
6546 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
6548 /* Return value is a biased priority to avoid negative numbers. */
6552 case TARGET_NR_setpriority
:
6553 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6555 #ifdef TARGET_NR_profil
6556 case TARGET_NR_profil
:
6559 case TARGET_NR_statfs
:
6560 if (!(p
= lock_user_string(arg1
)))
6562 ret
= get_errno(statfs(path(p
), &stfs
));
6563 unlock_user(p
, arg1
, 0);
6565 if (!is_error(ret
)) {
6566 struct target_statfs
*target_stfs
;
6568 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6570 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6571 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6572 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6573 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6574 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6575 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6576 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6577 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6578 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6579 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6580 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6581 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6582 unlock_user_struct(target_stfs
, arg2
, 1);
6585 case TARGET_NR_fstatfs
:
6586 ret
= get_errno(fstatfs(arg1
, &stfs
));
6587 goto convert_statfs
;
6588 #ifdef TARGET_NR_statfs64
6589 case TARGET_NR_statfs64
:
6590 if (!(p
= lock_user_string(arg1
)))
6592 ret
= get_errno(statfs(path(p
), &stfs
));
6593 unlock_user(p
, arg1
, 0);
6595 if (!is_error(ret
)) {
6596 struct target_statfs64
*target_stfs
;
6598 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6600 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6601 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6602 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6603 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6604 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6605 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6606 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6607 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6608 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6609 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6610 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
6611 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
6612 unlock_user_struct(target_stfs
, arg3
, 1);
6615 case TARGET_NR_fstatfs64
:
6616 ret
= get_errno(fstatfs(arg1
, &stfs
));
6617 goto convert_statfs64
;
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept(arg1, arg2, arg3);
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
    case TARGET_NR_syslog:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_setitimer:
            struct itimerval value, ovalue, *pvalue;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
    case TARGET_NR_getitimer:
            struct itimerval value;
            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_fstat:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            struct target_stat *target_st;
            if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
            memset(target_st, 0, sizeof(*target_st));
            __put_user(st.st_dev, &target_st->st_dev);
            __put_user(st.st_ino, &target_st->st_ino);
            __put_user(st.st_mode, &target_st->st_mode);
            __put_user(st.st_uid, &target_st->st_uid);
            __put_user(st.st_gid, &target_st->st_gid);
            __put_user(st.st_nlink, &target_st->st_nlink);
            __put_user(st.st_rdev, &target_st->st_rdev);
            __put_user(st.st_size, &target_st->st_size);
            __put_user(st.st_blksize, &target_st->st_blksize);
            __put_user(st.st_blocks, &target_st->st_blocks);
            __put_user(st.st_atime, &target_st->target_st_atime);
            __put_user(st.st_mtime, &target_st->target_st_mtime);
            __put_user(st.st_ctime, &target_st->target_st_ctime);
            unlock_user_struct(target_st, arg2, 1);
6797 #ifdef TARGET_NR_olduname
6798 case TARGET_NR_olduname
:
6801 #ifdef TARGET_NR_iopl
6802 case TARGET_NR_iopl
:
6805 case TARGET_NR_vhangup
:
6806 ret
= get_errno(vhangup());
6808 #ifdef TARGET_NR_idle
6809 case TARGET_NR_idle
:
6812 #ifdef TARGET_NR_syscall
6813 case TARGET_NR_syscall
:
6814 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6815 arg6
, arg7
, arg8
, 0);
6818 case TARGET_NR_wait4
:
6821 abi_long status_ptr
= arg2
;
6822 struct rusage rusage
, *rusage_ptr
;
6823 abi_ulong target_rusage
= arg4
;
6825 rusage_ptr
= &rusage
;
6828 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6829 if (!is_error(ret
)) {
6830 if (status_ptr
&& ret
) {
6831 status
= host_to_target_waitstatus(status
);
6832 if (put_user_s32(status
, status_ptr
))
6836 host_to_target_rusage(target_rusage
, &rusage
);
6840 #ifdef TARGET_NR_swapoff
6841 case TARGET_NR_swapoff
:
6842 if (!(p
= lock_user_string(arg1
)))
6844 ret
= get_errno(swapoff(p
));
6845 unlock_user(p
, arg1
, 0);
6848 case TARGET_NR_sysinfo
:
6850 struct target_sysinfo
*target_value
;
6851 struct sysinfo value
;
6852 ret
= get_errno(sysinfo(&value
));
6853 if (!is_error(ret
) && arg1
)
6855 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6857 __put_user(value
.uptime
, &target_value
->uptime
);
6858 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6859 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6860 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6861 __put_user(value
.totalram
, &target_value
->totalram
);
6862 __put_user(value
.freeram
, &target_value
->freeram
);
6863 __put_user(value
.sharedram
, &target_value
->sharedram
);
6864 __put_user(value
.bufferram
, &target_value
->bufferram
);
6865 __put_user(value
.totalswap
, &target_value
->totalswap
);
6866 __put_user(value
.freeswap
, &target_value
->freeswap
);
6867 __put_user(value
.procs
, &target_value
->procs
);
6868 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6869 __put_user(value
.freehigh
, &target_value
->freehigh
);
6870 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6871 unlock_user_struct(target_value
, arg1
, 1);
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = get_errno(do_semop(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
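    /* clone() argument order differs between kernel ports: the positions of
     * the parent/child tid pointers and the TLS argument are shuffled per
     * architecture, which is why do_fork() is invoked with a different
     * permutation of the syscall arguments in each branch of the #if chain
     * below. */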
    case TARGET_NR_clone:
#if defined(TARGET_SH4) || defined(TARGET_ALPHA)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
#elif defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_S390X)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#endif
        break;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy (buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
    case TARGET_NR_adjtimex:
        goto unimplemented;
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                goto efault;
            }
        }
        break;
#endif
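    /* getdents: when the guest ABI is 32-bit but the host is 64-bit, the
     * struct linux_dirent records returned by the host have wider
     * d_ino/d_off fields than the guest expects, so each record is repacked
     * into a struct target_dirent with an adjusted record length; otherwise
     * the records are only byte-swapped in place. */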
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = malloc(count);
            if (!dirp) {
                ret = -TARGET_ENOMEM;
                goto fail;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
                int count1, tnamelen;

                count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    goto efault;
                tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    count1 += treclen;
                }
                ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        break;
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                goto efault;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        break;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
# ifdef TARGET_S390X
    case TARGET_NR_select:
# else
    case TARGET_NR__newselect:
# endif
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
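    /* poll/ppoll: the guest pollfd array is converted to host struct pollfd
     * on the way in and the revents fields are swapped back on the way out;
     * for ppoll the timeout timespec and the signal mask are converted as
     * well. */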
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            int timeout = arg3;
            struct pollfd *pfd;
            unsigned int i;

            target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
            if (!target_pfd)
                goto efault;

            pfd = alloca(sizeof(struct pollfd) * nfds);
            for(i = 0; i < nfds; i++) {
                pfd[i].fd = tswap32(target_pfd[i].fd);
                pfd[i].events = tswap16(target_pfd[i].events);
            }

# ifdef TARGET_NR_ppoll
            if (num == TARGET_NR_ppoll) {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                } else {
                    timeout_ts = NULL;
                }

                if (arg4) {
                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));

                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
            } else
# endif
                ret = get_errno(poll(pfd, nfds, timeout));

            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(flock(arg1, arg2));
        break;
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
        break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (copy_to_user(arg3, mask, ret)) {
                    goto efault;
                }
            }
        }
        break;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
                goto efault;
            }
            memcpy(mask, p, arg2);
            unlock_user_struct(p, arg2, 0);

            ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
        break;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            ret = get_errno(sched_setparam(arg1, &schp));
        }
        break;
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    goto efault;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        break;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                goto efault;
            }
            break;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            break;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            break;
        }
#endif
        default:
            /* Most prctl options have no pointer arguments */
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
            break;
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
        break;
#else
        goto unimplemented;
#endif
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        break;
    case TARGET_NR_capget:
        goto unimplemented;
    case TARGET_NR_capset:
        goto unimplemented;
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
        break;
#else
        goto unimplemented;
#endif
    case TARGET_NR_sendfile:
        goto unimplemented;
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
                        0, 0, 0, 0));
        break;
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                goto efault;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2)))
            goto efault;
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
#else
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
#endif
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
        break;
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
        break;
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
                if (!target_grouplist)
                    goto efault;
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * 2);
            }
        }
        break;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowuid(ruid), arg1)
                    || put_user_u16(high2lowuid(euid), arg2)
                    || put_user_u16(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowgid(rgid), arg1)
                    || put_user_u16(high2lowgid(egid), arg2)
                    || put_user_u16(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
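    /* The next two Alpha-only cases translate between the hardware FPCR
     * layout used by cpu_alpha_load_fpcr()/cpu_alpha_store_fpcr() and the
     * software completion (SWCR) view that osf_getsysinfo/osf_setsysinfo
     * expose to user space, mirroring the kernel's ieee_fpcr_to_swcr and
     * ieee_swcr_to_fpcr conversions. */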
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr.  */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                        | SWCR_TRAP_ENABLE_DZE
                                        | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                        | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64 (swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    goto efault;
                }
                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, orig_fpcr;
                int si_code;

                if (get_user_u64(exc, arg2)) {
                    goto efault;
                }

                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* We only add to the exception status here.  */
                fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;

                /* Old exceptions are not signaled.  */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                /* If any exceptions set by this call,
                   and are unmasked, send a signal.  */
                si_code = 0;
                if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                    si_code = TARGET_FPE_FLTRES;
                }
                if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                    si_code = TARGET_FPE_FLTUND;
                }
                if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                    si_code = TARGET_FPE_FLTOVF;
                }
                if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                    si_code = TARGET_FPE_FLTDIV;
                }
                if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                    si_code = TARGET_FPE_FLTINV;
                }
                if (si_code != 0) {
                    target_siginfo_t info;
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
                }
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
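    /* fcntl64 on 32-bit guests: F_GETLK64/F_SETLK64/F_SETLKW64 carry a
     * struct flock64 whose layout (and, for ARM EABI, alignment) differs
     * from the host's, so the lock description is converted explicitly in
     * both directions; every other command is forwarded to do_fcntl(). */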
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
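    /* Extended attribute syscalls: a zero buffer/value pointer is passed
     * through as NULL so the guest can probe the required size, otherwise
     * the buffer, the path and the attribute name are locked into host
     * memory around the call. */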
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        break;
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
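    /* POSIX message queue calls: mq_timedsend/mq_timedreceive convert the
     * guest timespec before the call and write the value back afterwards;
     * mq_getsetattr copies struct mq_attr in both directions with the
     * copy_*_mq_attr() helpers. */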
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg2);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
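    /* epoll_wait/epoll_pwait share one implementation: events are received
     * into a host-side array and then copied out with the 32-bit events
     * field byte-swapped and the 64-bit epoll_data payload transferred
     * verbatim (it is opaque to the kernel). */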
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
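    /* prlimit64 passes fixed-width 64-bit limits, so the new/old rlimit
     * structures are converted with tswap64() rather than the
     * host_to_target_rlim() helpers used for the legacy rlimit calls. */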
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;