/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>

int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);

#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include "qemu-common.h"
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include <attr/xattr.h>
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include "linux_loop.h"
#include "cpu-uname.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
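/* Thin wrappers around the host syscall interface: each _syscallN macro
   expands to a static function taking N arguments and passing them
   straight to syscall(2) with the corresponding __NR_* number. */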
#define _syscall0(type,name)                                            \
static type name (void)                                                 \
{                                                                       \
    return syscall(__NR_##name);                                        \
}

#define _syscall1(type,name,type1,arg1)                                 \
static type name (type1 arg1)                                           \
{                                                                       \
    return syscall(__NR_##name, arg1);                                  \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)                      \
static type name (type1 arg1,type2 arg2)                                \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2);                            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)           \
static type name (type1 arg1,type2 arg2,type3 arg3)                     \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3);                      \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)          \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                            \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);          \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                 \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                            \
{                                                                       \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);    \
}
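/* Alias the host syscall numbers under sys_* names so the wrappers
   generated below do not clash with prototypes the host C library may
   already declare for the same calls. */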
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
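/* Translation table between target and host open(2) flag bits; used with
   target_to_host_bitmask() when converting the mode/flags arguments of
   open-style syscalls. */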
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
  return (0);

#undef COPY_UTSNAME_FIELD
}
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#if defined(CONFIG_ATFILE)
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */
#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host bits.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT  */
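/* Wrappers for the host inotify interface.  When the host lacks inotify,
   the target syscall numbers are undefined below so the calls fall through
   to the generic "unimplemented" handling. */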
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
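/* Errno translation: host errno values are remapped to the target's
   numbering through the tables below; values without an entry pass
   through unchanged. */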
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]		= TARGET_EIDRM,
    [ECHRNG]		= TARGET_ECHRNG,
    [EL2NSYNC]		= TARGET_EL2NSYNC,
    [EL3HLT]		= TARGET_EL3HLT,
    [EL3RST]		= TARGET_EL3RST,
    [ELNRNG]		= TARGET_ELNRNG,
    [EUNATCH]		= TARGET_EUNATCH,
    [ENOCSI]		= TARGET_ENOCSI,
    [EL2HLT]		= TARGET_EL2HLT,
    [EDEADLK]		= TARGET_EDEADLK,
    [ENOLCK]		= TARGET_ENOLCK,
    [EBADE]		= TARGET_EBADE,
    [EBADR]		= TARGET_EBADR,
    [EXFULL]		= TARGET_EXFULL,
    [ENOANO]		= TARGET_ENOANO,
    [EBADRQC]		= TARGET_EBADRQC,
    [EBADSLT]		= TARGET_EBADSLT,
    [EBFONT]		= TARGET_EBFONT,
    [ENOSTR]		= TARGET_ENOSTR,
    [ENODATA]		= TARGET_ENODATA,
    [ETIME]		= TARGET_ETIME,
    [ENOSR]		= TARGET_ENOSR,
    [ENONET]		= TARGET_ENONET,
    [ENOPKG]		= TARGET_ENOPKG,
    [EREMOTE]		= TARGET_EREMOTE,
    [ENOLINK]		= TARGET_ENOLINK,
    [EADV]		= TARGET_EADV,
    [ESRMNT]		= TARGET_ESRMNT,
    [ECOMM]		= TARGET_ECOMM,
    [EPROTO]		= TARGET_EPROTO,
    [EDOTDOT]		= TARGET_EDOTDOT,
    [EMULTIHOP]		= TARGET_EMULTIHOP,
    [EBADMSG]		= TARGET_EBADMSG,
    [ENAMETOOLONG]	= TARGET_ENAMETOOLONG,
    [EOVERFLOW]		= TARGET_EOVERFLOW,
    [ENOTUNIQ]		= TARGET_ENOTUNIQ,
    [EBADFD]		= TARGET_EBADFD,
    [EREMCHG]		= TARGET_EREMCHG,
    [ELIBACC]		= TARGET_ELIBACC,
    [ELIBBAD]		= TARGET_ELIBBAD,
    [ELIBSCN]		= TARGET_ELIBSCN,
    [ELIBMAX]		= TARGET_ELIBMAX,
    [ELIBEXEC]		= TARGET_ELIBEXEC,
    [EILSEQ]		= TARGET_EILSEQ,
    [ENOSYS]		= TARGET_ENOSYS,
    [ELOOP]		= TARGET_ELOOP,
    [ERESTART]		= TARGET_ERESTART,
    [ESTRPIPE]		= TARGET_ESTRPIPE,
    [ENOTEMPTY]		= TARGET_ENOTEMPTY,
    [EUSERS]		= TARGET_EUSERS,
    [ENOTSOCK]		= TARGET_ENOTSOCK,
    [EDESTADDRREQ]	= TARGET_EDESTADDRREQ,
    [EMSGSIZE]		= TARGET_EMSGSIZE,
    [EPROTOTYPE]	= TARGET_EPROTOTYPE,
    [ENOPROTOOPT]	= TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]	= TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]	= TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]	= TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]	= TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]	= TARGET_EAFNOSUPPORT,
    [EADDRINUSE]	= TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]	= TARGET_EADDRNOTAVAIL,
    [ENETDOWN]		= TARGET_ENETDOWN,
    [ENETUNREACH]	= TARGET_ENETUNREACH,
    [ENETRESET]		= TARGET_ENETRESET,
    [ECONNABORTED]	= TARGET_ECONNABORTED,
    [ECONNRESET]	= TARGET_ECONNRESET,
    [ENOBUFS]		= TARGET_ENOBUFS,
    [EISCONN]		= TARGET_EISCONN,
    [ENOTCONN]		= TARGET_ENOTCONN,
    [EUCLEAN]		= TARGET_EUCLEAN,
    [ENOTNAM]		= TARGET_ENOTNAM,
    [ENAVAIL]		= TARGET_ENAVAIL,
    [EISNAM]		= TARGET_EISNAM,
    [EREMOTEIO]		= TARGET_EREMOTEIO,
    [ESHUTDOWN]		= TARGET_ESHUTDOWN,
    [ETOOMANYREFS]	= TARGET_ETOOMANYREFS,
    [ETIMEDOUT]		= TARGET_ETIMEDOUT,
    [ECONNREFUSED]	= TARGET_ECONNREFUSED,
    [EHOSTDOWN]		= TARGET_EHOSTDOWN,
    [EHOSTUNREACH]	= TARGET_EHOSTUNREACH,
    [EALREADY]		= TARGET_EALREADY,
    [EINPROGRESS]	= TARGET_EINPROGRESS,
    [ESTALE]		= TARGET_ESTALE,
    [ECANCELED]		= TARGET_ECANCELED,
    [ENOMEDIUM]		= TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]	= TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]		= TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]	= TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]	= TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]	= TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]	= TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]	= TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
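/* Emulation of the target's program break: the target heap is grown by
   mmap()ing anonymous pages just above the current break. */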
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong new_alloc_size;
    abi_long mapped_addr;

    DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
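/* Helpers that marshal fd_set bitmaps between target and host layouts,
   one abi_ulong worth of bits at a time. */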
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
{
    target_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapl(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY || target_rlim_swap != (rlim_t)target_rlim_swap)
        result = RLIM_INFINITY;
    else
        result = target_rlim_swap;

    return result;
}

static inline target_ulong host_to_target_rlim(rlim_t rlim)
{
    target_ulong target_rlim_swap;
    target_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapl(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:         return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:       return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:       return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:      return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:      return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:    return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:   return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:       return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:     return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:      return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:     return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING: return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:      return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
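/* select(2) emulation: copy the three fd sets and the timeout in from the
   target, call the host select(), then copy the results back out. */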
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret)
        return ret;
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret)
        return ret;
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret)
        return ret;

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}
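/* setsockopt()/getsockopt() emulation.  Only a subset of levels and option
   names is translated; anything else is reported as unsupported. */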
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:      optname = SO_DEBUG;      break;
        case TARGET_SO_REUSEADDR:  optname = SO_REUSEADDR;  break;
        case TARGET_SO_TYPE:       optname = SO_TYPE;       break;
        case TARGET_SO_ERROR:      optname = SO_ERROR;      break;
        case TARGET_SO_DONTROUTE:  optname = SO_DONTROUTE;  break;
        case TARGET_SO_BROADCAST:  optname = SO_BROADCAST;  break;
        case TARGET_SO_SNDBUF:     optname = SO_SNDBUF;     break;
        case TARGET_SO_RCVBUF:     optname = SO_RCVBUF;     break;
        case TARGET_SO_KEEPALIVE:  optname = SO_KEEPALIVE;  break;
        case TARGET_SO_OOBINLINE:  optname = SO_OOBINLINE;  break;
        case TARGET_SO_NO_CHECK:   optname = SO_NO_CHECK;   break;
        case TARGET_SO_PRIORITY:   optname = SO_PRIORITY;   break;
        case TARGET_SO_BSDCOMPAT:  optname = SO_BSDCOMPAT;  break;
        case TARGET_SO_PASSCRED:   optname = SO_PASSCRED;   break;
        case TARGET_SO_TIMESTAMP:  optname = SO_TIMESTAMP;  break;
        case TARGET_SO_RCVLOWAT:   optname = SO_RCVLOWAT;   break;
        case TARGET_SO_RCVTIMEO:   optname = SO_RCVTIMEO;   break;
        case TARGET_SO_SNDTIMEO:   optname = SO_SNDTIMEO;   break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:      optname = SO_DEBUG;      goto int_case;
        case TARGET_SO_REUSEADDR:  optname = SO_REUSEADDR;  goto int_case;
        case TARGET_SO_TYPE:       optname = SO_TYPE;       goto int_case;
        case TARGET_SO_ERROR:      optname = SO_ERROR;      goto int_case;
        case TARGET_SO_DONTROUTE:  optname = SO_DONTROUTE;  goto int_case;
        case TARGET_SO_BROADCAST:  optname = SO_BROADCAST;  goto int_case;
        case TARGET_SO_SNDBUF:     optname = SO_SNDBUF;     goto int_case;
        case TARGET_SO_RCVBUF:     optname = SO_RCVBUF;     goto int_case;
        case TARGET_SO_KEEPALIVE:  optname = SO_KEEPALIVE;  goto int_case;
        case TARGET_SO_OOBINLINE:  optname = SO_OOBINLINE;  goto int_case;
        case TARGET_SO_NO_CHECK:   optname = SO_NO_CHECK;   goto int_case;
        case TARGET_SO_PRIORITY:   optname = SO_PRIORITY;   goto int_case;
        case TARGET_SO_BSDCOMPAT:  optname = SO_BSDCOMPAT;  goto int_case;
        case TARGET_SO_PASSCRED:   optname = SO_PASSCRED;   goto int_case;
        case TARGET_SO_TIMESTAMP:  optname = SO_TIMESTAMP;  goto int_case;
        case TARGET_SO_RCVLOWAT:   optname = SO_RCVLOWAT;   goto int_case;
        default:
            goto int_case;
        }
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
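/* Wrappers for the individual socket calls.  Each converts target arguments
   (and sockaddr contents where needed) before invoking the host call. */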
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}
1757 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1763 if ((int)addrlen
< 0) {
1764 return -TARGET_EINVAL
;
1767 addr
= alloca(addrlen
+1);
1769 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1773 return get_errno(bind(sockfd
, addr
, addrlen
));
1776 /* do_connect() Must return target values and target errnos. */
1777 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1783 if ((int)addrlen
< 0) {
1784 return -TARGET_EINVAL
;
1787 addr
= alloca(addrlen
);
1789 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1793 return get_errno(connect(sockfd
, addr
, addrlen
));
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (!is_error(ret))
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
       return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
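/* On targets that multiplex the socket API through socketcall(2), unpack the
   argument vector from target memory and dispatch to the wrappers above. */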
2036 #ifdef TARGET_NR_socketcall
2037 /* do_socketcall() Must return target values and target errnos. */
2038 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2041 const int n
= sizeof(abi_ulong
);
2046 abi_ulong domain
, type
, protocol
;
2048 if (get_user_ual(domain
, vptr
)
2049 || get_user_ual(type
, vptr
+ n
)
2050 || get_user_ual(protocol
, vptr
+ 2 * n
))
2051 return -TARGET_EFAULT
;
2053 ret
= do_socket(domain
, type
, protocol
);
2059 abi_ulong target_addr
;
2062 if (get_user_ual(sockfd
, vptr
)
2063 || get_user_ual(target_addr
, vptr
+ n
)
2064 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2065 return -TARGET_EFAULT
;
2067 ret
= do_bind(sockfd
, target_addr
, addrlen
);
2070 case SOCKOP_connect
:
2073 abi_ulong target_addr
;
2076 if (get_user_ual(sockfd
, vptr
)
2077 || get_user_ual(target_addr
, vptr
+ n
)
2078 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2079 return -TARGET_EFAULT
;
2081 ret
= do_connect(sockfd
, target_addr
, addrlen
);
2086 abi_ulong sockfd
, backlog
;
2088 if (get_user_ual(sockfd
, vptr
)
2089 || get_user_ual(backlog
, vptr
+ n
))
2090 return -TARGET_EFAULT
;
2092 ret
= get_errno(listen(sockfd
, backlog
));
2098 abi_ulong target_addr
, target_addrlen
;
2100 if (get_user_ual(sockfd
, vptr
)
2101 || get_user_ual(target_addr
, vptr
+ n
)
2102 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd, msg, len, flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd, msg, len, flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd, msg, len, flags, addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd, msg, len, flags, addr, addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            int fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd, level, optname, optval, optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd, level, optname, optval, optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
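
/* SysV IPC emulation follows.  Shared memory segments attached by the guest
 * are tracked in shm_regions[] so that do_shmdt() can later clear the page
 * flags for the mapped range. */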
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
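
/* semctl()'s fourth argument is a union on the host; the guest passes it as a
 * single register value, so target_semun mirrors the host union with
 * guest-address (abi_ulong) members. */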
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
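
/* do_semctl() converts the semun argument according to the command class:
 * plain-value commands swap target_su.val, array commands go through the
 * semarray helpers above, and the stat/info commands use the semid_ds and
 * seminfo converters. */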
2505 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2506 union target_semun target_su
)
2509 struct semid_ds dsarg
;
2510 unsigned short *array
= NULL
;
2511 struct seminfo seminfo
;
2512 abi_long ret
= -TARGET_EINVAL
;
2519 arg
.val
= tswapl(target_su
.val
);
2520 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2521 target_su
.val
= tswapl(arg
.val
);
2525 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2529 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2530 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2537 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2541 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2542 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2548 arg
.__buf
= &seminfo
;
2549 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2550 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
2558 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
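
/* Message queue support.  target_msqid_ds mirrors the kernel msqid_ds layout;
 * on 32-bit ABIs each time field is followed by a padding word. */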
2604 struct target_msqid_ds
2606 struct target_ipc_perm msg_perm
;
2607 abi_ulong msg_stime
;
2608 #if TARGET_ABI_BITS == 32
2609 abi_ulong __unused1
;
2611 abi_ulong msg_rtime
;
2612 #if TARGET_ABI_BITS == 32
2613 abi_ulong __unused2
;
2615 abi_ulong msg_ctime
;
2616 #if TARGET_ABI_BITS == 32
2617 abi_ulong __unused3
;
2619 abi_ulong __msg_cbytes
;
2621 abi_ulong msg_qbytes
;
2622 abi_ulong msg_lspid
;
2623 abi_ulong msg_lrpid
;
2624 abi_ulong __unused4
;
2625 abi_ulong __unused5
;
2628 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2629 abi_ulong target_addr
)
2631 struct target_msqid_ds
*target_md
;
2633 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2634 return -TARGET_EFAULT
;
2635 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2636 return -TARGET_EFAULT
;
2637 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2638 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2639 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2640 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2641 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2642 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2643 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2644 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2645 unlock_user_struct(target_md
, target_addr
, 0);
2649 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2650 struct msqid_ds
*host_md
)
2652 struct target_msqid_ds
*target_md
;
2654 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2655 return -TARGET_EFAULT
;
2656 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2657 return -TARGET_EFAULT
;
2658 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2659 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2660 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2661 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2662 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2663 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2664 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2665 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2666 unlock_user_struct(target_md
, target_addr
, 1);
2670 struct target_msginfo
{
2678 unsigned short int msgseg
;
2681 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2682 struct msginfo
*host_msginfo
)
2684 struct target_msginfo
*target_msginfo
;
2685 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2686 return -TARGET_EFAULT
;
2687 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2688 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2689 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2690 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2691 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2692 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2693 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2694 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2695 unlock_user_struct(target_msginfo
, target_addr
, 1);
2699 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2701 struct msqid_ds dsarg
;
2702 struct msginfo msginfo
;
2703 abi_long ret
= -TARGET_EINVAL
;
2711 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2712 return -TARGET_EFAULT
;
2713 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2714 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2715 return -TARGET_EFAULT
;
2718 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2722 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2723 if (host_to_target_msginfo(ptr
, &msginfo
))
2724 return -TARGET_EFAULT
;
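
/* msgsnd()/msgrcv() copy the message through a temporary host msgbuf: the
 * buffer is allocated with room for the payload plus the host mtype word,
 * and mtype itself is byte-swapped separately from the text. */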
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
2755 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2756 unsigned int msgsz
, abi_long msgtyp
,
2759 struct target_msgbuf
*target_mb
;
2761 struct msgbuf
*host_mb
;
2764 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2765 return -TARGET_EFAULT
;
2767 host_mb
= malloc(msgsz
+sizeof(long));
2768 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2771 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2772 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2773 if (!target_mtext
) {
2774 ret
= -TARGET_EFAULT
;
2777 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2778 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2781 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2786 unlock_user_struct(target_mb
, msgp
, 1);
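
/* Shared memory: target_shmid_ds and the shminfo/shm_info variants below are
 * converted field by field with __get_user/__put_user so that sizes and byte
 * order match the guest ABI. */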
2790 struct target_shmid_ds
2792 struct target_ipc_perm shm_perm
;
2793 abi_ulong shm_segsz
;
2794 abi_ulong shm_atime
;
2795 #if TARGET_ABI_BITS == 32
2796 abi_ulong __unused1
;
2798 abi_ulong shm_dtime
;
2799 #if TARGET_ABI_BITS == 32
2800 abi_ulong __unused2
;
2802 abi_ulong shm_ctime
;
2803 #if TARGET_ABI_BITS == 32
2804 abi_ulong __unused3
;
2808 abi_ulong shm_nattch
;
2809 unsigned long int __unused4
;
2810 unsigned long int __unused5
;
2813 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2814 abi_ulong target_addr
)
2816 struct target_shmid_ds
*target_sd
;
2818 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2819 return -TARGET_EFAULT
;
2820 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2821 return -TARGET_EFAULT
;
2822 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2823 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2824 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2825 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2826 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2827 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2828 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2829 unlock_user_struct(target_sd
, target_addr
, 0);
2833 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2834 struct shmid_ds
*host_sd
)
2836 struct target_shmid_ds
*target_sd
;
2838 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2839 return -TARGET_EFAULT
;
2840 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2841 return -TARGET_EFAULT
;
2842 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2843 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2844 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2845 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2846 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2847 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2848 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2849 unlock_user_struct(target_sd
, target_addr
, 1);
2853 struct target_shminfo
{
2861 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2862 struct shminfo
*host_shminfo
)
2864 struct target_shminfo
*target_shminfo
;
2865 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2866 return -TARGET_EFAULT
;
2867 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2868 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2869 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2870 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2871 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2872 unlock_user_struct(target_shminfo
, target_addr
, 1);
2876 struct target_shm_info
{
2881 abi_ulong swap_attempts
;
2882 abi_ulong swap_successes
;
2885 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2886 struct shm_info
*host_shm_info
)
2888 struct target_shm_info
*target_shm_info
;
2889 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2890 return -TARGET_EFAULT
;
2891 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2892 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2893 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2894 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2895 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2896 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2897 unlock_user_struct(target_shm_info
, target_addr
, 1);
2901 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2903 struct shmid_ds dsarg
;
2904 struct shminfo shminfo
;
2905 struct shm_info shm_info
;
2906 abi_long ret
= -TARGET_EINVAL
;
2914 if (target_to_host_shmid_ds(&dsarg
, buf
))
2915 return -TARGET_EFAULT
;
2916 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2917 if (host_to_target_shmid_ds(buf
, &dsarg
))
2918 return -TARGET_EFAULT
;
2921 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2922 if (host_to_target_shminfo(buf
, &shminfo
))
2923 return -TARGET_EFAULT
;
2926 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2927 if (host_to_target_shm_info(buf
, &shm_info
))
2928 return -TARGET_EFAULT
;
2933 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2940 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2944 struct shmid_ds shm_info
;
2947 /* find out the length of the shared memory segment */
2948 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2949 if (is_error(ret
)) {
2950 /* can't get length, bail out */
2957 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2959 abi_ulong mmap_start
;
2961 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2963 if (mmap_start
== -1) {
2965 host_raddr
= (void *)-1;
2967 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2970 if (host_raddr
== (void *)-1) {
2972 return get_errno((long)host_raddr
);
2974 raddr
=h2g((unsigned long)host_raddr
);
2976 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2977 PAGE_VALID
| PAGE_READ
|
2978 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2980 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2981 if (shm_regions
[i
].start
== 0) {
2982 shm_regions
[i
].start
= raddr
;
2983 shm_regions
[i
].size
= shm_info
.shm_segsz
;
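
/* Detaching: clear the bookkeeping entry and the page flags for the region
 * before handing the address to the host shmdt(). */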
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
3008 #ifdef TARGET_NR_ipc
3009 /* ??? This only works with linear mappings. */
3010 /* do_ipc() must return target values and target errnos. */
3011 static abi_long
do_ipc(unsigned int call
, int first
,
3012 int second
, int third
,
3013 abi_long ptr
, abi_long fifth
)
3018 version
= call
>> 16;
3023 ret
= do_semop(first
, ptr
, second
);
3027 ret
= get_errno(semget(first
, second
, third
));
3031 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
3035 ret
= get_errno(msgget(first
, second
));
3039 ret
= do_msgsnd(first
, ptr
, second
, third
);
3043 ret
= do_msgctl(first
, second
, ptr
);
3050 struct target_ipc_kludge
{
3055 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
3056 ret
= -TARGET_EFAULT
;
3060 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
3062 unlock_user_struct(tmp
, ptr
, 0);
3066 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
3075 raddr
= do_shmat(first
, ptr
, second
);
3076 if (is_error(raddr
))
3077 return get_errno(raddr
);
3078 if (put_user_ual(raddr
, third
))
3079 return -TARGET_EFAULT
;
3083 ret
= -TARGET_EINVAL
;
3088 ret
= do_shmdt(ptr
);
3092 /* IPC_* flag values are the same on all linux platforms */
3093 ret
= get_errno(shmget(first
, second
, third
));
3096 /* IPC_* and SHM_* command values are the same on all linux platforms */
3098 ret
= do_shmctl(first
, second
, third
);
3101 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
3102 ret
= -TARGET_ENOSYS
;
3109 /* kernel structure types definitions */
3111 #define STRUCT(name, ...) STRUCT_ ## name,
3112 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3114 #include "syscall_types.h"
3117 #undef STRUCT_SPECIAL
3119 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3120 #define STRUCT_SPECIAL(name)
3121 #include "syscall_types.h"
3123 #undef STRUCT_SPECIAL
3125 typedef struct IOCTLEntry IOCTLEntry
;
3127 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3128 int fd
, abi_long cmd
, abi_long arg
);
3131 unsigned int target_cmd
;
3132 unsigned int host_cmd
;
3135 do_ioctl_fn
*do_ioctl
;
3136 const argtype arg_type
[5];
3139 #define IOC_R 0x0001
3140 #define IOC_W 0x0002
3141 #define IOC_RW (IOC_R | IOC_W)
3143 #define MAX_STRUCT_SIZE 4096
3145 #ifdef CONFIG_FIEMAP
3146 /* So fiemap access checks don't overflow on 32 bit systems.
3147 * This is very slightly smaller than the limit imposed by
3148 * the underlying kernel.
3150 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3151 / sizeof(struct fiemap_extent))
3153 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3154 int fd
, abi_long cmd
, abi_long arg
)
3156 /* The parameter for this ioctl is a struct fiemap followed
3157 * by an array of struct fiemap_extent whose size is set
3158 * in fiemap->fm_extent_count. The array is filled in by the
3161 int target_size_in
, target_size_out
;
3163 const argtype
*arg_type
= ie
->arg_type
;
3164 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3167 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3171 assert(arg_type
[0] == TYPE_PTR
);
3172 assert(ie
->access
== IOC_RW
);
3174 target_size_in
= thunk_type_size(arg_type
, 0);
3175 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3177 return -TARGET_EFAULT
;
3179 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3180 unlock_user(argptr
, arg
, 0);
3181 fm
= (struct fiemap
*)buf_temp
;
3182 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3183 return -TARGET_EINVAL
;
3186 outbufsz
= sizeof (*fm
) +
3187 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3189 if (outbufsz
> MAX_STRUCT_SIZE
) {
3190 /* We can't fit all the extents into the fixed size buffer.
3191 * Allocate one that is large enough and use it instead.
3193 fm
= malloc(outbufsz
);
3195 return -TARGET_ENOMEM
;
3197 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3200 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3201 if (!is_error(ret
)) {
3202 target_size_out
= target_size_in
;
3203 /* An extent_count of 0 means we were only counting the extents
3204 * so there are no structs to copy
3206 if (fm
->fm_extent_count
!= 0) {
3207 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3209 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3211 ret
= -TARGET_EFAULT
;
3213 /* Convert the struct fiemap */
3214 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3215 if (fm
->fm_extent_count
!= 0) {
3216 p
= argptr
+ target_size_in
;
3217 /* ...and then all the struct fiemap_extents */
3218 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3219 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3224 unlock_user(argptr
, arg
, target_size_out
);
3234 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3235 int fd
, abi_long cmd
, abi_long arg
)
3237 const argtype
*arg_type
= ie
->arg_type
;
3241 struct ifconf
*host_ifconf
;
3243 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3244 int target_ifreq_size
;
3249 abi_long target_ifc_buf
;
3253 assert(arg_type
[0] == TYPE_PTR
);
3254 assert(ie
->access
== IOC_RW
);
3257 target_size
= thunk_type_size(arg_type
, 0);
3259 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3261 return -TARGET_EFAULT
;
3262 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3263 unlock_user(argptr
, arg
, 0);
3265 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3266 target_ifc_len
= host_ifconf
->ifc_len
;
3267 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3269 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3270 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3271 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3273 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3274 if (outbufsz
> MAX_STRUCT_SIZE
) {
3275 /* We can't fit all the extents into the fixed size buffer.
3276 * Allocate one that is large enough and use it instead.
3278 host_ifconf
= malloc(outbufsz
);
3280 return -TARGET_ENOMEM
;
3282 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3285 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3287 host_ifconf
->ifc_len
= host_ifc_len
;
3288 host_ifconf
->ifc_buf
= host_ifc_buf
;
3290 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3291 if (!is_error(ret
)) {
3292 /* convert host ifc_len to target ifc_len */
3294 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3295 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3296 host_ifconf
->ifc_len
= target_ifc_len
;
3298 /* restore target ifc_buf */
3300 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3302 /* copy struct ifconf to target user */
3304 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3306 return -TARGET_EFAULT
;
3307 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3308 unlock_user(argptr
, arg
, target_size
);
3310 /* copy ifreq[] to target user */
3312 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3313 for (i
= 0; i
< nb_ifreq
; i
++) {
3314 thunk_convert(argptr
+ i
* target_ifreq_size
,
3315 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3316 ifreq_arg_type
, THUNK_TARGET
);
3318 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3328 static IOCTLEntry ioctl_entries
[] = {
3329 #define IOCTL(cmd, access, ...) \
3330 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3331 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3332 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
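
/* Illustrative expansion (assuming an ioctls.h entry such as
 *   IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
 * exists): the IOCTL() macro above turns it into
 *   { TARGET_TIOCGWINSZ, TIOCGWINSZ, "TIOCGWINSZ", IOC_R, 0,
 *     { MK_PTR(MK_STRUCT(STRUCT_winsize)) } },
 * i.e. target command, host command, name, access flags, optional special
 * handler, and the thunk argument type used for conversion. */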
3337 /* ??? Implement proper locking for ioctls. */
/* do_ioctl() must return target values and target errnos. */
3339 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3341 const IOCTLEntry
*ie
;
3342 const argtype
*arg_type
;
3344 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3350 if (ie
->target_cmd
== 0) {
3351 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3352 return -TARGET_ENOSYS
;
3354 if (ie
->target_cmd
== cmd
)
3358 arg_type
= ie
->arg_type
;
3360 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3363 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3366 switch(arg_type
[0]) {
3369 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3374 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3378 target_size
= thunk_type_size(arg_type
, 0);
3379 switch(ie
->access
) {
3381 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3382 if (!is_error(ret
)) {
3383 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3385 return -TARGET_EFAULT
;
3386 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3387 unlock_user(argptr
, arg
, target_size
);
3391 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3393 return -TARGET_EFAULT
;
3394 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3395 unlock_user(argptr
, arg
, 0);
3396 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3400 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3402 return -TARGET_EFAULT
;
3403 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3404 unlock_user(argptr
, arg
, 0);
3405 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3406 if (!is_error(ret
)) {
3407 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3409 return -TARGET_EFAULT
;
3410 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3411 unlock_user(argptr
, arg
, target_size
);
3417 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3418 (long)cmd
, arg_type
[0]);
3419 ret
= -TARGET_ENOSYS
;
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
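
/* Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() picks the bits selected by the target
 * mask and rewrites them as the corresponding host bits, and
 * host_to_target_bitmask() performs the reverse mapping. */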
3525 static void target_to_host_termios (void *dst
, const void *src
)
3527 struct host_termios
*host
= dst
;
3528 const struct target_termios
*target
= src
;
3531 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3533 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3535 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3537 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3538 host
->c_line
= target
->c_line
;
3540 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3541 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3542 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3543 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3544 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3545 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3546 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3547 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3548 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3549 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3550 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3551 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3552 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3553 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3554 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3555 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3556 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3557 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3560 static void host_to_target_termios (void *dst
, const void *src
)
3562 struct target_termios
*target
= dst
;
3563 const struct host_termios
*host
= src
;
3566 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3568 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3570 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3572 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3573 target
->c_line
= host
->c_line
;
3575 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3576 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3577 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3578 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3579 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3580 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3581 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3582 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3583 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3584 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3585 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3586 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3587 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3588 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3589 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3590 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3591 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3592 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3595 static const StructEntry struct_termios_def
= {
3596 .convert
= { host_to_target_termios
, target_to_host_termios
},
3597 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3598 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { 0, 0, 0, 0 }
};
3613 #if defined(TARGET_I386)
3615 /* NOTE: there is really one LDT for all the threads */
3616 static uint8_t *ldt_table
;
3618 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3625 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3626 if (size
> bytecount
)
3628 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3630 return -TARGET_EFAULT
;
3631 /* ??? Should this by byteswapped? */
3632 memcpy(p
, ldt_table
, size
);
3633 unlock_user(p
, ptr
, size
);
3637 /* XXX: add locking support */
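/* The modify_ldt flags word packs the descriptor attributes: bit 0 selects a
 * 32-bit segment, bits 1-2 the contents type, bit 3 read/exec-only, bit 4
 * limit-in-pages, bit 5 seg-not-present and bit 6 "useable" (bit 7 is the
 * 64-bit "lm" flag), exactly as decoded below. */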
3638 static abi_long
write_ldt(CPUX86State
*env
,
3639 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3641 struct target_modify_ldt_ldt_s ldt_info
;
3642 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3643 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3644 int seg_not_present
, useable
, lm
;
3645 uint32_t *lp
, entry_1
, entry_2
;
3647 if (bytecount
!= sizeof(ldt_info
))
3648 return -TARGET_EINVAL
;
3649 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3650 return -TARGET_EFAULT
;
3651 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3652 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3653 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3654 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3655 unlock_user_struct(target_ldt_info
, ptr
, 0);
3657 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3658 return -TARGET_EINVAL
;
3659 seg_32bit
= ldt_info
.flags
& 1;
3660 contents
= (ldt_info
.flags
>> 1) & 3;
3661 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3662 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3663 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3664 useable
= (ldt_info
.flags
>> 6) & 1;
3668 lm
= (ldt_info
.flags
>> 7) & 1;
3670 if (contents
== 3) {
3672 return -TARGET_EINVAL
;
3673 if (seg_not_present
== 0)
3674 return -TARGET_EINVAL
;
3676 /* allocate the LDT */
3678 env
->ldt
.base
= target_mmap(0,
3679 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3680 PROT_READ
|PROT_WRITE
,
3681 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3682 if (env
->ldt
.base
== -1)
3683 return -TARGET_ENOMEM
;
3684 memset(g2h(env
->ldt
.base
), 0,
3685 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3686 env
->ldt
.limit
= 0xffff;
3687 ldt_table
= g2h(env
->ldt
.base
);
3690 /* NOTE: same code as Linux kernel */
3691 /* Allow LDTs to be cleared by the user. */
3692 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3695 read_exec_only
== 1 &&
3697 limit_in_pages
== 0 &&
3698 seg_not_present
== 1 &&
3706 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3707 (ldt_info
.limit
& 0x0ffff);
3708 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3709 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3710 (ldt_info
.limit
& 0xf0000) |
3711 ((read_exec_only
^ 1) << 9) |
3713 ((seg_not_present
^ 1) << 15) |
3715 (limit_in_pages
<< 23) |
3719 entry_2
|= (useable
<< 20);
3721 /* Install the new entry ... */
3723 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3724 lp
[0] = tswap32(entry_1
);
3725 lp
[1] = tswap32(entry_2
);
3729 /* specific and weird i386 syscalls */
3730 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3731 unsigned long bytecount
)
3737 ret
= read_ldt(ptr
, bytecount
);
3740 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3743 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3746 ret
= -TARGET_ENOSYS
;
3752 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3753 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3755 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3756 struct target_modify_ldt_ldt_s ldt_info
;
3757 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3758 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3759 int seg_not_present
, useable
, lm
;
3760 uint32_t *lp
, entry_1
, entry_2
;
3763 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3764 if (!target_ldt_info
)
3765 return -TARGET_EFAULT
;
3766 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3767 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3768 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3769 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3770 if (ldt_info
.entry_number
== -1) {
3771 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3772 if (gdt_table
[i
] == 0) {
3773 ldt_info
.entry_number
= i
;
3774 target_ldt_info
->entry_number
= tswap32(i
);
3779 unlock_user_struct(target_ldt_info
, ptr
, 1);
3781 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3782 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3783 return -TARGET_EINVAL
;
3784 seg_32bit
= ldt_info
.flags
& 1;
3785 contents
= (ldt_info
.flags
>> 1) & 3;
3786 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3787 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3788 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3789 useable
= (ldt_info
.flags
>> 6) & 1;
3793 lm
= (ldt_info
.flags
>> 7) & 1;
3796 if (contents
== 3) {
3797 if (seg_not_present
== 0)
3798 return -TARGET_EINVAL
;
3801 /* NOTE: same code as Linux kernel */
3802 /* Allow LDTs to be cleared by the user. */
3803 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3804 if ((contents
== 0 &&
3805 read_exec_only
== 1 &&
3807 limit_in_pages
== 0 &&
3808 seg_not_present
== 1 &&
3816 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3817 (ldt_info
.limit
& 0x0ffff);
3818 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3819 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3820 (ldt_info
.limit
& 0xf0000) |
3821 ((read_exec_only
^ 1) << 9) |
3823 ((seg_not_present
^ 1) << 15) |
3825 (limit_in_pages
<< 23) |
3830 /* Install the new entry ... */
3832 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3833 lp
[0] = tswap32(entry_1
);
3834 lp
[1] = tswap32(entry_2
);
3838 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3840 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3841 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3842 uint32_t base_addr
, limit
, flags
;
3843 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3844 int seg_not_present
, useable
, lm
;
3845 uint32_t *lp
, entry_1
, entry_2
;
3847 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3848 if (!target_ldt_info
)
3849 return -TARGET_EFAULT
;
3850 idx
= tswap32(target_ldt_info
->entry_number
);
3851 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3852 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3853 unlock_user_struct(target_ldt_info
, ptr
, 1);
3854 return -TARGET_EINVAL
;
3856 lp
= (uint32_t *)(gdt_table
+ idx
);
3857 entry_1
= tswap32(lp
[0]);
3858 entry_2
= tswap32(lp
[1]);
3860 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3861 contents
= (entry_2
>> 10) & 3;
3862 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3863 seg_32bit
= (entry_2
>> 22) & 1;
3864 limit_in_pages
= (entry_2
>> 23) & 1;
3865 useable
= (entry_2
>> 20) & 1;
3869 lm
= (entry_2
>> 21) & 1;
3871 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3872 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3873 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3874 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3875 base_addr
= (entry_1
>> 16) |
3876 (entry_2
& 0xff000000) |
3877 ((entry_2
& 0xff) << 16);
3878 target_ldt_info
->base_addr
= tswapl(base_addr
);
3879 target_ldt_info
->limit
= tswap32(limit
);
3880 target_ldt_info
->flags
= tswap32(flags
);
3881 unlock_user_struct(target_ldt_info
, ptr
, 1);
3884 #endif /* TARGET_I386 && TARGET_ABI32 */
3886 #ifndef TARGET_ABI32
3887 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3894 case TARGET_ARCH_SET_GS
:
3895 case TARGET_ARCH_SET_FS
:
3896 if (code
== TARGET_ARCH_SET_GS
)
3900 cpu_x86_load_seg(env
, idx
, 0);
3901 env
->segs
[idx
].base
= addr
;
3903 case TARGET_ARCH_GET_GS
:
3904 case TARGET_ARCH_GET_FS
:
3905 if (code
== TARGET_ARCH_GET_GS
)
3909 val
= env
->segs
[idx
].base
;
3910 if (put_user(val
, addr
, abi_ulong
))
3911 ret
= -TARGET_EFAULT
;
3914 ret
= -TARGET_EINVAL
;
3921 #endif /* defined(TARGET_I386) */
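
/* Thread creation: with NPTL support, CLONE_VM clones are backed by a host
 * pthread and the parent/child hand-shake in clone_func() keeps thread setup
 * atomic under clone_lock; without NPTL a plain clone() on a freshly
 * allocated scratch stack is used instead. */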
3923 #define NEW_STACK_SIZE 0x40000
3925 #if defined(CONFIG_USE_NPTL)
3927 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3930 pthread_mutex_t mutex
;
3931 pthread_cond_t cond
;
3934 abi_ulong child_tidptr
;
3935 abi_ulong parent_tidptr
;
3939 static void *clone_func(void *arg
)
3941 new_thread_info
*info
= arg
;
3947 ts
= (TaskState
*)thread_env
->opaque
;
3948 info
->tid
= gettid();
3949 env
->host_tid
= info
->tid
;
3951 if (info
->child_tidptr
)
3952 put_user_u32(info
->tid
, info
->child_tidptr
);
3953 if (info
->parent_tidptr
)
3954 put_user_u32(info
->tid
, info
->parent_tidptr
);
3955 /* Enable signals. */
3956 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3957 /* Signal to the parent that we're ready. */
3958 pthread_mutex_lock(&info
->mutex
);
3959 pthread_cond_broadcast(&info
->cond
);
3960 pthread_mutex_unlock(&info
->mutex
);
/* Wait until the parent has finished initializing the tls state. */
3962 pthread_mutex_lock(&clone_lock
);
3963 pthread_mutex_unlock(&clone_lock
);
3970 static int clone_func(void *arg
)
3972 CPUState
*env
= arg
;
/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
3981 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3982 abi_ulong parent_tidptr
, target_ulong newtls
,
3983 abi_ulong child_tidptr
)
3988 #if defined(CONFIG_USE_NPTL)
3989 unsigned int nptl_flags
;
3995 /* Emulate vfork() with fork() */
3996 if (flags
& CLONE_VFORK
)
3997 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3999 if (flags
& CLONE_VM
) {
4000 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4001 #if defined(CONFIG_USE_NPTL)
4002 new_thread_info info
;
4003 pthread_attr_t attr
;
4005 ts
= g_malloc0(sizeof(TaskState
));
4006 init_task_state(ts
);
4007 /* we create a new CPU instance. */
4008 new_env
= cpu_copy(env
);
4009 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4012 /* Init regs that differ from the parent. */
4013 cpu_clone_regs(new_env
, newsp
);
4014 new_env
->opaque
= ts
;
4015 ts
->bprm
= parent_ts
->bprm
;
4016 ts
->info
= parent_ts
->info
;
4017 #if defined(CONFIG_USE_NPTL)
4019 flags
&= ~CLONE_NPTL_FLAGS2
;
4021 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4022 ts
->child_tidptr
= child_tidptr
;
4025 if (nptl_flags
& CLONE_SETTLS
)
4026 cpu_set_tls (new_env
, newtls
);
4028 /* Grab a mutex so that thread setup appears atomic. */
4029 pthread_mutex_lock(&clone_lock
);
4031 memset(&info
, 0, sizeof(info
));
4032 pthread_mutex_init(&info
.mutex
, NULL
);
4033 pthread_mutex_lock(&info
.mutex
);
4034 pthread_cond_init(&info
.cond
, NULL
);
4036 if (nptl_flags
& CLONE_CHILD_SETTID
)
4037 info
.child_tidptr
= child_tidptr
;
4038 if (nptl_flags
& CLONE_PARENT_SETTID
)
4039 info
.parent_tidptr
= parent_tidptr
;
4041 ret
= pthread_attr_init(&attr
);
4042 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4043 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4044 /* It is not safe to deliver signals until the child has finished
4045 initializing, so temporarily block all signals. */
4046 sigfillset(&sigmask
);
4047 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4049 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4050 /* TODO: Free new CPU state if thread creation failed. */
4052 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4053 pthread_attr_destroy(&attr
);
4055 /* Wait for the child to initialize. */
4056 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4058 if (flags
& CLONE_PARENT_SETTID
)
4059 put_user_u32(ret
, parent_tidptr
);
4063 pthread_mutex_unlock(&info
.mutex
);
4064 pthread_cond_destroy(&info
.cond
);
4065 pthread_mutex_destroy(&info
.mutex
);
4066 pthread_mutex_unlock(&clone_lock
);
4068 if (flags
& CLONE_NPTL_FLAGS2
)
4070 /* This is probably going to die very quickly, but do it anyway. */
4071 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4073 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4075 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4079 /* if no CLONE_VM, we consider it is a fork */
4080 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4085 /* Child Process. */
4086 cpu_clone_regs(env
, newsp
);
4088 #if defined(CONFIG_USE_NPTL)
4089 /* There is a race condition here. The parent process could
4090 theoretically read the TID in the child process before the child
4091 tid is set. This would require using either ptrace
4092 (not implemented) or having *_tidptr to point at a shared memory
4093 mapping. We can't repeat the spinlock hack used above because
4094 the child process gets its own copy of the lock. */
4095 if (flags
& CLONE_CHILD_SETTID
)
4096 put_user_u32(gettid(), child_tidptr
);
4097 if (flags
& CLONE_PARENT_SETTID
)
4098 put_user_u32(gettid(), parent_tidptr
);
4099 ts
= (TaskState
*)env
->opaque
;
4100 if (flags
& CLONE_SETTLS
)
4101 cpu_set_tls (env
, newtls
);
4102 if (flags
& CLONE_CHILD_CLEARTID
)
4103 ts
->child_tidptr
= child_tidptr
;
4112 /* warning : doesn't handle linux specific flags... */
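/* Map TARGET_F_* command numbers onto the host F_* values; unknown commands
 * yield -TARGET_EINVAL so the caller can fail cleanly. */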
4113 static int target_to_host_fcntl_cmd(int cmd
)
4116 case TARGET_F_DUPFD
:
4117 case TARGET_F_GETFD
:
4118 case TARGET_F_SETFD
:
4119 case TARGET_F_GETFL
:
4120 case TARGET_F_SETFL
:
4122 case TARGET_F_GETLK
:
4124 case TARGET_F_SETLK
:
4126 case TARGET_F_SETLKW
:
4128 case TARGET_F_GETOWN
:
4130 case TARGET_F_SETOWN
:
4132 case TARGET_F_GETSIG
:
4134 case TARGET_F_SETSIG
:
4136 #if TARGET_ABI_BITS == 32
4137 case TARGET_F_GETLK64
:
4139 case TARGET_F_SETLK64
:
4141 case TARGET_F_SETLKW64
:
4144 case TARGET_F_SETLEASE
:
4146 case TARGET_F_GETLEASE
:
4148 #ifdef F_DUPFD_CLOEXEC
4149 case TARGET_F_DUPFD_CLOEXEC
:
4150 return F_DUPFD_CLOEXEC
;
4152 case TARGET_F_NOTIFY
:
4155 return -TARGET_EINVAL
;
4157 return -TARGET_EINVAL
;
4160 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4163 struct target_flock
*target_fl
;
4164 struct flock64 fl64
;
4165 struct target_flock64
*target_fl64
;
4167 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4169 if (host_cmd
== -TARGET_EINVAL
)
4173 case TARGET_F_GETLK
:
4174 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4175 return -TARGET_EFAULT
;
4176 fl
.l_type
= tswap16(target_fl
->l_type
);
4177 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4178 fl
.l_start
= tswapl(target_fl
->l_start
);
4179 fl
.l_len
= tswapl(target_fl
->l_len
);
4180 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4181 unlock_user_struct(target_fl
, arg
, 0);
4182 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4184 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4185 return -TARGET_EFAULT
;
4186 target_fl
->l_type
= tswap16(fl
.l_type
);
4187 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4188 target_fl
->l_start
= tswapl(fl
.l_start
);
4189 target_fl
->l_len
= tswapl(fl
.l_len
);
4190 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4191 unlock_user_struct(target_fl
, arg
, 1);
4195 case TARGET_F_SETLK
:
4196 case TARGET_F_SETLKW
:
4197 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4198 return -TARGET_EFAULT
;
4199 fl
.l_type
= tswap16(target_fl
->l_type
);
4200 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4201 fl
.l_start
= tswapl(target_fl
->l_start
);
4202 fl
.l_len
= tswapl(target_fl
->l_len
);
4203 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4204 unlock_user_struct(target_fl
, arg
, 0);
4205 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4208 case TARGET_F_GETLK64
:
4209 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4210 return -TARGET_EFAULT
;
4211 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4212 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4213 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4214 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4215 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4216 unlock_user_struct(target_fl64
, arg
, 0);
4217 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4219 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4220 return -TARGET_EFAULT
;
4221 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4222 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4223 target_fl64
->l_start
= tswapl(fl64
.l_start
);
4224 target_fl64
->l_len
= tswapl(fl64
.l_len
);
4225 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4226 unlock_user_struct(target_fl64
, arg
, 1);
4229 case TARGET_F_SETLK64
:
4230 case TARGET_F_SETLKW64
:
4231 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4232 return -TARGET_EFAULT
;
4233 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4234 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4235 fl64
.l_start
= tswapl(target_fl64
->l_start
);
4236 fl64
.l_len
= tswapl(target_fl64
->l_len
);
4237 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4238 unlock_user_struct(target_fl64
, arg
, 0);
4239 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4242 case TARGET_F_GETFL
:
4243 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4245 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4249 case TARGET_F_SETFL
:
4250 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4253 case TARGET_F_SETOWN
:
4254 case TARGET_F_GETOWN
:
4255 case TARGET_F_SETSIG
:
4256 case TARGET_F_GETSIG
:
4257 case TARGET_F_SETLEASE
:
4258 case TARGET_F_GETLEASE
:
4259 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4263 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4271 static inline int high2lowuid(int uid
)
4279 static inline int high2lowgid(int gid
)
4287 static inline int low2highuid(int uid
)
4289 if ((int16_t)uid
== -1)
4295 static inline int low2highgid(int gid
)
4297 if ((int16_t)gid
== -1)
4302 static inline int tswapid(int id
)
4306 #else /* !USE_UID16 */
4307 static inline int high2lowuid(int uid
)
4311 static inline int high2lowgid(int gid
)
4315 static inline int low2highuid(int uid
)
4319 static inline int low2highgid(int gid
)
4323 static inline int tswapid(int id
)
4327 #endif /* USE_UID16 */
4329 void syscall_init(void)
4332 const argtype
*arg_type
;
4336 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4337 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4338 #include "syscall_types.h"
4340 #undef STRUCT_SPECIAL
4342 /* we patch the ioctl size if necessary. We rely on the fact that
4343 no ioctl has all the bits at '1' in the size field */
4345 while (ie
->target_cmd
!= 0) {
4346 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4347 TARGET_IOC_SIZEMASK
) {
4348 arg_type
= ie
->arg_type
;
4349 if (arg_type
[0] != TYPE_PTR
) {
4350 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4355 size
= thunk_type_size(arg_type
, 0);
4356 ie
->target_cmd
= (ie
->target_cmd
&
4357 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4358 (size
<< TARGET_IOC_SIZESHIFT
);
4361 /* Build target_to_host_errno_table[] table from
4362 * host_to_target_errno_table[]. */
4363 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4364 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4366 /* automatic consistency check if same arch */
4367 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4368 (defined(__x86_64__) && defined(TARGET_X86_64))
4369 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4370 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4371 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
4378 #if TARGET_ABI_BITS == 32
4379 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4381 #ifdef TARGET_WORDS_BIGENDIAN
4382 return ((uint64_t)word0
<< 32) | word1
;
4384 return ((uint64_t)word1
<< 32) | word0
;
4387 #else /* TARGET_ABI_BITS == 32 */
4388 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4392 #endif /* TARGET_ABI_BITS != 32 */
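
/* On 32-bit ABIs a 64-bit file offset arrives as a pair of registers; on
 * targets where regpairs_aligned() is true the pair starts on an aligned
 * register, so the helpers below skip the unused slot before reassembling the
 * offset with target_offset64(). */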
4394 #ifdef TARGET_NR_truncate64
4395 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4400 if (regpairs_aligned(cpu_env
)) {
4404 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4408 #ifdef TARGET_NR_ftruncate64
4409 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4414 if (regpairs_aligned(cpu_env
)) {
4418 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
4422 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
4423 abi_ulong target_addr
)
4425 struct target_timespec
*target_ts
;
4427 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
4428 return -TARGET_EFAULT
;
4429 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
4430 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
4431 unlock_user_struct(target_ts
, target_addr
, 0);
4435 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
4436 struct timespec
*host_ts
)
4438 struct target_timespec
*target_ts
;
4440 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
4441 return -TARGET_EFAULT
;
4442 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
4443 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
4444 unlock_user_struct(target_ts
, target_addr
, 1);
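/* These two converters are the usual bridge for any syscall in this file
 * that takes a struct timespec (nanosleep, ppoll, pselect6 and the futex
 * waits below): the guest structure is mapped with lock_user_struct() and
 * each field is byte-swapped with tswapl(), so host and guest endianness
 * may differ. */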
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);

#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
        struct target_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
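/* host_to_target_stat64() picks the marshalling layout at compile time:
 * an ARM EABI guest uses struct target_eabi_stat64, 64-bit ABIs (except
 * Alpha) use struct target_stat, and everything else uses struct
 * target_stat64; the field-by-field __put_user() copies above also apply
 * the byte swapping the guest expects. */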
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  In any case they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
        target_to_host_timespec(pts, timeout);
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_CMP_REQUEUE:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                         (base_op == FUTEX_CMP_REQUEUE
        return -TARGET_ENOSYS;

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
static int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)

int get_osversion(void)
    static int osversion;
    struct new_utsname buf;

    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    if (sys_uname(&buf))
    for (i = 0; i < 3; i++) {
        while (*s >= '0' && *s <= '9') {
        tmp = (tmp << 8) + n;
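/* get_osversion() packs the "major.minor.patch" release string into one
 * integer, one byte per component ((tmp << 8) + n): for example a reported
 * release of "2.6.32" becomes 0x020620, so callers can compare kernel
 * versions with plain integer comparisons. */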
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
        gemu_log("syscall %d", num);
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread. */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals. */
        if (first_cpu->next_cpu) {
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
            /* If we didn't find the CPU for this thread then something is
               wrong. */
            /* Remove the CPU from the list. */
            *lastp = p->next_cpu;
            ts = ((CPUState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
    case TARGET_NR_read:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(read(arg1, p, arg3));
        unlock_user(p, arg2, ret);
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(open(path(p),
                             target_to_host_bitmask(arg2, fcntl_flags_tbl),
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_openat(arg1,
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
        unlock_user(p, arg2, 0);
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        ret = get_errno(waitpid(arg1, &status, arg3));
        if (!is_error(ret) && arg2
            && put_user_s32(host_to_target_waitstatus(status), arg2))
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        ret = get_errno(waitid(arg1, arg2, &info, arg4));
        if (!is_error(ret) && arg3 && info.si_pid != 0) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &info);
            unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_link:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(link(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
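/* The pattern used throughout this switch: guest pointers are mapped with
 * lock_user()/lock_user_string() (VERIFY_READ or VERIFY_WRITE as needed),
 * the host syscall result is wrapped with get_errno() so failures come back
 * as -TARGET_<errno>, and every mapped buffer is released again with
 * unlock_user() before the case is left. */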
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
            ret = -TARGET_EFAULT;
            ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
        unlock_user(p, arg2, 0);
        unlock_user(p2, arg4, 0);
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_execve:
        char **argp, **envp;
        abi_ulong guest_argp;
        abi_ulong guest_envp;

        for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))
        for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))

        argp = alloca((argc + 1) * sizeof(void *));
        envp = alloca((envc + 1) * sizeof(void *));

        for (gp = guest_argp, q = argp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))
        for (gp = guest_envp, q = envp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))

        if (!(p = lock_user_string(arg1)))
        ret = get_errno(execve(p, argp, envp));
        unlock_user(p, arg1, 0);

        ret = -TARGET_EFAULT;

        for (gp = guest_argp, q = argp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
        for (gp = guest_envp, q = envp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        ret = get_errno(time(&host_time));
            && put_user_sal(host_time, arg1))
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
4918 #ifdef TARGET_NR_break
4919 case TARGET_NR_break
:
4922 #ifdef TARGET_NR_oldstat
4923 case TARGET_NR_oldstat
:
4926 case TARGET_NR_lseek
:
4927 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4929 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4930 /* Alpha specific */
4931 case TARGET_NR_getxpid
:
4932 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4933 ret
= get_errno(getpid());
4936 #ifdef TARGET_NR_getpid
4937 case TARGET_NR_getpid
:
4938 ret
= get_errno(getpid());
4941 case TARGET_NR_mount
:
4943 /* need to look at the data field */
4945 p
= lock_user_string(arg1
);
4946 p2
= lock_user_string(arg2
);
4947 p3
= lock_user_string(arg3
);
4948 if (!p
|| !p2
|| !p3
)
4949 ret
= -TARGET_EFAULT
;
4951 /* FIXME - arg5 should be locked, but it isn't clear how to
4952 * do that since it's not guaranteed to be a NULL-terminated
4956 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4958 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4960 unlock_user(p
, arg1
, 0);
4961 unlock_user(p2
, arg2
, 0);
4962 unlock_user(p3
, arg3
, 0);
4965 #ifdef TARGET_NR_umount
4966 case TARGET_NR_umount
:
4967 if (!(p
= lock_user_string(arg1
)))
4969 ret
= get_errno(umount(p
));
4970 unlock_user(p
, arg1
, 0);
4973 #ifdef TARGET_NR_stime /* not on alpha */
4974 case TARGET_NR_stime
:
4977 if (get_user_sal(host_time
, arg1
))
4979 ret
= get_errno(stime(&host_time
));
4983 case TARGET_NR_ptrace
:
4985 #ifdef TARGET_NR_alarm /* not on alpha */
4986 case TARGET_NR_alarm
:
4990 #ifdef TARGET_NR_oldfstat
4991 case TARGET_NR_oldfstat
:
4994 #ifdef TARGET_NR_pause /* not on alpha */
4995 case TARGET_NR_pause
:
4996 ret
= get_errno(pause());
4999 #ifdef TARGET_NR_utime
5000 case TARGET_NR_utime
:
5002 struct utimbuf tbuf
, *host_tbuf
;
5003 struct target_utimbuf
*target_tbuf
;
5005 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5007 tbuf
.actime
= tswapl(target_tbuf
->actime
);
5008 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
5009 unlock_user_struct(target_tbuf
, arg2
, 0);
5014 if (!(p
= lock_user_string(arg1
)))
5016 ret
= get_errno(utime(p
, host_tbuf
));
5017 unlock_user(p
, arg1
, 0);
5021 case TARGET_NR_utimes
:
5023 struct timeval
*tvp
, tv
[2];
5025 if (copy_from_user_timeval(&tv
[0], arg2
)
5026 || copy_from_user_timeval(&tv
[1],
5027 arg2
+ sizeof(struct target_timeval
)))
5033 if (!(p
= lock_user_string(arg1
)))
5035 ret
= get_errno(utimes(p
, tvp
));
5036 unlock_user(p
, arg1
, 0);
5039 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5040 case TARGET_NR_futimesat
:
5042 struct timeval
*tvp
, tv
[2];
5044 if (copy_from_user_timeval(&tv
[0], arg3
)
5045 || copy_from_user_timeval(&tv
[1],
5046 arg3
+ sizeof(struct target_timeval
)))
5052 if (!(p
= lock_user_string(arg2
)))
5054 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5055 unlock_user(p
, arg2
, 0);
5059 #ifdef TARGET_NR_stty
5060 case TARGET_NR_stty
:
5063 #ifdef TARGET_NR_gtty
5064 case TARGET_NR_gtty
:
5067 case TARGET_NR_access
:
5068 if (!(p
= lock_user_string(arg1
)))
5070 ret
= get_errno(access(path(p
), arg2
));
5071 unlock_user(p
, arg1
, 0);
5073 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5074 case TARGET_NR_faccessat
:
5075 if (!(p
= lock_user_string(arg2
)))
5077 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5078 unlock_user(p
, arg2
, 0);
5081 #ifdef TARGET_NR_nice /* not on alpha */
5082 case TARGET_NR_nice
:
5083 ret
= get_errno(nice(arg1
));
5086 #ifdef TARGET_NR_ftime
5087 case TARGET_NR_ftime
:
5090 case TARGET_NR_sync
:
5094 case TARGET_NR_kill
:
5095 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5097 case TARGET_NR_rename
:
5100 p
= lock_user_string(arg1
);
5101 p2
= lock_user_string(arg2
);
5103 ret
= -TARGET_EFAULT
;
5105 ret
= get_errno(rename(p
, p2
));
5106 unlock_user(p2
, arg2
, 0);
5107 unlock_user(p
, arg1
, 0);
5110 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5111 case TARGET_NR_renameat
:
5114 p
= lock_user_string(arg2
);
5115 p2
= lock_user_string(arg4
);
5117 ret
= -TARGET_EFAULT
;
5119 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5120 unlock_user(p2
, arg4
, 0);
5121 unlock_user(p
, arg2
, 0);
5125 case TARGET_NR_mkdir
:
5126 if (!(p
= lock_user_string(arg1
)))
5128 ret
= get_errno(mkdir(p
, arg2
));
5129 unlock_user(p
, arg1
, 0);
5131 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5132 case TARGET_NR_mkdirat
:
5133 if (!(p
= lock_user_string(arg2
)))
5135 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5136 unlock_user(p
, arg2
, 0);
5139 case TARGET_NR_rmdir
:
5140 if (!(p
= lock_user_string(arg1
)))
5142 ret
= get_errno(rmdir(p
));
5143 unlock_user(p
, arg1
, 0);
5146 ret
= get_errno(dup(arg1
));
5148 case TARGET_NR_pipe
:
5149 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5151 #ifdef TARGET_NR_pipe2
5152 case TARGET_NR_pipe2
:
5153 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5156 case TARGET_NR_times
:
5158 struct target_tms
*tmsp
;
5160 ret
= get_errno(times(&tms
));
5162 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5165 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
5166 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
5167 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
5168 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
5171 ret
= host_to_target_clock_t(ret
);
5174 #ifdef TARGET_NR_prof
5175 case TARGET_NR_prof
:
5178 #ifdef TARGET_NR_signal
5179 case TARGET_NR_signal
:
5182 case TARGET_NR_acct
:
5184 ret
= get_errno(acct(NULL
));
5186 if (!(p
= lock_user_string(arg1
)))
5188 ret
= get_errno(acct(path(p
)));
5189 unlock_user(p
, arg1
, 0);
5192 #ifdef TARGET_NR_umount2 /* not on alpha */
5193 case TARGET_NR_umount2
:
5194 if (!(p
= lock_user_string(arg1
)))
5196 ret
= get_errno(umount2(p
, arg2
));
5197 unlock_user(p
, arg1
, 0);
5200 #ifdef TARGET_NR_lock
5201 case TARGET_NR_lock
:
5204 case TARGET_NR_ioctl
:
5205 ret
= do_ioctl(arg1
, arg2
, arg3
);
5207 case TARGET_NR_fcntl
:
5208 ret
= do_fcntl(arg1
, arg2
, arg3
);
5210 #ifdef TARGET_NR_mpx
5214 case TARGET_NR_setpgid
:
5215 ret
= get_errno(setpgid(arg1
, arg2
));
5217 #ifdef TARGET_NR_ulimit
5218 case TARGET_NR_ulimit
:
5221 #ifdef TARGET_NR_oldolduname
5222 case TARGET_NR_oldolduname
:
5225 case TARGET_NR_umask
:
5226 ret
= get_errno(umask(arg1
));
5228 case TARGET_NR_chroot
:
5229 if (!(p
= lock_user_string(arg1
)))
5231 ret
= get_errno(chroot(p
));
5232 unlock_user(p
, arg1
, 0);
5234 case TARGET_NR_ustat
:
5236 case TARGET_NR_dup2
:
5237 ret
= get_errno(dup2(arg1
, arg2
));
5239 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5240 case TARGET_NR_dup3
:
5241 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5244 #ifdef TARGET_NR_getppid /* not on alpha */
5245 case TARGET_NR_getppid
:
5246 ret
= get_errno(getppid());
5249 case TARGET_NR_getpgrp
:
5250 ret
= get_errno(getpgrp());
5252 case TARGET_NR_setsid
:
5253 ret
= get_errno(setsid());
5255 #ifdef TARGET_NR_sigaction
5256 case TARGET_NR_sigaction
:
5258 #if defined(TARGET_ALPHA)
5259 struct target_sigaction act
, oact
, *pact
= 0;
5260 struct target_old_sigaction
*old_act
;
5262 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5264 act
._sa_handler
= old_act
->_sa_handler
;
5265 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5266 act
.sa_flags
= old_act
->sa_flags
;
5267 act
.sa_restorer
= 0;
5268 unlock_user_struct(old_act
, arg2
, 0);
5271 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5272 if (!is_error(ret
) && arg3
) {
5273 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5275 old_act
->_sa_handler
= oact
._sa_handler
;
5276 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5277 old_act
->sa_flags
= oact
.sa_flags
;
5278 unlock_user_struct(old_act
, arg3
, 1);
5280 #elif defined(TARGET_MIPS)
5281 struct target_sigaction act
, oact
, *pact
, *old_act
;
5284 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5286 act
._sa_handler
= old_act
->_sa_handler
;
5287 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5288 act
.sa_flags
= old_act
->sa_flags
;
5289 unlock_user_struct(old_act
, arg2
, 0);
5295 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5297 if (!is_error(ret
) && arg3
) {
5298 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5300 old_act
->_sa_handler
= oact
._sa_handler
;
5301 old_act
->sa_flags
= oact
.sa_flags
;
5302 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5303 old_act
->sa_mask
.sig
[1] = 0;
5304 old_act
->sa_mask
.sig
[2] = 0;
5305 old_act
->sa_mask
.sig
[3] = 0;
5306 unlock_user_struct(old_act
, arg3
, 1);
5309 struct target_old_sigaction
*old_act
;
5310 struct target_sigaction act
, oact
, *pact
;
5312 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5314 act
._sa_handler
= old_act
->_sa_handler
;
5315 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5316 act
.sa_flags
= old_act
->sa_flags
;
5317 act
.sa_restorer
= old_act
->sa_restorer
;
5318 unlock_user_struct(old_act
, arg2
, 0);
5323 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5324 if (!is_error(ret
) && arg3
) {
5325 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5327 old_act
->_sa_handler
= oact
._sa_handler
;
5328 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5329 old_act
->sa_flags
= oact
.sa_flags
;
5330 old_act
->sa_restorer
= oact
.sa_restorer
;
5331 unlock_user_struct(old_act
, arg3
, 1);
5337 case TARGET_NR_rt_sigaction
:
5339 #if defined(TARGET_ALPHA)
5340 struct target_sigaction act
, oact
, *pact
= 0;
5341 struct target_rt_sigaction
*rt_act
;
5342 /* ??? arg4 == sizeof(sigset_t). */
5344 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5346 act
._sa_handler
= rt_act
->_sa_handler
;
5347 act
.sa_mask
= rt_act
->sa_mask
;
5348 act
.sa_flags
= rt_act
->sa_flags
;
5349 act
.sa_restorer
= arg5
;
5350 unlock_user_struct(rt_act
, arg2
, 0);
5353 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5354 if (!is_error(ret
) && arg3
) {
5355 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5357 rt_act
->_sa_handler
= oact
._sa_handler
;
5358 rt_act
->sa_mask
= oact
.sa_mask
;
5359 rt_act
->sa_flags
= oact
.sa_flags
;
5360 unlock_user_struct(rt_act
, arg3
, 1);
5363 struct target_sigaction
*act
;
5364 struct target_sigaction
*oact
;
5367 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5372 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5373 ret
= -TARGET_EFAULT
;
5374 goto rt_sigaction_fail
;
5378 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5381 unlock_user_struct(act
, arg2
, 0);
5383 unlock_user_struct(oact
, arg3
, 1);
5387 #ifdef TARGET_NR_sgetmask /* not on alpha */
5388 case TARGET_NR_sgetmask
:
5391 abi_ulong target_set
;
5392 sigprocmask(0, NULL
, &cur_set
);
5393 host_to_target_old_sigset(&target_set
, &cur_set
);
5398 #ifdef TARGET_NR_ssetmask /* not on alpha */
5399 case TARGET_NR_ssetmask
:
5401 sigset_t set
, oset
, cur_set
;
5402 abi_ulong target_set
= arg1
;
5403 sigprocmask(0, NULL
, &cur_set
);
5404 target_to_host_old_sigset(&set
, &target_set
);
5405 sigorset(&set
, &set
, &cur_set
);
5406 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5407 host_to_target_old_sigset(&target_set
, &oset
);
5412 #ifdef TARGET_NR_sigprocmask
5413 case TARGET_NR_sigprocmask
:
5415 #if defined(TARGET_ALPHA)
5416 sigset_t set
, oldset
;
5421 case TARGET_SIG_BLOCK
:
5424 case TARGET_SIG_UNBLOCK
:
5427 case TARGET_SIG_SETMASK
:
5431 ret
= -TARGET_EINVAL
;
5435 target_to_host_old_sigset(&set
, &mask
);
5437 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5439 if (!is_error(ret
)) {
5440 host_to_target_old_sigset(&mask
, &oldset
);
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5445 sigset_t set
, oldset
, *set_ptr
;
5450 case TARGET_SIG_BLOCK
:
5453 case TARGET_SIG_UNBLOCK
:
5456 case TARGET_SIG_SETMASK
:
5460 ret
= -TARGET_EINVAL
;
5463 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5465 target_to_host_old_sigset(&set
, p
);
5466 unlock_user(p
, arg2
, 0);
5472 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5473 if (!is_error(ret
) && arg3
) {
5474 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5476 host_to_target_old_sigset(p
, &oldset
);
5477 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5483 case TARGET_NR_rt_sigprocmask
:
5486 sigset_t set
, oldset
, *set_ptr
;
5490 case TARGET_SIG_BLOCK
:
5493 case TARGET_SIG_UNBLOCK
:
5496 case TARGET_SIG_SETMASK
:
5500 ret
= -TARGET_EINVAL
;
5503 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5505 target_to_host_sigset(&set
, p
);
5506 unlock_user(p
, arg2
, 0);
5512 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5513 if (!is_error(ret
) && arg3
) {
5514 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5516 host_to_target_sigset(p
, &oldset
);
5517 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5521 #ifdef TARGET_NR_sigpending
5522 case TARGET_NR_sigpending
:
5525 ret
= get_errno(sigpending(&set
));
5526 if (!is_error(ret
)) {
5527 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5529 host_to_target_old_sigset(p
, &set
);
5530 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5535 case TARGET_NR_rt_sigpending
:
5538 ret
= get_errno(sigpending(&set
));
5539 if (!is_error(ret
)) {
5540 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5542 host_to_target_sigset(p
, &set
);
5543 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5547 #ifdef TARGET_NR_sigsuspend
5548 case TARGET_NR_sigsuspend
:
5551 #if defined(TARGET_ALPHA)
5552 abi_ulong mask
= arg1
;
5553 target_to_host_old_sigset(&set
, &mask
);
5555 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5557 target_to_host_old_sigset(&set
, p
);
5558 unlock_user(p
, arg1
, 0);
5560 ret
= get_errno(sigsuspend(&set
));
5564 case TARGET_NR_rt_sigsuspend
:
5567 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5569 target_to_host_sigset(&set
, p
);
5570 unlock_user(p
, arg1
, 0);
5571 ret
= get_errno(sigsuspend(&set
));
5574 case TARGET_NR_rt_sigtimedwait
:
5577 struct timespec uts
, *puts
;
5580 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5582 target_to_host_sigset(&set
, p
);
5583 unlock_user(p
, arg1
, 0);
5586 target_to_host_timespec(puts
, arg3
);
5590 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5591 if (!is_error(ret
) && arg2
) {
5592 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5594 host_to_target_siginfo(p
, &uinfo
);
5595 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5599 case TARGET_NR_rt_sigqueueinfo
:
5602 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5604 target_to_host_siginfo(&uinfo
, p
);
5605 unlock_user(p
, arg1
, 0);
5606 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
5619 case TARGET_NR_sethostname
:
5620 if (!(p
= lock_user_string(arg1
)))
5622 ret
= get_errno(sethostname(p
, arg2
));
5623 unlock_user(p
, arg1
, 0);
5625 case TARGET_NR_setrlimit
:
5627 int resource
= target_to_host_resource(arg1
);
5628 struct target_rlimit
*target_rlim
;
5630 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5632 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5633 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5634 unlock_user_struct(target_rlim
, arg2
, 0);
5635 ret
= get_errno(setrlimit(resource
, &rlim
));
5638 case TARGET_NR_getrlimit
:
5640 int resource
= target_to_host_resource(arg1
);
5641 struct target_rlimit
*target_rlim
;
5644 ret
= get_errno(getrlimit(resource
, &rlim
));
5645 if (!is_error(ret
)) {
5646 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5648 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5649 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5650 unlock_user_struct(target_rlim
, arg2
, 1);
5654 case TARGET_NR_getrusage
:
5656 struct rusage rusage
;
5657 ret
= get_errno(getrusage(arg1
, &rusage
));
5658 if (!is_error(ret
)) {
5659 host_to_target_rusage(arg2
, &rusage
);
5663 case TARGET_NR_gettimeofday
:
5666 ret
= get_errno(gettimeofday(&tv
, NULL
));
5667 if (!is_error(ret
)) {
5668 if (copy_to_user_timeval(arg1
, &tv
))
5673 case TARGET_NR_settimeofday
:
5676 if (copy_from_user_timeval(&tv
, arg1
))
5678 ret
= get_errno(settimeofday(&tv
, NULL
));
5681 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5682 case TARGET_NR_select
:
5684 struct target_sel_arg_struct
*sel
;
5685 abi_ulong inp
, outp
, exp
, tvp
;
5688 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5690 nsel
= tswapl(sel
->n
);
5691 inp
= tswapl(sel
->inp
);
5692 outp
= tswapl(sel
->outp
);
5693 exp
= tswapl(sel
->exp
);
5694 tvp
= tswapl(sel
->tvp
);
5695 unlock_user_struct(sel
, arg1
, 0);
5696 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5700 #ifdef TARGET_NR_pselect6
5701 case TARGET_NR_pselect6
:
5703 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
5704 fd_set rfds
, wfds
, efds
;
5705 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
5706 struct timespec ts
, *ts_ptr
;
5709 * The 6th arg is actually two args smashed together,
5710 * so we cannot use the C library.
5718 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
5719 target_sigset_t
*target_sigset
;
5727 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
5731 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
5735 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
5741 * This takes a timespec, and not a timeval, so we cannot
5742 * use the do_select() helper ...
5745 if (target_to_host_timespec(&ts
, ts_addr
)) {
5753 /* Extract the two packed args for the sigset */
5756 sig
.size
= _NSIG
/ 8;
5758 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
5762 arg_sigset
= tswapl(arg7
[0]);
5763 arg_sigsize
= tswapl(arg7
[1]);
5764 unlock_user(arg7
, arg6
, 0);
5768 if (arg_sigsize
!= sizeof(*target_sigset
)) {
5769 /* Like the kernel, we enforce correct size sigsets */
5770 ret
= -TARGET_EINVAL
;
5773 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
5774 sizeof(*target_sigset
), 1);
5775 if (!target_sigset
) {
5778 target_to_host_sigset(&set
, target_sigset
);
5779 unlock_user(target_sigset
, arg_sigset
, 0);
5787 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
5790 if (!is_error(ret
)) {
5791 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
5793 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
5795 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
5798 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
5804 case TARGET_NR_symlink
:
5807 p
= lock_user_string(arg1
);
5808 p2
= lock_user_string(arg2
);
5810 ret
= -TARGET_EFAULT
;
5812 ret
= get_errno(symlink(p
, p2
));
5813 unlock_user(p2
, arg2
, 0);
5814 unlock_user(p
, arg1
, 0);
5817 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5818 case TARGET_NR_symlinkat
:
5821 p
= lock_user_string(arg1
);
5822 p2
= lock_user_string(arg3
);
5824 ret
= -TARGET_EFAULT
;
5826 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5827 unlock_user(p2
, arg3
, 0);
5828 unlock_user(p
, arg1
, 0);
5832 #ifdef TARGET_NR_oldlstat
5833 case TARGET_NR_oldlstat
:
5836 case TARGET_NR_readlink
:
5839 p
= lock_user_string(arg1
);
5840 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5842 ret
= -TARGET_EFAULT
;
            if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
                char real[PATH_MAX];
                temp = realpath(exec_path, real);
                ret = (temp == NULL) ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg3, "%s", real);
                ret = get_errno(readlink(path(p), p2, arg3));
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
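            /* readlink("/proc/self/exe") is intercepted above because under
             * emulation it would resolve to the qemu binary itself;
             * substituting realpath(exec_path) lets the guest see the path of
             * the program actually being emulated instead. */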
5857 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5858 case TARGET_NR_readlinkat
:
5861 p
= lock_user_string(arg2
);
5862 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5864 ret
= -TARGET_EFAULT
;
5866 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5867 unlock_user(p2
, arg3
, ret
);
5868 unlock_user(p
, arg2
, 0);
5872 #ifdef TARGET_NR_uselib
5873 case TARGET_NR_uselib
:
5876 #ifdef TARGET_NR_swapon
5877 case TARGET_NR_swapon
:
5878 if (!(p
= lock_user_string(arg1
)))
5880 ret
= get_errno(swapon(p
, arg2
));
5881 unlock_user(p
, arg1
, 0);
5884 case TARGET_NR_reboot
:
5886 #ifdef TARGET_NR_readdir
5887 case TARGET_NR_readdir
:
5890 #ifdef TARGET_NR_mmap
5891 case TARGET_NR_mmap
:
5892 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5893 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5894 || defined(TARGET_S390X)
5897 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5898 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5906 unlock_user(v
, arg1
, 0);
5907 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5908 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5912 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5913 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5919 #ifdef TARGET_NR_mmap2
5920 case TARGET_NR_mmap2
:
5922 #define MMAP_SHIFT 12
5924 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5925 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5927 arg6
<< MMAP_SHIFT
));
5930 case TARGET_NR_munmap
:
5931 ret
= get_errno(target_munmap(arg1
, arg2
));
5933 case TARGET_NR_mprotect
:
5935 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
5936 /* Special hack to detect libc making the stack executable. */
5937 if ((arg3
& PROT_GROWSDOWN
)
5938 && arg1
>= ts
->info
->stack_limit
5939 && arg1
<= ts
->info
->start_stack
) {
5940 arg3
&= ~PROT_GROWSDOWN
;
5941 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
5942 arg1
= ts
->info
->stack_limit
;
5945 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5947 #ifdef TARGET_NR_mremap
5948 case TARGET_NR_mremap
:
5949 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5952 /* ??? msync/mlock/munlock are broken for softmmu. */
5953 #ifdef TARGET_NR_msync
5954 case TARGET_NR_msync
:
5955 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5958 #ifdef TARGET_NR_mlock
5959 case TARGET_NR_mlock
:
5960 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5963 #ifdef TARGET_NR_munlock
5964 case TARGET_NR_munlock
:
5965 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5968 #ifdef TARGET_NR_mlockall
5969 case TARGET_NR_mlockall
:
5970 ret
= get_errno(mlockall(arg1
));
5973 #ifdef TARGET_NR_munlockall
5974 case TARGET_NR_munlockall
:
5975 ret
= get_errno(munlockall());
5978 case TARGET_NR_truncate
:
5979 if (!(p
= lock_user_string(arg1
)))
5981 ret
= get_errno(truncate(p
, arg2
));
5982 unlock_user(p
, arg1
, 0);
5984 case TARGET_NR_ftruncate
:
5985 ret
= get_errno(ftruncate(arg1
, arg2
));
5987 case TARGET_NR_fchmod
:
5988 ret
= get_errno(fchmod(arg1
, arg2
));
5990 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5991 case TARGET_NR_fchmodat
:
5992 if (!(p
= lock_user_string(arg2
)))
5994 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5995 unlock_user(p
, arg2
, 0);
5998 case TARGET_NR_getpriority
:
5999 /* libc does special remapping of the return value of
6000 * sys_getpriority() so it's just easiest to call
6001 * sys_getpriority() directly rather than through libc. */
6002 ret
= get_errno(sys_getpriority(arg1
, arg2
));
6004 case TARGET_NR_setpriority
:
6005 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6007 #ifdef TARGET_NR_profil
6008 case TARGET_NR_profil
:
6011 case TARGET_NR_statfs
:
6012 if (!(p
= lock_user_string(arg1
)))
6014 ret
= get_errno(statfs(path(p
), &stfs
));
6015 unlock_user(p
, arg1
, 0);
6017 if (!is_error(ret
)) {
6018 struct target_statfs
*target_stfs
;
6020 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6022 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6023 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6024 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6025 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6026 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6027 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6028 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6029 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6030 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6031 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6032 unlock_user_struct(target_stfs
, arg2
, 1);
6035 case TARGET_NR_fstatfs
:
6036 ret
= get_errno(fstatfs(arg1
, &stfs
));
6037 goto convert_statfs
;
6038 #ifdef TARGET_NR_statfs64
6039 case TARGET_NR_statfs64
:
6040 if (!(p
= lock_user_string(arg1
)))
6042 ret
= get_errno(statfs(path(p
), &stfs
));
6043 unlock_user(p
, arg1
, 0);
6045 if (!is_error(ret
)) {
6046 struct target_statfs64
*target_stfs
;
6048 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6050 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6051 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6052 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6053 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6054 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6055 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6056 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6057 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6058 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6059 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6060 unlock_user_struct(target_stfs
, arg3
, 1);
6063 case TARGET_NR_fstatfs64
:
6064 ret
= get_errno(fstatfs(arg1
, &stfs
));
6065 goto convert_statfs64
;
6067 #ifdef TARGET_NR_ioperm
6068 case TARGET_NR_ioperm
:
6071 #ifdef TARGET_NR_socketcall
6072 case TARGET_NR_socketcall
:
6073 ret
= do_socketcall(arg1
, arg2
);
6076 #ifdef TARGET_NR_accept
6077 case TARGET_NR_accept
:
6078 ret
= do_accept(arg1
, arg2
, arg3
);
6081 #ifdef TARGET_NR_bind
6082 case TARGET_NR_bind
:
6083 ret
= do_bind(arg1
, arg2
, arg3
);
6086 #ifdef TARGET_NR_connect
6087 case TARGET_NR_connect
:
6088 ret
= do_connect(arg1
, arg2
, arg3
);
6091 #ifdef TARGET_NR_getpeername
6092 case TARGET_NR_getpeername
:
6093 ret
= do_getpeername(arg1
, arg2
, arg3
);
6096 #ifdef TARGET_NR_getsockname
6097 case TARGET_NR_getsockname
:
6098 ret
= do_getsockname(arg1
, arg2
, arg3
);
6101 #ifdef TARGET_NR_getsockopt
6102 case TARGET_NR_getsockopt
:
6103 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6106 #ifdef TARGET_NR_listen
6107 case TARGET_NR_listen
:
6108 ret
= get_errno(listen(arg1
, arg2
));
6111 #ifdef TARGET_NR_recv
6112 case TARGET_NR_recv
:
6113 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6116 #ifdef TARGET_NR_recvfrom
6117 case TARGET_NR_recvfrom
:
6118 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6121 #ifdef TARGET_NR_recvmsg
6122 case TARGET_NR_recvmsg
:
6123 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6126 #ifdef TARGET_NR_send
6127 case TARGET_NR_send
:
6128 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6131 #ifdef TARGET_NR_sendmsg
6132 case TARGET_NR_sendmsg
:
6133 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6136 #ifdef TARGET_NR_sendto
6137 case TARGET_NR_sendto
:
6138 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6141 #ifdef TARGET_NR_shutdown
6142 case TARGET_NR_shutdown
:
6143 ret
= get_errno(shutdown(arg1
, arg2
));
6146 #ifdef TARGET_NR_socket
6147 case TARGET_NR_socket
:
6148 ret
= do_socket(arg1
, arg2
, arg3
);
6151 #ifdef TARGET_NR_socketpair
6152 case TARGET_NR_socketpair
:
6153 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6156 #ifdef TARGET_NR_setsockopt
6157 case TARGET_NR_setsockopt
:
6158 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6162 case TARGET_NR_syslog
:
6163 if (!(p
= lock_user_string(arg2
)))
6165 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6166 unlock_user(p
, arg2
, 0);
6169 case TARGET_NR_setitimer
:
6171 struct itimerval value
, ovalue
, *pvalue
;
6175 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6176 || copy_from_user_timeval(&pvalue
->it_value
,
6177 arg2
+ sizeof(struct target_timeval
)))
6182 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6183 if (!is_error(ret
) && arg3
) {
6184 if (copy_to_user_timeval(arg3
,
6185 &ovalue
.it_interval
)
6186 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6192 case TARGET_NR_getitimer
:
6194 struct itimerval value
;
6196 ret
= get_errno(getitimer(arg1
, &value
));
6197 if (!is_error(ret
) && arg2
) {
6198 if (copy_to_user_timeval(arg2
,
6200 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6206 case TARGET_NR_stat
:
6207 if (!(p
= lock_user_string(arg1
)))
6209 ret
= get_errno(stat(path(p
), &st
));
6210 unlock_user(p
, arg1
, 0);
6212 case TARGET_NR_lstat
:
6213 if (!(p
= lock_user_string(arg1
)))
6215 ret
= get_errno(lstat(path(p
), &st
));
6216 unlock_user(p
, arg1
, 0);
6218 case TARGET_NR_fstat
:
6220 ret
= get_errno(fstat(arg1
, &st
));
6222 if (!is_error(ret
)) {
6223 struct target_stat
*target_st
;
6225 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6227 memset(target_st
, 0, sizeof(*target_st
));
6228 __put_user(st
.st_dev
, &target_st
->st_dev
);
6229 __put_user(st
.st_ino
, &target_st
->st_ino
);
6230 __put_user(st
.st_mode
, &target_st
->st_mode
);
6231 __put_user(st
.st_uid
, &target_st
->st_uid
);
6232 __put_user(st
.st_gid
, &target_st
->st_gid
);
6233 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6234 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6235 __put_user(st
.st_size
, &target_st
->st_size
);
6236 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6237 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6238 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6239 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6240 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6241 unlock_user_struct(target_st
, arg2
, 1);
6245 #ifdef TARGET_NR_olduname
6246 case TARGET_NR_olduname
:
6249 #ifdef TARGET_NR_iopl
6250 case TARGET_NR_iopl
:
6253 case TARGET_NR_vhangup
:
6254 ret
= get_errno(vhangup());
6256 #ifdef TARGET_NR_idle
6257 case TARGET_NR_idle
:
6260 #ifdef TARGET_NR_syscall
6261 case TARGET_NR_syscall
:
6262 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6263 arg6
, arg7
, arg8
, 0);
6266 case TARGET_NR_wait4
:
6269 abi_long status_ptr
= arg2
;
6270 struct rusage rusage
, *rusage_ptr
;
6271 abi_ulong target_rusage
= arg4
;
6273 rusage_ptr
= &rusage
;
6276 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6277 if (!is_error(ret
)) {
6279 status
= host_to_target_waitstatus(status
);
6280 if (put_user_s32(status
, status_ptr
))
6284 host_to_target_rusage(target_rusage
, &rusage
);
6288 #ifdef TARGET_NR_swapoff
6289 case TARGET_NR_swapoff
:
6290 if (!(p
= lock_user_string(arg1
)))
6292 ret
= get_errno(swapoff(p
));
6293 unlock_user(p
, arg1
, 0);
6296 case TARGET_NR_sysinfo
:
6298 struct target_sysinfo
*target_value
;
6299 struct sysinfo value
;
6300 ret
= get_errno(sysinfo(&value
));
6301 if (!is_error(ret
) && arg1
)
6303 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6305 __put_user(value
.uptime
, &target_value
->uptime
);
6306 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6307 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6308 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6309 __put_user(value
.totalram
, &target_value
->totalram
);
6310 __put_user(value
.freeram
, &target_value
->freeram
);
6311 __put_user(value
.sharedram
, &target_value
->sharedram
);
6312 __put_user(value
.bufferram
, &target_value
->bufferram
);
6313 __put_user(value
.totalswap
, &target_value
->totalswap
);
6314 __put_user(value
.freeswap
, &target_value
->freeswap
);
6315 __put_user(value
.procs
, &target_value
->procs
);
6316 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6317 __put_user(value
.freehigh
, &target_value
->freehigh
);
6318 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6319 unlock_user_struct(target_value
, arg1
, 1);
6323 #ifdef TARGET_NR_ipc
6325 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6328 #ifdef TARGET_NR_semget
6329 case TARGET_NR_semget
:
6330 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6333 #ifdef TARGET_NR_semop
6334 case TARGET_NR_semop
:
6335 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6338 #ifdef TARGET_NR_semctl
6339 case TARGET_NR_semctl
:
6340 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6343 #ifdef TARGET_NR_msgctl
6344 case TARGET_NR_msgctl
:
6345 ret
= do_msgctl(arg1
, arg2
, arg3
);
6348 #ifdef TARGET_NR_msgget
6349 case TARGET_NR_msgget
:
6350 ret
= get_errno(msgget(arg1
, arg2
));
6353 #ifdef TARGET_NR_msgrcv
6354 case TARGET_NR_msgrcv
:
6355 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6358 #ifdef TARGET_NR_msgsnd
6359 case TARGET_NR_msgsnd
:
6360 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6363 #ifdef TARGET_NR_shmget
6364 case TARGET_NR_shmget
:
6365 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6368 #ifdef TARGET_NR_shmctl
6369 case TARGET_NR_shmctl
:
6370 ret
= do_shmctl(arg1
, arg2
, arg3
);
6373 #ifdef TARGET_NR_shmat
6374 case TARGET_NR_shmat
:
6375 ret
= do_shmat(arg1
, arg2
, arg3
);
6378 #ifdef TARGET_NR_shmdt
6379 case TARGET_NR_shmdt
:
6380 ret
= do_shmdt(arg1
);
6383 case TARGET_NR_fsync
:
6384 ret
= get_errno(fsync(arg1
));
6386 case TARGET_NR_clone
:
6387 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6388 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6389 #elif defined(TARGET_CRIS)
6390 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6391 #elif defined(TARGET_S390X)
6392 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6394 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6397 #ifdef __NR_exit_group
6398 /* new thread calls */
6399 case TARGET_NR_exit_group
:
6403 gdb_exit(cpu_env
, arg1
);
6404 ret
= get_errno(exit_group(arg1
));
6407 case TARGET_NR_setdomainname
:
6408 if (!(p
= lock_user_string(arg1
)))
6410 ret
= get_errno(setdomainname(p
, arg2
));
6411 unlock_user(p
, arg1
, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy(buf->machine, cpu_to_uname_machine(cpu_env));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy(buf->release, qemu_uname_release);
            unlock_user_struct(buf, arg1, 1);
6433 case TARGET_NR_modify_ldt
:
6434 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6436 #if !defined(TARGET_X86_64)
6437 case TARGET_NR_vm86old
:
6439 case TARGET_NR_vm86
:
6440 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6444 case TARGET_NR_adjtimex
:
6446 #ifdef TARGET_NR_create_module
6447 case TARGET_NR_create_module
:
6449 case TARGET_NR_init_module
:
6450 case TARGET_NR_delete_module
:
6451 #ifdef TARGET_NR_get_kernel_syms
6452 case TARGET_NR_get_kernel_syms
:
6455 case TARGET_NR_quotactl
:
6457 case TARGET_NR_getpgid
:
6458 ret
= get_errno(getpgid(arg1
));
6460 case TARGET_NR_fchdir
:
6461 ret
= get_errno(fchdir(arg1
));
6463 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6464 case TARGET_NR_bdflush
:
6467 #ifdef TARGET_NR_sysfs
6468 case TARGET_NR_sysfs
:
6471 case TARGET_NR_personality
:
6472 ret
= get_errno(personality(arg1
));
6474 #ifdef TARGET_NR_afs_syscall
6475 case TARGET_NR_afs_syscall
:
6478 #ifdef TARGET_NR__llseek /* Not on alpha */
6479 case TARGET_NR__llseek
:
6482 #if !defined(__NR_llseek)
6483 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6485 ret
= get_errno(res
);
6490 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6492 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6498 case TARGET_NR_getdents
:
6499 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6501 struct target_dirent
*target_dirp
;
6502 struct linux_dirent
*dirp
;
6503 abi_long count
= arg3
;
6505 dirp
= malloc(count
);
6507 ret
= -TARGET_ENOMEM
;
6511 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6512 if (!is_error(ret
)) {
6513 struct linux_dirent
*de
;
6514 struct target_dirent
*tde
;
6516 int reclen
, treclen
;
6517 int count1
, tnamelen
;
6521 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6525 reclen
= de
->d_reclen
;
6526 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6527 tde
->d_reclen
= tswap16(treclen
);
6528 tde
->d_ino
= tswapl(de
->d_ino
);
6529 tde
->d_off
= tswapl(de
->d_off
);
6530 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6533 /* XXX: may not be correct */
6534 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6535 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6537 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6541 unlock_user(target_dirp
, arg2
, ret
);
6547 struct linux_dirent
*dirp
;
6548 abi_long count
= arg3
;
6550 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6552 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6553 if (!is_error(ret
)) {
6554 struct linux_dirent
*de
;
6559 reclen
= de
->d_reclen
;
6562 de
->d_reclen
= tswap16(reclen
);
6563 tswapls(&de
->d_ino
);
6564 tswapls(&de
->d_off
);
6565 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6569 unlock_user(dirp
, arg2
, ret
);
6573 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6574 case TARGET_NR_getdents64
:
6576 struct linux_dirent64
*dirp
;
6577 abi_long count
= arg3
;
6578 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6580 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6581 if (!is_error(ret
)) {
6582 struct linux_dirent64
*de
;
6587 reclen
= de
->d_reclen
;
6590 de
->d_reclen
= tswap16(reclen
);
6591 tswap64s((uint64_t *)&de
->d_ino
);
6592 tswap64s((uint64_t *)&de
->d_off
);
6593 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6597 unlock_user(dirp
, arg2
, ret
);
6600 #endif /* TARGET_NR_getdents64 */
6601 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6603 case TARGET_NR_select
:
6605 case TARGET_NR__newselect
:
6607 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6610 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6611 # ifdef TARGET_NR_poll
6612 case TARGET_NR_poll
:
6614 # ifdef TARGET_NR_ppoll
6615 case TARGET_NR_ppoll
:
6618 struct target_pollfd
*target_pfd
;
6619 unsigned int nfds
= arg2
;
6624 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6628 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6629 for(i
= 0; i
< nfds
; i
++) {
6630 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6631 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6634 # ifdef TARGET_NR_ppoll
6635 if (num
== TARGET_NR_ppoll
) {
6636 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6637 target_sigset_t
*target_set
;
6638 sigset_t _set
, *set
= &_set
;
6641 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6642 unlock_user(target_pfd
, arg1
, 0);
6650 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6652 unlock_user(target_pfd
, arg1
, 0);
6655 target_to_host_sigset(set
, target_set
);
6660 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6662 if (!is_error(ret
) && arg3
) {
6663 host_to_target_timespec(arg3
, timeout_ts
);
6666 unlock_user(target_set
, arg4
, 0);
6670 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6672 if (!is_error(ret
)) {
6673 for(i
= 0; i
< nfds
; i
++) {
6674 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6677 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6681 case TARGET_NR_flock
:
6682 /* NOTE: the flock constant seems to be the same for every
6684 ret
= get_errno(flock(arg1
, arg2
));
6686 case TARGET_NR_readv
:
6691 vec
= alloca(count
* sizeof(struct iovec
));
6692 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6694 ret
= get_errno(readv(arg1
, vec
, count
));
6695 unlock_iovec(vec
, arg2
, count
, 1);
6698 case TARGET_NR_writev
:
6703 vec
= alloca(count
* sizeof(struct iovec
));
6704 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6706 ret
= get_errno(writev(arg1
, vec
, count
));
6707 unlock_iovec(vec
, arg2
, count
, 0);
6710 case TARGET_NR_getsid
:
6711 ret
= get_errno(getsid(arg1
));
6713 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6714 case TARGET_NR_fdatasync
:
6715 ret
= get_errno(fdatasync(arg1
));
6718 case TARGET_NR__sysctl
:
6719 /* We don't implement this, but ENOTDIR is always a safe
6721 ret
= -TARGET_ENOTDIR
;
6723 case TARGET_NR_sched_getaffinity
:
6725 unsigned int mask_size
;
6726 unsigned long *mask
;
6729 * sched_getaffinity needs multiples of ulong, so need to take
6730 * care of mismatches between target ulong and host ulong sizes.
6732 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6733 ret
= -TARGET_EINVAL
;
6736 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6738 mask
= alloca(mask_size
);
6739 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6741 if (!is_error(ret
)) {
6742 if (copy_to_user(arg3
, mask
, ret
)) {
6748 case TARGET_NR_sched_setaffinity
:
6750 unsigned int mask_size
;
6751 unsigned long *mask
;
6754 * sched_setaffinity needs multiples of ulong, so need to take
6755 * care of mismatches between target ulong and host ulong sizes.
6757 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6758 ret
= -TARGET_EINVAL
;
6761 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6763 mask
= alloca(mask_size
);
6764 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6767 memcpy(mask
, p
, arg2
);
6768 unlock_user_struct(p
, arg2
, 0);
6770 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
6773 case TARGET_NR_sched_setparam
:
6775 struct sched_param
*target_schp
;
6776 struct sched_param schp
;
6778 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6780 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6781 unlock_user_struct(target_schp
, arg2
, 0);
6782 ret
= get_errno(sched_setparam(arg1
, &schp
));
6785 case TARGET_NR_sched_getparam
:
6787 struct sched_param
*target_schp
;
6788 struct sched_param schp
;
6789 ret
= get_errno(sched_getparam(arg1
, &schp
));
6790 if (!is_error(ret
)) {
6791 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6793 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6794 unlock_user_struct(target_schp
, arg2
, 1);
6798 case TARGET_NR_sched_setscheduler
:
6800 struct sched_param
*target_schp
;
6801 struct sched_param schp
;
6802 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6804 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6805 unlock_user_struct(target_schp
, arg3
, 0);
6806 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6809 case TARGET_NR_sched_getscheduler
:
6810 ret
= get_errno(sched_getscheduler(arg1
));
6812 case TARGET_NR_sched_yield
:
6813 ret
= get_errno(sched_yield());
6815 case TARGET_NR_sched_get_priority_max
:
6816 ret
= get_errno(sched_get_priority_max(arg1
));
6818 case TARGET_NR_sched_get_priority_min
:
6819 ret
= get_errno(sched_get_priority_min(arg1
));
6821 case TARGET_NR_sched_rr_get_interval
:
6824 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6825 if (!is_error(ret
)) {
6826 host_to_target_timespec(arg2
, &ts
);
6830 case TARGET_NR_nanosleep
:
6832 struct timespec req
, rem
;
6833 target_to_host_timespec(&req
, arg1
);
6834 ret
= get_errno(nanosleep(&req
, &rem
));
6835 if (is_error(ret
) && arg2
) {
6836 host_to_target_timespec(arg2
, &rem
);
6840 #ifdef TARGET_NR_query_module
6841 case TARGET_NR_query_module
:
6844 #ifdef TARGET_NR_nfsservctl
6845 case TARGET_NR_nfsservctl
:
6848 case TARGET_NR_prctl
:
6851 case PR_GET_PDEATHSIG
:
6854 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6855 if (!is_error(ret
) && arg2
6856 && put_user_ual(deathsig
, arg2
))
6861 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6865 #ifdef TARGET_NR_arch_prctl
6866 case TARGET_NR_arch_prctl
:
6867 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6868 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6874 #ifdef TARGET_NR_pread
6875 case TARGET_NR_pread
:
6876 if (regpairs_aligned(cpu_env
))
6878 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6880 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6881 unlock_user(p
, arg2
, ret
);
6883 case TARGET_NR_pwrite
:
6884 if (regpairs_aligned(cpu_env
))
6886 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6888 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6889 unlock_user(p
, arg2
, 0);
6892 #ifdef TARGET_NR_pread64
6893 case TARGET_NR_pread64
:
6894 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6896 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6897 unlock_user(p
, arg2
, ret
);
6899 case TARGET_NR_pwrite64
:
6900 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6902 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6903 unlock_user(p
, arg2
, 0);
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        break;
    case TARGET_NR_capget:
        goto unimplemented;
    case TARGET_NR_capset:
        goto unimplemented;
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
        break;
#else
        goto unimplemented;
#endif
    case TARGET_NR_sendfile:
        goto unimplemented;
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
                                0, 0, 0, 0));
        break;
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                goto efault;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
    (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2)))
            goto efault;
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
#else
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
#endif
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
        break;
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
        break;
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
                if (!target_grouplist)
                    goto efault;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * 2);
            }
        }
        break;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3),
                                     low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowuid(ruid), arg1)
                    || put_user_u16(high2lowuid(euid), arg2)
                    || put_user_u16(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowgid(rgid), arg1)
                    || put_user_u16(high2lowgid(egid), arg2)
                    || put_user_u16(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr.  */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                         | SWCR_TRAP_ENABLE_DZE
                                         | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                         | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64(swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64(swcr, arg2))
                    goto efault;
                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;

                if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                    /* Old exceptions are not signaled.  */
                    fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                    /* If any exceptions are set by this call and are
                       unmasked, send a signal.  */
                }
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
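            /* A minimal sketch of the reordering the comment above describes:
             * swap arg3 and arg4 into fadvise64_64 order and fall through to
             * the shared handling below (abi_long assumed for the temporary).
             */
            abi_long temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif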
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
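        /* One minimal way to honour the note above (sketch): treat the hint
           as a no-op and report success instead of calling the host
           madvise(). */
        ret = 0;
        break;
#endif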
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
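        /* Given the note above, a sketch of the remaining handling: report
           success without doing any work. */
        ret = 0;
        break;
#endif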
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef TARGET_NR_setxattr
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
    case TARGET_NR_setxattr:
        {
            void *n, *v;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (p && n && v) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
        {
            void *n, *v;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (p && n && v) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_removexattr:
        {
            void *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            }
            unlock_user(p, arg2, arg3);
        }
        break;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif /* CONFIG_SYNC_FILE_RANGE */
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}