/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>

int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);

#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include "qemu-common.h"
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu-xattr.h"
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include "linux_loop.h"
#include "cpu-uname.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
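/*
 * For illustration: _syscall3(int, sys_getdents, uint, fd,
 * struct linux_dirent *, dirp, uint, count) expands to
 *
 *     static int sys_getdents (uint fd, struct linux_dirent *dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * i.e. a thin static wrapper that traps straight into the host kernel via
 * syscall(2), bypassing any libc wrapper.
 */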
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
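/*
 * The __NR_sys_* aliases above exist so the _syscallN() wrappers below can
 * be declared under a sys_* name (avoiding clashes with the host libc
 * prototypes) while the ##-pasted __NR_sys_* token still resolves to the
 * real host syscall number.
 */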
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
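/*
 * Translation table for open(2)-style flag bits.  Each row gives a target
 * mask/bits pair and the corresponding host mask/bits pair; the generic
 * bitmask translation helpers walk this table to convert guest O_* flags
 * to host O_* flags (and back) one field at a time.
 */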
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
  return (0);
}
#undef COPY_UTSNAME_FIELD

static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
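/*
 * Note: like the getcwd syscall (and unlike libc getcwd()), sys_getcwd1()
 * reports the length of the path *including* the terminating NUL byte on
 * success, hence the strlen(buf)+1 above.
 */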
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
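/*
 * The wrappers above simply forward to the host libc *at() stubs; where the
 * guest ABI has no flags argument (faccessat, fchmodat) a flags value of 0
 * is passed explicitly.
 */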
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
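/*
 * Illustration of the errno path: if a host call fails with, say,
 * ECONNREFUSED, get_errno() returns -host_to_target_errno(ECONNREFUSED),
 * i.e. -TARGET_ECONNREFUSED, back to the guest.  Errno values with no
 * entry in the table pass through unchanged.
 */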
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
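/*
 * Helpers for select(): convert a guest fd_set, stored as an array of
 * abi_ulong words in guest byte order, into a host fd_set and back.
 */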
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
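/*
 * Example: with HOST_HZ 100 and a target using 250 ticks per second,
 * 100 host ticks (one second) are reported to the guest as
 * (100 * 250) / 100 = 250 target ticks.
 */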
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
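/* The POSIX message queue attribute copiers below follow the same
 * lock_user_struct()/__get_user()/__put_user() pattern as the timeval
 * helpers above, field by field. */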
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
#endif
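/*
 * As with the native syscall, the timeval is written back to the guest
 * after select() returns, so the guest observes the (possibly updated)
 * remaining timeout.
 */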
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
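/*
 * Note the asymmetry above: cmsg lengths are recomputed with CMSG_LEN()/
 * TARGET_CMSG_LEN() rather than copied, because host and target control
 * message headers may differ in size and alignment; only SCM_RIGHTS
 * payloads (arrays of file descriptors) are byte-swapped element by element.
 */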
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapal(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* NETLINK socket connections are not possible */
    return get_errno(socket(domain, type, protocol));
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapal(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (ret == 0)
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd, target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd, target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd, msg, flags;
            size_t len;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd, msg, flags;
            size_t len;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd, msg, flags, addr;
            size_t len;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd, msg, flags, addr;
            size_t len;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong fd, target_msg, flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd, level, optname, optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd, level, optname, optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
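/*
 * Example of the unpacking above: for a guest socketcall(SOCKOP_socket,
 * args), 'vptr' points at three consecutive abi_ulong values in guest
 * memory (domain, type, protocol), each fetched with get_user_ual() at
 * offsets 0, n and 2*n before being handed to do_socket().
 */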
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];
2291 struct target_ipc_perm
2298 unsigned short int mode
;
2299 unsigned short int __pad1
;
2300 unsigned short int __seq
;
2301 unsigned short int __pad2
;
2302 abi_ulong __unused1
;
2303 abi_ulong __unused2
;
2306 struct target_semid_ds
2308 struct target_ipc_perm sem_perm
;
2309 abi_ulong sem_otime
;
2310 abi_ulong __unused1
;
2311 abi_ulong sem_ctime
;
2312 abi_ulong __unused2
;
2313 abi_ulong sem_nsems
;
2314 abi_ulong __unused3
;
2315 abi_ulong __unused4
;
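
/* The ipc_perm converters operate on the permission block embedded in a
   target semid_ds, locked in guest memory with lock_user_struct(). */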
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapal(target_ip->__key);
    host_ip->uid = tswapal(target_ip->uid);
    host_ip->gid = tswapal(target_ip->gid);
    host_ip->cuid = tswapal(target_ip->cuid);
    host_ip->cgid = tswapal(target_ip->cgid);
    host_ip->mode = tswap16(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapal(host_ip->__key);
    target_ip->uid = tswapal(host_ip->uid);
    target_ip->gid = tswapal(host_ip->gid);
    target_ip->cuid = tswapal(host_ip->cuid);
    target_ip->cgid = tswapal(host_ip->cgid);
    target_ip->mode = tswap16(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
struct target_seminfo {

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;

union target_semun {
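
/* The semarray helpers size the transfer with an IPC_STAT call
   (sem_nsems) and then copy the unsigned short values one by one. */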
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 0);
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
    unsigned short *array;
    struct semid_ds semid_ds;

    semun.buf = &semid_ds;
    ret = semctl(semid, 0, IPC_STAT, semun);
        return get_errno(ret);
    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
        return -TARGET_EFAULT;
    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    unlock_user(array, target_addr, 1);
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;

        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswap32(arg.val);

        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);

        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);

        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);

        ret = get_errno(semctl(semid, semnum, cmd, NULL));
struct target_sembuf {
    unsigned short sem_num;

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
    struct target_sembuf *target_sembuf;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    unlock_user(target_sembuf, target_addr, 0);
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
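
/* Message queue descriptors: on 32-bit ABIs the target msqid_ds carries
   extra padding words next to the time fields (the __unusedN members). */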
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
struct target_msginfo {
    unsigned short int msgseg;

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(msgctl(msgid, cmd, NULL));

        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
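
/* msgsnd()/msgrcv() go through a malloc'ed host msgbuf bounce buffer;
   only mtype needs byte-swapping, mtext is copied verbatim. */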
struct target_msgbuf {

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    unlock_user_struct(target_mb, msgp, 0);
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));

        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);

    target_mb->mtype = tswapal(host_mb->mtype);

    unlock_user_struct(target_mb, msgp, 1);
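
/* Shared memory: target_shmid_ds mirrors the host shmid_ds, again with
   extra padding after the time fields on 32-bit ABIs. */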
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
struct target_shminfo {

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, NULL));
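
/* do_shmat() records every attached segment in shm_regions[] so that
   do_shmdt() can later clear the guest page flags for the same range. */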
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
    struct shmid_ds shm_info;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */

        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            host_raddr = (void *)-1;
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
static inline abi_long do_shmdt(abi_ulong shmaddr)
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);

    return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
    version = call >> 16;

        ret = do_semop(first, ptr, second);

        ret = get_errno(semget(first, second, third));

        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);

        ret = get_errno(msgget(first, second));

        ret = do_msgsnd(first, ptr, second, third);

        ret = do_msgctl(first, second, ptr);

            struct target_ipc_kludge {
            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;

            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

            unlock_user_struct(tmp, ptr, 0);
            ret = do_msgrcv(first, ptr, second, fifth, third);

            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            ret = -TARGET_EINVAL;

        ret = do_shmdt(ptr);

        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));

        /* IPC_* and SHM_* command values are the same on all linux platforms */
        ret = do_shmctl(first, second, third);

        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

    unsigned int target_cmd;
    unsigned int host_cmd;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
    int target_size_in, target_size_out;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    int i, extent_size = thunk_type_size(extent_arg_type, 0);

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
        fm = malloc(outbufsz);
            return -TARGET_ENOMEM;
        memcpy(fm, buf_temp, sizeof(struct fiemap));

    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
            ret = -TARGET_EFAULT;
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
            unlock_user(argptr, arg, target_size_out);
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
    const argtype *arg_type = ie->arg_type;
    struct ifconf *host_ifconf;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    abi_long target_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
        host_ifconf = malloc(outbufsz);
            return -TARGET_ENOMEM;
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));

    host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        /* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq ; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        unlock_user(argptr, target_ifc_buf, target_ifc_len);
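
/* Generic ioctl dispatch: ioctl_entries[] maps target request numbers to
   host request numbers plus an argument type description; entries with a
   do_ioctl callback (such as the fiemap and ifconf handlers above) do
   their own argument conversion. */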
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },

/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    uint8_t buf_temp[MAX_STRUCT_SIZE];

        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        if (ie->target_cmd == cmd)

    arg_type = ie->arg_type;
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);

    switch(arg_type[0]) {
        ret = get_errno(ioctl(fd, ie->host_cmd));
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
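
/* Terminal flag translation tables: each bitmask_transtbl entry pairs a
   target mask/value with the corresponding host mask/value. */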
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },

static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },

static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },

static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
static void host_to_target_termios (void *dst, const void *src)
    struct target_termios *target = dst;
    const struct host_termios *host = src;

        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },

static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);

/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* allocate the LDT */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
            read_exec_only == 1 &&
            limit_in_pages == 0 &&
            seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
        ret = read_ldt(ptr, bytecount);
        ret = write_ldt(env, ptr, bytecount, 1);
        ret = write_ldt(env, ptr, bytecount, 0);
        ret = -TARGET_ENOSYS;
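
/* set_thread_area/get_thread_area operate on the TLS slots of the guest
   GDT (TARGET_GDT_ENTRY_TLS_MIN..TARGET_GDT_ENTRY_TLS_MAX) rather than on
   the LDT used by modify_ldt above. */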
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ...  */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;

    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        ret = -TARGET_EINVAL;

#endif /* defined(TARGET_I386) */
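
/* Thread and process creation.  With CONFIG_USE_NPTL a CLONE_VM clone is
   mapped onto a host pthread; without it, clone()/__clone2() is called
   directly on a freshly allocated stack. */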
#define NEW_STACK_SIZE 0x40000

#if defined(CONFIG_USE_NPTL)

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;

static void *clone_func(void *arg)
    new_thread_info *info = arg;

    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    env->host_tid = info->tid;
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);

static int clone_func(void *arg)
    CPUState *env = arg;

/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;

        ts = g_malloc0(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);

        /* Wait for the child to initialize.  */
        pthread_cond_wait(&info.cond, &info.mutex);
        if (flags & CLONE_PARENT_SETTID)
            put_user_u32(ret, parent_tidptr);

        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);

        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway.  */
        new_stack = g_malloc0 (NEW_STACK_SIZE);
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);

        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
4105 static int target_to_host_fcntl_cmd(int cmd
)
4108 case TARGET_F_DUPFD
:
4109 case TARGET_F_GETFD
:
4110 case TARGET_F_SETFD
:
4111 case TARGET_F_GETFL
:
4112 case TARGET_F_SETFL
:
4114 case TARGET_F_GETLK
:
4116 case TARGET_F_SETLK
:
4118 case TARGET_F_SETLKW
:
4120 case TARGET_F_GETOWN
:
4122 case TARGET_F_SETOWN
:
4124 case TARGET_F_GETSIG
:
4126 case TARGET_F_SETSIG
:
4128 #if TARGET_ABI_BITS == 32
4129 case TARGET_F_GETLK64
:
4131 case TARGET_F_SETLK64
:
4133 case TARGET_F_SETLKW64
:
4136 case TARGET_F_SETLEASE
:
4138 case TARGET_F_GETLEASE
:
4140 #ifdef F_DUPFD_CLOEXEC
4141 case TARGET_F_DUPFD_CLOEXEC
:
4142 return F_DUPFD_CLOEXEC
;
4144 case TARGET_F_NOTIFY
:
4147 return -TARGET_EINVAL
;
4149 return -TARGET_EINVAL
;
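
/* fcntl emulation: file locks are converted between target_flock(64) and
   the host struct flock(64); F_GETFL/F_SETFL additionally translate the
   open flags through fcntl_flags_tbl. */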
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)

    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapal(fl.l_start);
            target_fl->l_len = tswapal(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapal(target_fl->l_start);
        fl.l_len = tswapal(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswap64(fl64.l_start);
            target_fl64->l_len = tswap64(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswap64(target_fl64->l_start);
        fl64.l_len = tswap64(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));

        ret = get_errno(fcntl(fd, cmd, arg));
static inline int high2lowuid(int uid)

static inline int high2lowgid(int gid)

static inline int low2highuid(int uid)
    if ((int16_t)uid == -1)

static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)

static inline int tswapid(int id)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)

static inline int high2lowgid(int gid)

static inline int low2highuid(int uid)

static inline int low2highgid(int gid)

static inline int tswapid(int id)

#endif /* USE_UID16 */
void syscall_init(void)
    const argtype *arg_type;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);

        /* Build target_to_host_errno_table[] table from
         * host_to_target_errno_table[]. */
        for (i=0; i < ERRNO_TABLE_SIZE; i++)
            target_to_host_errno_table[host_to_target_errno_table[i]] = i;

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
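
/* On 32-bit ABIs a 64-bit file offset arrives split across two registers;
   target_offset64() reassembles it honouring the target endianness. */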
4370 #if TARGET_ABI_BITS == 32
4371 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
4373 #ifdef TARGET_WORDS_BIGENDIAN
4374 return ((uint64_t)word0
<< 32) | word1
;
4376 return ((uint64_t)word1
<< 32) | word0
;
4379 #else /* TARGET_ABI_BITS == 32 */
4380 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
4384 #endif /* TARGET_ABI_BITS != 32 */
4386 #ifdef TARGET_NR_truncate64
4387 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
4392 if (regpairs_aligned(cpu_env
)) {
4396 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
4400 #ifdef TARGET_NR_ftruncate64
4401 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
4406 if (regpairs_aligned(cpu_env
)) {
4410 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
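/* host_to_target_stat64() writes a host struct stat into the guest's
 * stat64 layout, picking the ARM EABI variant of the structure when the
 * emulated CPU runs in EABI mode. */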
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
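/* get_osversion() packs the first three components of the kernel release
 * string (or of the user-supplied qemu_uname_release override) into one
 * integer, one byte per component, so callers can compare versions. */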
int get_osversion(void)
{
    static int osversion;
    struct new_utsname buf;
    const char *s;
    int i, n, tmp;

    if (osversion)
        return osversion;
    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    } else {
        if (sys_uname(&buf))
            return 0;
        s = buf.release;
    }
    tmp = 0;
    for (i = 0; i < 3; i++) {
        n = 0;
        while (*s >= '0' && *s <= '9') {
            n *= 10;
            n += *s - '0';
            s++;
        }
        tmp = (tmp << 8) + n;
        if (*s == '.')
            s++;
    }
    osversion = tmp;
    return osversion;
}
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    if (do_strace)
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch (num) {
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            TaskState *ts;
            CPUState **lastp;
            CPUState *p;

            cpu_list_lock();
            lastp = &first_cpu;
            p = first_cpu;
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
                p = p->next_cpu;
            }
            /* If we didn't find the CPU for this thread then something is
               horribly wrong.  */
            if (!p)
                abort();
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            cpu_list_unlock();
            ts = ((CPUState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                          NULL, NULL, 0);
            }
        }
#endif
        gdb_exit(cpu_env, arg1);
        _exit(arg1);
        ret = 0; /* avoid warning */
        break;
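    /* Simple fd-based I/O follows: the guest buffer or path string is pinned
     * with lock_user()/lock_user_string(), the host syscall is issued, and
     * the open flags are translated through fcntl_flags_tbl. */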
    case TARGET_NR_read:
        if (arg3 == 0)
            ret = 0;
        else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                goto efault;
            ret = get_errno(read(arg1, p, arg3));
            unlock_user(p, arg2, ret);
        }
        break;
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(open(path(p),
                             target_to_host_bitmask(arg2, fcntl_flags_tbl),
                             arg3));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_openat(arg1,
                                   path(p),
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                   arg4));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
        break;
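    /* The wait family returns host status words; they are converted with
     * host_to_target_waitstatus()/host_to_target_siginfo() before being
     * stored back into guest memory. */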
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
        break;
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(waitpid(arg1, &status, arg3));
            if (!is_error(ret) && arg2
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                goto efault;
        }
        break;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            info.si_pid = 0;
            ret = get_errno(waitid(arg1, arg2, &info, arg4));
            if (!is_error(ret) && arg3 && info.si_pid != 0) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
                    goto efault;
                host_to_target_siginfo(p, &info);
                unlock_user(p, arg3, sizeof(target_siginfo_t));
            }
        }
        break;
#endif
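    /* The path-based calls below all follow the same pattern: lock the guest
     * path string(s), invoke the host syscall (or its sys_*at wrapper), then
     * unlock the strings again. */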
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_link:
        {
            void * p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
        {
            void * p2 = NULL;
            if (!arg2 || !arg4)
                goto efault;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        break;
#endif
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
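    /* execve has to rebuild argv[] and envp[] on the host side: each guest
     * pointer is read with get_user_ual() and the referenced string is locked
     * before the host execve() is called. */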
4791 case TARGET_NR_execve
:
4793 char **argp
, **envp
;
4796 abi_ulong guest_argp
;
4797 abi_ulong guest_envp
;
4803 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4804 if (get_user_ual(addr
, gp
))
4812 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4813 if (get_user_ual(addr
, gp
))
4820 argp
= alloca((argc
+ 1) * sizeof(void *));
4821 envp
= alloca((envc
+ 1) * sizeof(void *));
4823 for (gp
= guest_argp
, q
= argp
; gp
;
4824 gp
+= sizeof(abi_ulong
), q
++) {
4825 if (get_user_ual(addr
, gp
))
4829 if (!(*q
= lock_user_string(addr
)))
4834 for (gp
= guest_envp
, q
= envp
; gp
;
4835 gp
+= sizeof(abi_ulong
), q
++) {
4836 if (get_user_ual(addr
, gp
))
4840 if (!(*q
= lock_user_string(addr
)))
4845 if (!(p
= lock_user_string(arg1
)))
4847 ret
= get_errno(execve(p
, argp
, envp
));
4848 unlock_user(p
, arg1
, 0);
4853 ret
= -TARGET_EFAULT
;
4856 for (gp
= guest_argp
, q
= argp
; *q
;
4857 gp
+= sizeof(abi_ulong
), q
++) {
4858 if (get_user_ual(addr
, gp
)
4861 unlock_user(*q
, addr
, 0);
4863 for (gp
= guest_envp
, q
= envp
; *q
;
4864 gp
+= sizeof(abi_ulong
), q
++) {
4865 if (get_user_ual(addr
, gp
)
4868 unlock_user(*q
, addr
, 0);
4872 case TARGET_NR_chdir
:
4873 if (!(p
= lock_user_string(arg1
)))
4875 ret
= get_errno(chdir(p
));
4876 unlock_user(p
, arg1
, 0);
4878 #ifdef TARGET_NR_time
4879 case TARGET_NR_time
:
4882 ret
= get_errno(time(&host_time
));
4885 && put_user_sal(host_time
, arg1
))
4890 case TARGET_NR_mknod
:
4891 if (!(p
= lock_user_string(arg1
)))
4893 ret
= get_errno(mknod(p
, arg2
, arg3
));
4894 unlock_user(p
, arg1
, 0);
4896 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4897 case TARGET_NR_mknodat
:
4898 if (!(p
= lock_user_string(arg2
)))
4900 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4901 unlock_user(p
, arg2
, 0);
4904 case TARGET_NR_chmod
:
4905 if (!(p
= lock_user_string(arg1
)))
4907 ret
= get_errno(chmod(p
, arg2
));
4908 unlock_user(p
, arg1
, 0);
4910 #ifdef TARGET_NR_break
4911 case TARGET_NR_break
:
4914 #ifdef TARGET_NR_oldstat
4915 case TARGET_NR_oldstat
:
4918 case TARGET_NR_lseek
:
4919 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4921 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4922 /* Alpha specific */
4923 case TARGET_NR_getxpid
:
4924 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4925 ret
= get_errno(getpid());
4928 #ifdef TARGET_NR_getpid
4929 case TARGET_NR_getpid
:
4930 ret
= get_errno(getpid());
4933 case TARGET_NR_mount
:
4935 /* need to look at the data field */
4937 p
= lock_user_string(arg1
);
4938 p2
= lock_user_string(arg2
);
4939 p3
= lock_user_string(arg3
);
4940 if (!p
|| !p2
|| !p3
)
4941 ret
= -TARGET_EFAULT
;
4943 /* FIXME - arg5 should be locked, but it isn't clear how to
4944 * do that since it's not guaranteed to be a NULL-terminated
4948 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
4950 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4952 unlock_user(p
, arg1
, 0);
4953 unlock_user(p2
, arg2
, 0);
4954 unlock_user(p3
, arg3
, 0);
4957 #ifdef TARGET_NR_umount
4958 case TARGET_NR_umount
:
4959 if (!(p
= lock_user_string(arg1
)))
4961 ret
= get_errno(umount(p
));
4962 unlock_user(p
, arg1
, 0);
4965 #ifdef TARGET_NR_stime /* not on alpha */
4966 case TARGET_NR_stime
:
4969 if (get_user_sal(host_time
, arg1
))
4971 ret
= get_errno(stime(&host_time
));
4975 case TARGET_NR_ptrace
:
4977 #ifdef TARGET_NR_alarm /* not on alpha */
4978 case TARGET_NR_alarm
:
4982 #ifdef TARGET_NR_oldfstat
4983 case TARGET_NR_oldfstat
:
4986 #ifdef TARGET_NR_pause /* not on alpha */
4987 case TARGET_NR_pause
:
4988 ret
= get_errno(pause());
4991 #ifdef TARGET_NR_utime
4992 case TARGET_NR_utime
:
4994 struct utimbuf tbuf
, *host_tbuf
;
4995 struct target_utimbuf
*target_tbuf
;
4997 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4999 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5000 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5001 unlock_user_struct(target_tbuf
, arg2
, 0);
5006 if (!(p
= lock_user_string(arg1
)))
5008 ret
= get_errno(utime(p
, host_tbuf
));
5009 unlock_user(p
, arg1
, 0);
5013 case TARGET_NR_utimes
:
5015 struct timeval
*tvp
, tv
[2];
5017 if (copy_from_user_timeval(&tv
[0], arg2
)
5018 || copy_from_user_timeval(&tv
[1],
5019 arg2
+ sizeof(struct target_timeval
)))
5025 if (!(p
= lock_user_string(arg1
)))
5027 ret
= get_errno(utimes(p
, tvp
));
5028 unlock_user(p
, arg1
, 0);
5031 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5032 case TARGET_NR_futimesat
:
5034 struct timeval
*tvp
, tv
[2];
5036 if (copy_from_user_timeval(&tv
[0], arg3
)
5037 || copy_from_user_timeval(&tv
[1],
5038 arg3
+ sizeof(struct target_timeval
)))
5044 if (!(p
= lock_user_string(arg2
)))
5046 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5047 unlock_user(p
, arg2
, 0);
5051 #ifdef TARGET_NR_stty
5052 case TARGET_NR_stty
:
5055 #ifdef TARGET_NR_gtty
5056 case TARGET_NR_gtty
:
5059 case TARGET_NR_access
:
5060 if (!(p
= lock_user_string(arg1
)))
5062 ret
= get_errno(access(path(p
), arg2
));
5063 unlock_user(p
, arg1
, 0);
5065 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5066 case TARGET_NR_faccessat
:
5067 if (!(p
= lock_user_string(arg2
)))
5069 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5070 unlock_user(p
, arg2
, 0);
5073 #ifdef TARGET_NR_nice /* not on alpha */
5074 case TARGET_NR_nice
:
5075 ret
= get_errno(nice(arg1
));
5078 #ifdef TARGET_NR_ftime
5079 case TARGET_NR_ftime
:
5082 case TARGET_NR_sync
:
5086 case TARGET_NR_kill
:
5087 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5089 case TARGET_NR_rename
:
5092 p
= lock_user_string(arg1
);
5093 p2
= lock_user_string(arg2
);
5095 ret
= -TARGET_EFAULT
;
5097 ret
= get_errno(rename(p
, p2
));
5098 unlock_user(p2
, arg2
, 0);
5099 unlock_user(p
, arg1
, 0);
5102 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5103 case TARGET_NR_renameat
:
5106 p
= lock_user_string(arg2
);
5107 p2
= lock_user_string(arg4
);
5109 ret
= -TARGET_EFAULT
;
5111 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5112 unlock_user(p2
, arg4
, 0);
5113 unlock_user(p
, arg2
, 0);
5117 case TARGET_NR_mkdir
:
5118 if (!(p
= lock_user_string(arg1
)))
5120 ret
= get_errno(mkdir(p
, arg2
));
5121 unlock_user(p
, arg1
, 0);
5123 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5124 case TARGET_NR_mkdirat
:
5125 if (!(p
= lock_user_string(arg2
)))
5127 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5128 unlock_user(p
, arg2
, 0);
5131 case TARGET_NR_rmdir
:
5132 if (!(p
= lock_user_string(arg1
)))
5134 ret
= get_errno(rmdir(p
));
5135 unlock_user(p
, arg1
, 0);
5138 ret
= get_errno(dup(arg1
));
5140 case TARGET_NR_pipe
:
5141 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5143 #ifdef TARGET_NR_pipe2
5144 case TARGET_NR_pipe2
:
5145 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5148 case TARGET_NR_times
:
5150 struct target_tms
*tmsp
;
5152 ret
= get_errno(times(&tms
));
5154 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5157 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5158 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5159 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5160 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5163 ret
= host_to_target_clock_t(ret
);
5166 #ifdef TARGET_NR_prof
5167 case TARGET_NR_prof
:
5170 #ifdef TARGET_NR_signal
5171 case TARGET_NR_signal
:
5174 case TARGET_NR_acct
:
5176 ret
= get_errno(acct(NULL
));
5178 if (!(p
= lock_user_string(arg1
)))
5180 ret
= get_errno(acct(path(p
)));
5181 unlock_user(p
, arg1
, 0);
5184 #ifdef TARGET_NR_umount2 /* not on alpha */
5185 case TARGET_NR_umount2
:
5186 if (!(p
= lock_user_string(arg1
)))
5188 ret
= get_errno(umount2(p
, arg2
));
5189 unlock_user(p
, arg1
, 0);
5192 #ifdef TARGET_NR_lock
5193 case TARGET_NR_lock
:
5196 case TARGET_NR_ioctl
:
5197 ret
= do_ioctl(arg1
, arg2
, arg3
);
5199 case TARGET_NR_fcntl
:
5200 ret
= do_fcntl(arg1
, arg2
, arg3
);
5202 #ifdef TARGET_NR_mpx
5206 case TARGET_NR_setpgid
:
5207 ret
= get_errno(setpgid(arg1
, arg2
));
5209 #ifdef TARGET_NR_ulimit
5210 case TARGET_NR_ulimit
:
5213 #ifdef TARGET_NR_oldolduname
5214 case TARGET_NR_oldolduname
:
5217 case TARGET_NR_umask
:
5218 ret
= get_errno(umask(arg1
));
5220 case TARGET_NR_chroot
:
5221 if (!(p
= lock_user_string(arg1
)))
5223 ret
= get_errno(chroot(p
));
5224 unlock_user(p
, arg1
, 0);
5226 case TARGET_NR_ustat
:
5228 case TARGET_NR_dup2
:
5229 ret
= get_errno(dup2(arg1
, arg2
));
5231 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5232 case TARGET_NR_dup3
:
5233 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5236 #ifdef TARGET_NR_getppid /* not on alpha */
5237 case TARGET_NR_getppid
:
5238 ret
= get_errno(getppid());
5241 case TARGET_NR_getpgrp
:
5242 ret
= get_errno(getpgrp());
5244 case TARGET_NR_setsid
:
5245 ret
= get_errno(setsid());
5247 #ifdef TARGET_NR_sigaction
5248 case TARGET_NR_sigaction
:
5250 #if defined(TARGET_ALPHA)
5251 struct target_sigaction act
, oact
, *pact
= 0;
5252 struct target_old_sigaction
*old_act
;
5254 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5256 act
._sa_handler
= old_act
->_sa_handler
;
5257 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5258 act
.sa_flags
= old_act
->sa_flags
;
5259 act
.sa_restorer
= 0;
5260 unlock_user_struct(old_act
, arg2
, 0);
5263 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5264 if (!is_error(ret
) && arg3
) {
5265 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5267 old_act
->_sa_handler
= oact
._sa_handler
;
5268 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5269 old_act
->sa_flags
= oact
.sa_flags
;
5270 unlock_user_struct(old_act
, arg3
, 1);
5272 #elif defined(TARGET_MIPS)
5273 struct target_sigaction act
, oact
, *pact
, *old_act
;
5276 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5278 act
._sa_handler
= old_act
->_sa_handler
;
5279 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5280 act
.sa_flags
= old_act
->sa_flags
;
5281 unlock_user_struct(old_act
, arg2
, 0);
5287 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5289 if (!is_error(ret
) && arg3
) {
5290 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5292 old_act
->_sa_handler
= oact
._sa_handler
;
5293 old_act
->sa_flags
= oact
.sa_flags
;
5294 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5295 old_act
->sa_mask
.sig
[1] = 0;
5296 old_act
->sa_mask
.sig
[2] = 0;
5297 old_act
->sa_mask
.sig
[3] = 0;
5298 unlock_user_struct(old_act
, arg3
, 1);
5301 struct target_old_sigaction
*old_act
;
5302 struct target_sigaction act
, oact
, *pact
;
5304 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5306 act
._sa_handler
= old_act
->_sa_handler
;
5307 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5308 act
.sa_flags
= old_act
->sa_flags
;
5309 act
.sa_restorer
= old_act
->sa_restorer
;
5310 unlock_user_struct(old_act
, arg2
, 0);
5315 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5316 if (!is_error(ret
) && arg3
) {
5317 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5319 old_act
->_sa_handler
= oact
._sa_handler
;
5320 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5321 old_act
->sa_flags
= oact
.sa_flags
;
5322 old_act
->sa_restorer
= oact
.sa_restorer
;
5323 unlock_user_struct(old_act
, arg3
, 1);
5329 case TARGET_NR_rt_sigaction
:
5331 #if defined(TARGET_ALPHA)
5332 struct target_sigaction act
, oact
, *pact
= 0;
5333 struct target_rt_sigaction
*rt_act
;
5334 /* ??? arg4 == sizeof(sigset_t). */
5336 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5338 act
._sa_handler
= rt_act
->_sa_handler
;
5339 act
.sa_mask
= rt_act
->sa_mask
;
5340 act
.sa_flags
= rt_act
->sa_flags
;
5341 act
.sa_restorer
= arg5
;
5342 unlock_user_struct(rt_act
, arg2
, 0);
5345 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5346 if (!is_error(ret
) && arg3
) {
5347 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5349 rt_act
->_sa_handler
= oact
._sa_handler
;
5350 rt_act
->sa_mask
= oact
.sa_mask
;
5351 rt_act
->sa_flags
= oact
.sa_flags
;
5352 unlock_user_struct(rt_act
, arg3
, 1);
5355 struct target_sigaction
*act
;
5356 struct target_sigaction
*oact
;
5359 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5364 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5365 ret
= -TARGET_EFAULT
;
5366 goto rt_sigaction_fail
;
5370 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5373 unlock_user_struct(act
, arg2
, 0);
5375 unlock_user_struct(oact
, arg3
, 1);
5379 #ifdef TARGET_NR_sgetmask /* not on alpha */
5380 case TARGET_NR_sgetmask
:
5383 abi_ulong target_set
;
5384 sigprocmask(0, NULL
, &cur_set
);
5385 host_to_target_old_sigset(&target_set
, &cur_set
);
5390 #ifdef TARGET_NR_ssetmask /* not on alpha */
5391 case TARGET_NR_ssetmask
:
5393 sigset_t set
, oset
, cur_set
;
5394 abi_ulong target_set
= arg1
;
5395 sigprocmask(0, NULL
, &cur_set
);
5396 target_to_host_old_sigset(&set
, &target_set
);
5397 sigorset(&set
, &set
, &cur_set
);
5398 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5399 host_to_target_old_sigset(&target_set
, &oset
);
5404 #ifdef TARGET_NR_sigprocmask
5405 case TARGET_NR_sigprocmask
:
5407 #if defined(TARGET_ALPHA)
5408 sigset_t set
, oldset
;
5413 case TARGET_SIG_BLOCK
:
5416 case TARGET_SIG_UNBLOCK
:
5419 case TARGET_SIG_SETMASK
:
5423 ret
= -TARGET_EINVAL
;
5427 target_to_host_old_sigset(&set
, &mask
);
5429 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5431 if (!is_error(ret
)) {
5432 host_to_target_old_sigset(&mask
, &oldset
);
5434 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5437 sigset_t set
, oldset
, *set_ptr
;
5442 case TARGET_SIG_BLOCK
:
5445 case TARGET_SIG_UNBLOCK
:
5448 case TARGET_SIG_SETMASK
:
5452 ret
= -TARGET_EINVAL
;
5455 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5457 target_to_host_old_sigset(&set
, p
);
5458 unlock_user(p
, arg2
, 0);
5464 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5465 if (!is_error(ret
) && arg3
) {
5466 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5468 host_to_target_old_sigset(p
, &oldset
);
5469 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5475 case TARGET_NR_rt_sigprocmask
:
5478 sigset_t set
, oldset
, *set_ptr
;
5482 case TARGET_SIG_BLOCK
:
5485 case TARGET_SIG_UNBLOCK
:
5488 case TARGET_SIG_SETMASK
:
5492 ret
= -TARGET_EINVAL
;
5495 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5497 target_to_host_sigset(&set
, p
);
5498 unlock_user(p
, arg2
, 0);
5504 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5505 if (!is_error(ret
) && arg3
) {
5506 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5508 host_to_target_sigset(p
, &oldset
);
5509 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5513 #ifdef TARGET_NR_sigpending
5514 case TARGET_NR_sigpending
:
5517 ret
= get_errno(sigpending(&set
));
5518 if (!is_error(ret
)) {
5519 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5521 host_to_target_old_sigset(p
, &set
);
5522 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5527 case TARGET_NR_rt_sigpending
:
5530 ret
= get_errno(sigpending(&set
));
5531 if (!is_error(ret
)) {
5532 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5534 host_to_target_sigset(p
, &set
);
5535 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5539 #ifdef TARGET_NR_sigsuspend
5540 case TARGET_NR_sigsuspend
:
5543 #if defined(TARGET_ALPHA)
5544 abi_ulong mask
= arg1
;
5545 target_to_host_old_sigset(&set
, &mask
);
5547 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5549 target_to_host_old_sigset(&set
, p
);
5550 unlock_user(p
, arg1
, 0);
5552 ret
= get_errno(sigsuspend(&set
));
5556 case TARGET_NR_rt_sigsuspend
:
5559 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5561 target_to_host_sigset(&set
, p
);
5562 unlock_user(p
, arg1
, 0);
5563 ret
= get_errno(sigsuspend(&set
));
5566 case TARGET_NR_rt_sigtimedwait
:
5569 struct timespec uts
, *puts
;
5572 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5574 target_to_host_sigset(&set
, p
);
5575 unlock_user(p
, arg1
, 0);
5578 target_to_host_timespec(puts
, arg3
);
5582 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5583 if (!is_error(ret
) && arg2
) {
5584 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5586 host_to_target_siginfo(p
, &uinfo
);
5587 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5591 case TARGET_NR_rt_sigqueueinfo
:
5594 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5596 target_to_host_siginfo(&uinfo
, p
);
5597 unlock_user(p
, arg1
, 0);
5598 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5601 #ifdef TARGET_NR_sigreturn
5602 case TARGET_NR_sigreturn
:
5603 /* NOTE: ret is eax, so not transcoding must be done */
5604 ret
= do_sigreturn(cpu_env
);
5607 case TARGET_NR_rt_sigreturn
:
5608 /* NOTE: ret is eax, so not transcoding must be done */
5609 ret
= do_rt_sigreturn(cpu_env
);
5611 case TARGET_NR_sethostname
:
5612 if (!(p
= lock_user_string(arg1
)))
5614 ret
= get_errno(sethostname(p
, arg2
));
5615 unlock_user(p
, arg1
, 0);
5617 case TARGET_NR_setrlimit
:
5619 int resource
= target_to_host_resource(arg1
);
5620 struct target_rlimit
*target_rlim
;
5622 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5624 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5625 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5626 unlock_user_struct(target_rlim
, arg2
, 0);
5627 ret
= get_errno(setrlimit(resource
, &rlim
));
5630 case TARGET_NR_getrlimit
:
5632 int resource
= target_to_host_resource(arg1
);
5633 struct target_rlimit
*target_rlim
;
5636 ret
= get_errno(getrlimit(resource
, &rlim
));
5637 if (!is_error(ret
)) {
5638 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5640 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5641 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5642 unlock_user_struct(target_rlim
, arg2
, 1);
5646 case TARGET_NR_getrusage
:
5648 struct rusage rusage
;
5649 ret
= get_errno(getrusage(arg1
, &rusage
));
5650 if (!is_error(ret
)) {
5651 host_to_target_rusage(arg2
, &rusage
);
5655 case TARGET_NR_gettimeofday
:
5658 ret
= get_errno(gettimeofday(&tv
, NULL
));
5659 if (!is_error(ret
)) {
5660 if (copy_to_user_timeval(arg1
, &tv
))
5665 case TARGET_NR_settimeofday
:
5668 if (copy_from_user_timeval(&tv
, arg1
))
5670 ret
= get_errno(settimeofday(&tv
, NULL
));
5673 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5674 case TARGET_NR_select
:
5676 struct target_sel_arg_struct
*sel
;
5677 abi_ulong inp
, outp
, exp
, tvp
;
5680 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5682 nsel
= tswapal(sel
->n
);
5683 inp
= tswapal(sel
->inp
);
5684 outp
= tswapal(sel
->outp
);
5685 exp
= tswapal(sel
->exp
);
5686 tvp
= tswapal(sel
->tvp
);
5687 unlock_user_struct(sel
, arg1
, 0);
5688 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5692 #ifdef TARGET_NR_pselect6
5693 case TARGET_NR_pselect6
:
5695 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
5696 fd_set rfds
, wfds
, efds
;
5697 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
5698 struct timespec ts
, *ts_ptr
;
5701 * The 6th arg is actually two args smashed together,
5702 * so we cannot use the C library.
5710 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
5711 target_sigset_t
*target_sigset
;
5719 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
5723 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
5727 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
5733 * This takes a timespec, and not a timeval, so we cannot
5734 * use the do_select() helper ...
5737 if (target_to_host_timespec(&ts
, ts_addr
)) {
5745 /* Extract the two packed args for the sigset */
5748 sig
.size
= _NSIG
/ 8;
5750 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
5754 arg_sigset
= tswapal(arg7
[0]);
5755 arg_sigsize
= tswapal(arg7
[1]);
5756 unlock_user(arg7
, arg6
, 0);
5760 if (arg_sigsize
!= sizeof(*target_sigset
)) {
5761 /* Like the kernel, we enforce correct size sigsets */
5762 ret
= -TARGET_EINVAL
;
5765 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
5766 sizeof(*target_sigset
), 1);
5767 if (!target_sigset
) {
5770 target_to_host_sigset(&set
, target_sigset
);
5771 unlock_user(target_sigset
, arg_sigset
, 0);
5779 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
5782 if (!is_error(ret
)) {
5783 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
5785 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
5787 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
5790 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
5796 case TARGET_NR_symlink
:
5799 p
= lock_user_string(arg1
);
5800 p2
= lock_user_string(arg2
);
5802 ret
= -TARGET_EFAULT
;
5804 ret
= get_errno(symlink(p
, p2
));
5805 unlock_user(p2
, arg2
, 0);
5806 unlock_user(p
, arg1
, 0);
5809 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5810 case TARGET_NR_symlinkat
:
5813 p
= lock_user_string(arg1
);
5814 p2
= lock_user_string(arg3
);
5816 ret
= -TARGET_EFAULT
;
5818 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5819 unlock_user(p2
, arg3
, 0);
5820 unlock_user(p
, arg1
, 0);
5824 #ifdef TARGET_NR_oldlstat
5825 case TARGET_NR_oldlstat
:
    case TARGET_NR_readlink:
        {
            void *p2, *temp;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else {
                if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
                    char real[PATH_MAX];
                    temp = realpath(exec_path, real);
                    ret = (temp == NULL) ? get_errno(-1) : strlen(real);
                    snprintf((char *)p2, arg3, "%s", real);
                } else
                    ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
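    /* readlinkat (below) is the directory-fd variant of the same conversion,
     * minus the /proc/self/exe special case. */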
5849 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5850 case TARGET_NR_readlinkat
:
5853 p
= lock_user_string(arg2
);
5854 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5856 ret
= -TARGET_EFAULT
;
5858 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5859 unlock_user(p2
, arg3
, ret
);
5860 unlock_user(p
, arg2
, 0);
5864 #ifdef TARGET_NR_uselib
5865 case TARGET_NR_uselib
:
5868 #ifdef TARGET_NR_swapon
5869 case TARGET_NR_swapon
:
5870 if (!(p
= lock_user_string(arg1
)))
5872 ret
= get_errno(swapon(p
, arg2
));
5873 unlock_user(p
, arg1
, 0);
5876 case TARGET_NR_reboot
:
5877 if (!(p
= lock_user_string(arg4
)))
5879 ret
= reboot(arg1
, arg2
, arg3
, p
);
5880 unlock_user(p
, arg4
, 0);
5882 #ifdef TARGET_NR_readdir
5883 case TARGET_NR_readdir
:
5886 #ifdef TARGET_NR_mmap
5887 case TARGET_NR_mmap
:
5888 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5889 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5890 || defined(TARGET_S390X)
5893 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5894 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5902 unlock_user(v
, arg1
, 0);
5903 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5904 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5908 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5909 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5915 #ifdef TARGET_NR_mmap2
5916 case TARGET_NR_mmap2
:
5918 #define MMAP_SHIFT 12
5920 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5921 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5923 arg6
<< MMAP_SHIFT
));
5926 case TARGET_NR_munmap
:
5927 ret
= get_errno(target_munmap(arg1
, arg2
));
    case TARGET_NR_mprotect:
        {
            TaskState *ts = ((CPUState *)cpu_env)->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
        break;
5943 #ifdef TARGET_NR_mremap
5944 case TARGET_NR_mremap
:
5945 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5948 /* ??? msync/mlock/munlock are broken for softmmu. */
5949 #ifdef TARGET_NR_msync
5950 case TARGET_NR_msync
:
5951 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5954 #ifdef TARGET_NR_mlock
5955 case TARGET_NR_mlock
:
5956 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5959 #ifdef TARGET_NR_munlock
5960 case TARGET_NR_munlock
:
5961 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5964 #ifdef TARGET_NR_mlockall
5965 case TARGET_NR_mlockall
:
5966 ret
= get_errno(mlockall(arg1
));
5969 #ifdef TARGET_NR_munlockall
5970 case TARGET_NR_munlockall
:
5971 ret
= get_errno(munlockall());
5974 case TARGET_NR_truncate
:
5975 if (!(p
= lock_user_string(arg1
)))
5977 ret
= get_errno(truncate(p
, arg2
));
5978 unlock_user(p
, arg1
, 0);
5980 case TARGET_NR_ftruncate
:
5981 ret
= get_errno(ftruncate(arg1
, arg2
));
5983 case TARGET_NR_fchmod
:
5984 ret
= get_errno(fchmod(arg1
, arg2
));
5986 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5987 case TARGET_NR_fchmodat
:
5988 if (!(p
= lock_user_string(arg2
)))
5990 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5991 unlock_user(p
, arg2
, 0);
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = get_errno(sys_getpriority(arg1, arg2));
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
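    /* The statfs family below copies the host struct statfs into the target
     * layout field by field with __put_user(). */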
6003 #ifdef TARGET_NR_profil
6004 case TARGET_NR_profil
:
6007 case TARGET_NR_statfs
:
6008 if (!(p
= lock_user_string(arg1
)))
6010 ret
= get_errno(statfs(path(p
), &stfs
));
6011 unlock_user(p
, arg1
, 0);
6013 if (!is_error(ret
)) {
6014 struct target_statfs
*target_stfs
;
6016 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6018 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6019 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6020 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6021 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6022 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6023 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6024 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6025 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6026 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6027 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6028 unlock_user_struct(target_stfs
, arg2
, 1);
6031 case TARGET_NR_fstatfs
:
6032 ret
= get_errno(fstatfs(arg1
, &stfs
));
6033 goto convert_statfs
;
6034 #ifdef TARGET_NR_statfs64
6035 case TARGET_NR_statfs64
:
6036 if (!(p
= lock_user_string(arg1
)))
6038 ret
= get_errno(statfs(path(p
), &stfs
));
6039 unlock_user(p
, arg1
, 0);
6041 if (!is_error(ret
)) {
6042 struct target_statfs64
*target_stfs
;
6044 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6046 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6047 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6048 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6049 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6050 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6051 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6052 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6053 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6054 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6055 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6056 unlock_user_struct(target_stfs
, arg3
, 1);
6059 case TARGET_NR_fstatfs64
:
6060 ret
= get_errno(fstatfs(arg1
, &stfs
));
6061 goto convert_statfs64
;
6063 #ifdef TARGET_NR_ioperm
6064 case TARGET_NR_ioperm
:
6067 #ifdef TARGET_NR_socketcall
6068 case TARGET_NR_socketcall
:
6069 ret
= do_socketcall(arg1
, arg2
);
6072 #ifdef TARGET_NR_accept
6073 case TARGET_NR_accept
:
6074 ret
= do_accept(arg1
, arg2
, arg3
);
6077 #ifdef TARGET_NR_bind
6078 case TARGET_NR_bind
:
6079 ret
= do_bind(arg1
, arg2
, arg3
);
6082 #ifdef TARGET_NR_connect
6083 case TARGET_NR_connect
:
6084 ret
= do_connect(arg1
, arg2
, arg3
);
6087 #ifdef TARGET_NR_getpeername
6088 case TARGET_NR_getpeername
:
6089 ret
= do_getpeername(arg1
, arg2
, arg3
);
6092 #ifdef TARGET_NR_getsockname
6093 case TARGET_NR_getsockname
:
6094 ret
= do_getsockname(arg1
, arg2
, arg3
);
6097 #ifdef TARGET_NR_getsockopt
6098 case TARGET_NR_getsockopt
:
6099 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6102 #ifdef TARGET_NR_listen
6103 case TARGET_NR_listen
:
6104 ret
= get_errno(listen(arg1
, arg2
));
6107 #ifdef TARGET_NR_recv
6108 case TARGET_NR_recv
:
6109 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6112 #ifdef TARGET_NR_recvfrom
6113 case TARGET_NR_recvfrom
:
6114 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6117 #ifdef TARGET_NR_recvmsg
6118 case TARGET_NR_recvmsg
:
6119 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6122 #ifdef TARGET_NR_send
6123 case TARGET_NR_send
:
6124 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6127 #ifdef TARGET_NR_sendmsg
6128 case TARGET_NR_sendmsg
:
6129 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6132 #ifdef TARGET_NR_sendto
6133 case TARGET_NR_sendto
:
6134 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6137 #ifdef TARGET_NR_shutdown
6138 case TARGET_NR_shutdown
:
6139 ret
= get_errno(shutdown(arg1
, arg2
));
6142 #ifdef TARGET_NR_socket
6143 case TARGET_NR_socket
:
6144 ret
= do_socket(arg1
, arg2
, arg3
);
6147 #ifdef TARGET_NR_socketpair
6148 case TARGET_NR_socketpair
:
6149 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6152 #ifdef TARGET_NR_setsockopt
6153 case TARGET_NR_setsockopt
:
6154 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6158 case TARGET_NR_syslog
:
6159 if (!(p
= lock_user_string(arg2
)))
6161 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6162 unlock_user(p
, arg2
, 0);
6165 case TARGET_NR_setitimer
:
6167 struct itimerval value
, ovalue
, *pvalue
;
6171 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6172 || copy_from_user_timeval(&pvalue
->it_value
,
6173 arg2
+ sizeof(struct target_timeval
)))
6178 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6179 if (!is_error(ret
) && arg3
) {
6180 if (copy_to_user_timeval(arg3
,
6181 &ovalue
.it_interval
)
6182 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6188 case TARGET_NR_getitimer
:
6190 struct itimerval value
;
6192 ret
= get_errno(getitimer(arg1
, &value
));
6193 if (!is_error(ret
) && arg2
) {
6194 if (copy_to_user_timeval(arg2
,
6196 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6202 case TARGET_NR_stat
:
6203 if (!(p
= lock_user_string(arg1
)))
6205 ret
= get_errno(stat(path(p
), &st
));
6206 unlock_user(p
, arg1
, 0);
6208 case TARGET_NR_lstat
:
6209 if (!(p
= lock_user_string(arg1
)))
6211 ret
= get_errno(lstat(path(p
), &st
));
6212 unlock_user(p
, arg1
, 0);
6214 case TARGET_NR_fstat
:
6216 ret
= get_errno(fstat(arg1
, &st
));
6218 if (!is_error(ret
)) {
6219 struct target_stat
*target_st
;
6221 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6223 memset(target_st
, 0, sizeof(*target_st
));
6224 __put_user(st
.st_dev
, &target_st
->st_dev
);
6225 __put_user(st
.st_ino
, &target_st
->st_ino
);
6226 __put_user(st
.st_mode
, &target_st
->st_mode
);
6227 __put_user(st
.st_uid
, &target_st
->st_uid
);
6228 __put_user(st
.st_gid
, &target_st
->st_gid
);
6229 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6230 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6231 __put_user(st
.st_size
, &target_st
->st_size
);
6232 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6233 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6234 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6235 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6236 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6237 unlock_user_struct(target_st
, arg2
, 1);
6241 #ifdef TARGET_NR_olduname
6242 case TARGET_NR_olduname
:
6245 #ifdef TARGET_NR_iopl
6246 case TARGET_NR_iopl
:
6249 case TARGET_NR_vhangup
:
6250 ret
= get_errno(vhangup());
6252 #ifdef TARGET_NR_idle
6253 case TARGET_NR_idle
:
6256 #ifdef TARGET_NR_syscall
6257 case TARGET_NR_syscall
:
6258 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6259 arg6
, arg7
, arg8
, 0);
6262 case TARGET_NR_wait4
:
6265 abi_long status_ptr
= arg2
;
6266 struct rusage rusage
, *rusage_ptr
;
6267 abi_ulong target_rusage
= arg4
;
6269 rusage_ptr
= &rusage
;
6272 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6273 if (!is_error(ret
)) {
6275 status
= host_to_target_waitstatus(status
);
6276 if (put_user_s32(status
, status_ptr
))
6280 host_to_target_rusage(target_rusage
, &rusage
);
6284 #ifdef TARGET_NR_swapoff
6285 case TARGET_NR_swapoff
:
6286 if (!(p
= lock_user_string(arg1
)))
6288 ret
= get_errno(swapoff(p
));
6289 unlock_user(p
, arg1
, 0);
6292 case TARGET_NR_sysinfo
:
6294 struct target_sysinfo
*target_value
;
6295 struct sysinfo value
;
6296 ret
= get_errno(sysinfo(&value
));
6297 if (!is_error(ret
) && arg1
)
6299 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6301 __put_user(value
.uptime
, &target_value
->uptime
);
6302 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6303 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6304 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6305 __put_user(value
.totalram
, &target_value
->totalram
);
6306 __put_user(value
.freeram
, &target_value
->freeram
);
6307 __put_user(value
.sharedram
, &target_value
->sharedram
);
6308 __put_user(value
.bufferram
, &target_value
->bufferram
);
6309 __put_user(value
.totalswap
, &target_value
->totalswap
);
6310 __put_user(value
.freeswap
, &target_value
->freeswap
);
6311 __put_user(value
.procs
, &target_value
->procs
);
6312 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6313 __put_user(value
.freehigh
, &target_value
->freehigh
);
6314 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6315 unlock_user_struct(target_value
, arg1
, 1);
6319 #ifdef TARGET_NR_ipc
6321 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6324 #ifdef TARGET_NR_semget
6325 case TARGET_NR_semget
:
6326 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6329 #ifdef TARGET_NR_semop
6330 case TARGET_NR_semop
:
6331 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6334 #ifdef TARGET_NR_semctl
6335 case TARGET_NR_semctl
:
6336 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6339 #ifdef TARGET_NR_msgctl
6340 case TARGET_NR_msgctl
:
6341 ret
= do_msgctl(arg1
, arg2
, arg3
);
6344 #ifdef TARGET_NR_msgget
6345 case TARGET_NR_msgget
:
6346 ret
= get_errno(msgget(arg1
, arg2
));
6349 #ifdef TARGET_NR_msgrcv
6350 case TARGET_NR_msgrcv
:
6351 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6354 #ifdef TARGET_NR_msgsnd
6355 case TARGET_NR_msgsnd
:
6356 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6359 #ifdef TARGET_NR_shmget
6360 case TARGET_NR_shmget
:
6361 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6364 #ifdef TARGET_NR_shmctl
6365 case TARGET_NR_shmctl
:
6366 ret
= do_shmctl(arg1
, arg2
, arg3
);
6369 #ifdef TARGET_NR_shmat
6370 case TARGET_NR_shmat
:
6371 ret
= do_shmat(arg1
, arg2
, arg3
);
6374 #ifdef TARGET_NR_shmdt
6375 case TARGET_NR_shmdt
:
6376 ret
= do_shmdt(arg1
);
6379 case TARGET_NR_fsync
:
6380 ret
= get_errno(fsync(arg1
));
6382 case TARGET_NR_clone
:
6383 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6384 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6385 #elif defined(TARGET_CRIS)
6386 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6387 #elif defined(TARGET_S390X)
6388 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6390 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6393 #ifdef __NR_exit_group
6394 /* new thread calls */
6395 case TARGET_NR_exit_group
:
6399 gdb_exit(cpu_env
, arg1
);
6400 ret
= get_errno(exit_group(arg1
));
6403 case TARGET_NR_setdomainname
:
6404 if (!(p
= lock_user_string(arg1
)))
6406 ret
= get_errno(setdomainname(p
, arg2
));
6407 unlock_user(p
, arg1
, 0);
6409 case TARGET_NR_uname
:
6410 /* no need to transcode because we use the linux syscall */
6412 struct new_utsname
* buf
;
6414 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6416 ret
= get_errno(sys_uname(buf
));
6417 if (!is_error(ret
)) {
6418 /* Overrite the native machine name with whatever is being
6420 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6421 /* Allow the user to override the reported release. */
6422 if (qemu_uname_release
&& *qemu_uname_release
)
6423 strcpy (buf
->release
, qemu_uname_release
);
6425 unlock_user_struct(buf
, arg1
, 1);
6429 case TARGET_NR_modify_ldt
:
6430 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6432 #if !defined(TARGET_X86_64)
6433 case TARGET_NR_vm86old
:
6435 case TARGET_NR_vm86
:
6436 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6440 case TARGET_NR_adjtimex
:
6442 #ifdef TARGET_NR_create_module
6443 case TARGET_NR_create_module
:
6445 case TARGET_NR_init_module
:
6446 case TARGET_NR_delete_module
:
6447 #ifdef TARGET_NR_get_kernel_syms
6448 case TARGET_NR_get_kernel_syms
:
6451 case TARGET_NR_quotactl
:
6453 case TARGET_NR_getpgid
:
6454 ret
= get_errno(getpgid(arg1
));
6456 case TARGET_NR_fchdir
:
6457 ret
= get_errno(fchdir(arg1
));
6459 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6460 case TARGET_NR_bdflush
:
6463 #ifdef TARGET_NR_sysfs
6464 case TARGET_NR_sysfs
:
6467 case TARGET_NR_personality
:
6468 ret
= get_errno(personality(arg1
));
6470 #ifdef TARGET_NR_afs_syscall
6471 case TARGET_NR_afs_syscall
:
6474 #ifdef TARGET_NR__llseek /* Not on alpha */
6475 case TARGET_NR__llseek
:
6478 #if !defined(__NR_llseek)
6479 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6481 ret
= get_errno(res
);
6486 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6488 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6494 case TARGET_NR_getdents
:
6495 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6497 struct target_dirent
*target_dirp
;
6498 struct linux_dirent
*dirp
;
6499 abi_long count
= arg3
;
6501 dirp
= malloc(count
);
6503 ret
= -TARGET_ENOMEM
;
6507 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6508 if (!is_error(ret
)) {
6509 struct linux_dirent
*de
;
6510 struct target_dirent
*tde
;
6512 int reclen
, treclen
;
6513 int count1
, tnamelen
;
6517 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6521 reclen
= de
->d_reclen
;
6522 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6523 tde
->d_reclen
= tswap16(treclen
);
6524 tde
->d_ino
= tswapal(de
->d_ino
);
6525 tde
->d_off
= tswapal(de
->d_off
);
6526 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6529 /* XXX: may not be correct */
6530 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6531 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6533 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6537 unlock_user(target_dirp
, arg2
, ret
);
6543 struct linux_dirent
*dirp
;
6544 abi_long count
= arg3
;
6546 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6548 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6549 if (!is_error(ret
)) {
6550 struct linux_dirent
*de
;
6555 reclen
= de
->d_reclen
;
6558 de
->d_reclen
= tswap16(reclen
);
6559 tswapls(&de
->d_ino
);
6560 tswapls(&de
->d_off
);
6561 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6565 unlock_user(dirp
, arg2
, ret
);
6569 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6570 case TARGET_NR_getdents64
:
6572 struct linux_dirent64
*dirp
;
6573 abi_long count
= arg3
;
6574 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6576 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6577 if (!is_error(ret
)) {
6578 struct linux_dirent64
*de
;
6583 reclen
= de
->d_reclen
;
6586 de
->d_reclen
= tswap16(reclen
);
6587 tswap64s((uint64_t *)&de
->d_ino
);
6588 tswap64s((uint64_t *)&de
->d_off
);
6589 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6593 unlock_user(dirp
, arg2
, ret
);
6596 #endif /* TARGET_NR_getdents64 */
6597 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6599 case TARGET_NR_select
:
6601 case TARGET_NR__newselect
:
6603 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6606 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6607 # ifdef TARGET_NR_poll
6608 case TARGET_NR_poll
:
6610 # ifdef TARGET_NR_ppoll
6611 case TARGET_NR_ppoll
:
6614 struct target_pollfd
*target_pfd
;
6615 unsigned int nfds
= arg2
;
6620 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6624 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6625 for(i
= 0; i
< nfds
; i
++) {
6626 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6627 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6630 # ifdef TARGET_NR_ppoll
6631 if (num
== TARGET_NR_ppoll
) {
6632 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6633 target_sigset_t
*target_set
;
6634 sigset_t _set
, *set
= &_set
;
6637 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6638 unlock_user(target_pfd
, arg1
, 0);
6646 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6648 unlock_user(target_pfd
, arg1
, 0);
6651 target_to_host_sigset(set
, target_set
);
6656 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6658 if (!is_error(ret
) && arg3
) {
6659 host_to_target_timespec(arg3
, timeout_ts
);
6662 unlock_user(target_set
, arg4
, 0);
6666 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6668 if (!is_error(ret
)) {
6669 for(i
= 0; i
< nfds
; i
++) {
6670 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6673 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6677 case TARGET_NR_flock
:
6678 /* NOTE: the flock constant seems to be the same for every
6680 ret
= get_errno(flock(arg1
, arg2
));
6682 case TARGET_NR_readv
:
6687 vec
= alloca(count
* sizeof(struct iovec
));
6688 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6690 ret
= get_errno(readv(arg1
, vec
, count
));
6691 unlock_iovec(vec
, arg2
, count
, 1);
6694 case TARGET_NR_writev
:
6699 vec
= alloca(count
* sizeof(struct iovec
));
6700 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6702 ret
= get_errno(writev(arg1
, vec
, count
));
6703 unlock_iovec(vec
, arg2
, count
, 0);
6706 case TARGET_NR_getsid
:
6707 ret
= get_errno(getsid(arg1
));
6709 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6710 case TARGET_NR_fdatasync
:
6711 ret
= get_errno(fdatasync(arg1
));
6714 case TARGET_NR__sysctl
:
6715 /* We don't implement this, but ENOTDIR is always a safe
6717 ret
= -TARGET_ENOTDIR
;
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (copy_to_user(arg3, mask, ret)) {
                    goto efault;
                }
            }
        }
        break;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
                goto efault;
            }
            memcpy(mask, p, arg2);
            unlock_user_struct(p, arg2, 0);

            ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
        break;
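    /* sched_{set,get}param and sched_setscheduler only need to byte-swap the
     * sched_priority field when crossing the guest/host boundary. */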
6769 case TARGET_NR_sched_setparam
:
6771 struct sched_param
*target_schp
;
6772 struct sched_param schp
;
6774 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6776 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6777 unlock_user_struct(target_schp
, arg2
, 0);
6778 ret
= get_errno(sched_setparam(arg1
, &schp
));
6781 case TARGET_NR_sched_getparam
:
6783 struct sched_param
*target_schp
;
6784 struct sched_param schp
;
6785 ret
= get_errno(sched_getparam(arg1
, &schp
));
6786 if (!is_error(ret
)) {
6787 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6789 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6790 unlock_user_struct(target_schp
, arg2
, 1);
6794 case TARGET_NR_sched_setscheduler
:
6796 struct sched_param
*target_schp
;
6797 struct sched_param schp
;
6798 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6800 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6801 unlock_user_struct(target_schp
, arg3
, 0);
6802 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6805 case TARGET_NR_sched_getscheduler
:
6806 ret
= get_errno(sched_getscheduler(arg1
));
6808 case TARGET_NR_sched_yield
:
6809 ret
= get_errno(sched_yield());
6811 case TARGET_NR_sched_get_priority_max
:
6812 ret
= get_errno(sched_get_priority_max(arg1
));
6814 case TARGET_NR_sched_get_priority_min
:
6815 ret
= get_errno(sched_get_priority_min(arg1
));
6817 case TARGET_NR_sched_rr_get_interval
:
6820 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6821 if (!is_error(ret
)) {
6822 host_to_target_timespec(arg2
, &ts
);
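    /* nanosleep: convert the guest timespec, call the host, and write the
     * remaining time back only if the call was interrupted and the caller
     * supplied a rem pointer. */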
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
6836 #ifdef TARGET_NR_query_module
6837 case TARGET_NR_query_module
:
6840 #ifdef TARGET_NR_nfsservctl
6841 case TARGET_NR_nfsservctl
:
6844 case TARGET_NR_prctl
:
6847 case PR_GET_PDEATHSIG
:
6850 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6851 if (!is_error(ret
) && arg2
6852 && put_user_ual(deathsig
, arg2
))
6857 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6861 #ifdef TARGET_NR_arch_prctl
6862 case TARGET_NR_arch_prctl
:
6863 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6864 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6870 #ifdef TARGET_NR_pread
6871 case TARGET_NR_pread
:
6872 if (regpairs_aligned(cpu_env
))
6874 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6876 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6877 unlock_user(p
, arg2
, ret
);
6879 case TARGET_NR_pwrite
:
6880 if (regpairs_aligned(cpu_env
))
6882 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6884 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6885 unlock_user(p
, arg2
, 0);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite64:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        break;
    case TARGET_NR_capget:
        goto unimplemented;
    case TARGET_NR_capset:
        goto unimplemented;
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
        break;
#else
        goto unimplemented;
#endif
    case TARGET_NR_sendfile:
        goto unimplemented;
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
                        0, 0, 0, 0));
        break;
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                goto efault;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        break;
    }
#endif
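    /* host_to_target_rlim() above rewrites each host rlim value into the
     * guest's representation (byte order and word size can differ, and the
     * host's RLIM_INFINITY encoding presumably has to be mapped onto the
     * target's), so the guest libc sees a struct rlimit it understands.
     */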
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
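    /* The stat64 family above runs the host stat()/lstat()/fstat() into a
     * host struct stat and then relies on host_to_target_stat64() to rewrite
     * the result into the guest's stat64 layout (field widths, padding and
     * byte order differ per target) at the buffer given in arg2.
     */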
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
    (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2)))
            goto efault;
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
#else
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
#endif
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
        break;
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
        break;
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
                if (!target_grouplist)
                    goto efault;
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * 2);
            }
        }
        break;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
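    /* In the two group-list cases above, target_id is presumably the guest's
     * legacy 16-bit uid/gid type (hence the gidsetsize * 2 buffer size),
     * while the host-side list uses full gid_t entries; tswapid() together
     * with high2lowgid()/low2highgid() bridges the width and byte-order
     * difference for each entry.
     */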
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3),
                                     low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowuid(ruid), arg1)
                    || put_user_u16(high2lowuid(euid), arg2)
                    || put_user_u16(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowgid(rgid), arg1)
                    || put_user_u16(high2lowgid(egid), arg2)
                    || put_user_u16(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr.  */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                         | SWCR_TRAP_ENABLE_DZE
                                         | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                         | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64 (swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2))
                    goto efault;
                orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr (cpu_env, fpcr);
                ret = 0;

                if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                    /* Old exceptions are not signaled.  */
                    fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                    /* If any exceptions set by this call, and are unmasked,
                       send a signal.  */
                }
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        break;
#endif
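    /* The two Alpha cases above mirror the kernel's ieee_fpcr_to_swcr /
     * ieee_swcr_to_fpcr helpers: hardware FPCR bits are shifted down into the
     * software completion (SWCR) layout for GSI_IEEE_FP_CONTROL, and the
     * inverse shifts rebuild an FPCR from a guest-supplied SWCR; the trap
     * enable bits are stored inverted, hence the ~fpcr / ~swcr terms.
     */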
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for (i = 0; i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
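    /* The arm_fadvise64_64 case above only swaps two arguments back into the
     * regular fadvise64_64 order and then falls through into the shared
     * posix_fadvise() call; posix_fadvise() returns a positive errno value
     * directly (it does not set errno), which is why its result is negated
     * instead of being passed through get_errno().
     */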
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL)
            return cmd;

        switch (arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
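    /* The TARGET_ARM special-casing above exists because the EABI variant of
     * the guest flock64 structure aligns its 64-bit l_start/l_len members
     * differently from the old ABI, so a separate target_eabi_flock64 layout
     * is used; in both variants each field is byte-swapped individually
     * between guest memory and the host struct flock64.
     */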
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
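    /* Worked example for the 32-bit readahead path above: a guest offset of
     * 0x123456789 arrives split across two registers, with the low half
     * (0x23456789) in arg2 and the high half (0x1) in arg3 as combined here,
     * and ((off64_t)arg3 << 32) | arg2 reassembles the full 64-bit value
     * before the host call.
     */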
#ifdef TARGET_NR_setxattr
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
    case TARGET_NR_setxattr:
        {
            void *p, *n, *v;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (p && n && v) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
        {
            void *p, *n, *v;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (p && n && v) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_removexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
#endif /* TARGET_NR_setxattr */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      ((CPUMIPSState *) cpu_env)->tls_value = arg1;
      ret = 0;
      break;
#elif defined(TARGET_CRIS)
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      ret = do_set_thread_area(cpu_env, arg1);
      break;
#else
      goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
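    /* target_offset64() above pairs two 32-bit syscall arguments into one
     * 64-bit offset/length, so on 32-bit ABIs sync_file_range effectively
     * consumes six argument slots (seven on MIPS, presumably because of its
     * register-pair alignment rule); sync_file_range2 carries the flags in
     * arg2 so that the two 64-bit values stay in aligned register pairs.
     */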
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
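    /* In the epoll_wait/epoll_pwait case above the guest's event array is
     * locked for writing up front and a parallel host array is filled by the
     * kernel; only the entries actually returned (ret of them) are converted
     * back, and unlock_user()'s length argument limits the copy-back to that
     * many target_epoll_event records.
     */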
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
8144 gemu_log("qemu: Unsupported syscall: %d\n", num
);
8145 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8146 unimplemented_nowarn
:
8148 ret
= -TARGET_ENOSYS
;
8153 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
8156 print_syscall_ret(num
, ret
);
8159 ret
= -TARGET_EFAULT
;