/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>

int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);

#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <qemu-common.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>

#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu-common.h"

#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
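
/* The _syscallN() macros below generate thin static wrappers that invoke the
 * raw host system call through syscall(2), for calls that the host libc may
 * not expose directly. */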
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}

#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
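
/* fcntl_flags_tbl maps open(2)/fcntl(2) flag bits between the target and host
 * encodings; it is consumed by target_to_host_bitmask() and
 * host_to_target_bitmask(). */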
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};

#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
  return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}

#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host mode.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */

#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT  */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */

#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
};

static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}

static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    if (is_error(mapped_addr)) {
        return -TARGET_ENOMEM;
    }
#endif

    if (!is_error(mapped_addr)) {
        target_brk = new_brk;
    }
    return target_brk;
}
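
/* The fd_set helpers below repack select(2) descriptor bitmaps between the
 * target's abi_ulong word layout (in target byte order) and the host fd_set
 * representation, one bit at a time. */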
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}

#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}

static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}

static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
{
    if (target_rlim == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;
    else
        return tswapl(target_rlim);
}

static inline target_ulong host_to_target_rlim(rlim_t rlim)
{
    if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
        return TARGET_RLIM_INFINITY;
    else
        return tswapl(rlim);
}

static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif

/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}

static abi_long do_pipe2(int host_pipe[], int flags)
{
    return pipe2(host_pipe, flags);
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
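
/* Conversion helpers for struct ip_mreqn and struct sockaddr values passed in
 * guest memory; note that target_to_host_sockaddr() also repairs AF_UNIX
 * sun_path lengths that were specified without the trailing '\0'. */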
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
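
/* The two cmsg converters below walk the ancillary-data chain of a msghdr:
 * SCM_RIGHTS payloads have every file descriptor byte-swapped individually,
 * while any other control message is copied through verbatim with a warning. */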

/* ??? Should this also swap msgh->name? */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* ??? Should this also swap msgh->name? */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}

/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}

/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        default:
            goto int_case;
        }
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(val);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(val);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}

/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if a element has invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}

/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            ret = host_to_target_cmsg(msgp, &msg);
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
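
/* do_socketcall() demultiplexes the legacy socketcall(2) interface: the target
 * passes an operation number plus a pointer to a packed array of abi_ulong
 * arguments, which are fetched with get_user_ual() at vptr, vptr + n,
 * vptr + 2 * n, ... and forwarded to the individual do_*() helpers above. */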
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            int fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
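
/* SysV IPC emulation follows: shared-memory attaches are tracked in
 * shm_regions[], and the target_ipc_perm / target_semid_ds layouts mirror the
 * guest kernel ABI so that semctl() arguments can be converted field by field
 * between guest and host. */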
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};

static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
2261 struct target_seminfo
{
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    abi_long ret;
    int i;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    abi_long ret;
    int i;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
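/* do_semctl() picks the right marshalling for the semun argument based on the
 * semctl command class: GETVAL/SETVAL carry a plain integer, GETALL/SETALL a
 * semaphore array, IPC_STAT/IPC_SET a semid_ds buffer, and the *_INFO
 * commands a seminfo buffer. */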
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
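/* Message queue payloads are bounced through a host-side struct msgbuf:
 * do_msgsnd() copies mtype/mtext in from guest memory before calling
 * msgsnd(), and do_msgrcv() copies the received message back out to the
 * guest buffer. */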
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz + sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz + sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapl(host_mb->mtype);
    free(host_mb);

end:
    unlock_user_struct(target_mb, msgp, 1);
    return ret;
}
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_cpid;
    abi_ulong shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
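/* do_shmat() keeps the attachment inside the guest address space: when the
 * guest supplies no address, a free window is found with mmap_find_vma() and
 * the segment is attached there with SHM_REMAP.  The mapping is recorded in
 * shm_regions[] so do_shmdt() can clear the page flags again later. */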
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
        }
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr = h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
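/* On targets that use the multiplexed ipc(2) syscall, the operation number is
 * carried in the low 16 bits of 'call' and an ABI version in the upper bits;
 * do_ipc() below decodes both and forwards to the helpers above. */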
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;
    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;
    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;
    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;
    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;
    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;
    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;
    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;
    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;
        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
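/* syscall_types.h is expanded twice below: first to enumerate the STRUCT_*
 * identifiers, then to emit the argtype descriptor array for each structure
 * used by the ioctl thunking code. */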
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, abi_long cmd, abi_long arg);

struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    do_ioctl_fn *do_ioctl;
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, abi_long cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = malloc(outbufsz);
        if (!fm)
            return -TARGET_ENOMEM;
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        free(fm);
    }
    return ret;
}
#endif
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, abi_long cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_len = host_ifconf->ifc_len;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;

    target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
    nb_ifreq = target_ifc_len / target_ifreq_size;
    host_ifc_len = nb_ifreq * sizeof(struct ifreq);

    outbufsz = sizeof(*host_ifconf) + host_ifc_len;
    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        host_ifconf = malloc(outbufsz);
        if (!host_ifconf)
            return -TARGET_ENOMEM;
        memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
        free_buf = 1;
    }
    host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

    host_ifconf->ifc_len = host_ifc_len;
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        /* copy ifreq[] to target user */

        argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
        for (i = 0; i < nb_ifreq; i++) {
            thunk_convert(argptr + i * target_ifreq_size,
                          host_ifc_buf + i * sizeof(struct ifreq),
                          ifreq_arg_type, THUNK_TARGET);
        }
        unlock_user(argptr, target_ifc_buf, target_ifc_len);
    }

    if (free_buf) {
        free(host_ifconf);
    }

    return ret;
}
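/* ioctls.h expands to one IOCTLEntry per supported ioctl; entries that need
 * custom marshalling (such as the fiemap and ifconf handlers above) use
 * IOCTL_SPECIAL to install a do_ioctl_fn callback. */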
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
static const bitmask_transtbl oflag_tbl[] = {
        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
        { 0, 0, 0, 0 }
};
static const bitmask_transtbl cflag_tbl[] = {
        { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
        { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
        { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
        { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
        { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
        { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
        { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
        { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
        { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
        { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
        { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
        { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
        { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
        { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
        { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
        { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
        { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
        { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
        { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
        { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
        { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
        { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
        { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
        { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
        { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
        { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
        { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
        { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
        { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
        { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
        { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
        { 0, 0, 0, 0 }
};
static const bitmask_transtbl lflag_tbl[] = {
        { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
        { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
        { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
        { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
        { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
        { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
        { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
        { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
        { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
        { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
        { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
        { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
        { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
        { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
        { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
        { 0, 0, 0, 0 }
};
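/* The termios conversion routines below translate each flag word through the
 * bitmask tables above and copy the c_cc control characters entry by entry
 * between the target and host layouts. */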
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
static bitmask_transtbl mmap_flags_tbl[] = {
        { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
        { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
        { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
        { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
        { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
        { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
        { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
        { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
        { 0, 0, 0, 0 }
};
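/* i386-specific segment handling: modify_ldt() and the TLS thread-area
 * syscalls are emulated below by building segment descriptor words directly,
 * mirroring the layout used by the Linux kernel. */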
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapl(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            return -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif

#endif /* defined(TARGET_I386) */
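/* Thread creation: with NPTL support each guest clone(CLONE_VM) becomes a
 * host pthread, and new_thread_info carries the synchronization state used to
 * hand the child its tid pointers before it starts executing guest code. */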
#define NEW_STACK_SIZE 0x40000

#if defined(CONFIG_USE_NPTL)

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUState *env;
    TaskState *ts;

    env = info->env;
    thread_env = env;
    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    env->host_tid = info->tid;
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
#else

static int clone_func(void *arg)
{
    CPUState *env = arg;
    cpu_loop(env);
    /* never exits */
    return 0;
}
#endif
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    int ret;
    TaskState *ts;
    CPUState *new_env;
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;
    sigset_t sigmask;
#else
    uint8_t *new_stack;
#endif

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;
#endif
        ts = qemu_mallocz(sizeof(TaskState));
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        cpu_reset(new_env);
#endif
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        nptl_flags = flags;
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);
        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(ret, parent_tidptr);
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
#else
        if (flags & CLONE_NPTL_FLAGS2)
            return -EINVAL;
        /* This is probably going to die very quickly, but do it anyway.  */
        new_stack = qemu_mallocz (NEW_STACK_SIZE);
#ifdef __ia64__
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
#else
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
#endif
#endif
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            return -EINVAL;
        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
            fork_end(1);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
#endif
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
    default:
        return -TARGET_EINVAL;
    }
    return -TARGET_EINVAL;
}
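/* do_fcntl() converts struct flock/flock64 operands field by field and
 * translates the open flags for F_GETFL/F_SETFL through fcntl_flags_tbl. */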
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapl(fl.l_start);
            target_fl->l_len = tswapl(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswapl(fl64.l_start);
            target_fl64->l_len = tswapl(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);
        }
        break;

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}
#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return id;
}
#endif /* USE_UID16 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* Build target_to_host_errno_table[] table from
         * host_to_target_errno_table[]. */
        for (i = 0; i < ERRNO_TABLE_SIZE; i++)
            target_to_host_errno_table[host_to_target_errno_table[i]] = i;

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
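/* On 32-bit ABIs a 64-bit file offset arrives as two abi_long halves that
 * target_offset64() above reassembles; ARM EABI additionally aligns the pair
 * to an even register, which is why the helpers below skip one argument in
 * that case. */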
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
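/* Derive a packed host kernel version number, one byte per component
   (e.g. 2.6.32 -> 0x020620), from the qemu_uname_release override if it is
   set, otherwise from sys_uname().  The result is cached in a static. */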
int get_osversion(void)
    static int osversion;
    struct new_utsname buf;
    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    if (sys_uname(&buf))
    for (i = 0; i < 3; i++) {
        while (*s >= '0' && *s <= '9') {
        tmp = (tmp << 8) + n;
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6)
        gemu_log("syscall %d", num);
    print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However, in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
            /* If we didn't find the CPU for this thread then something is
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            ts = ((CPUState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
    case TARGET_NR_read:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(read(arg1, p, arg3));
        unlock_user(p, arg2, ret);
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(open(path(p),
                             target_to_host_bitmask(arg2, fcntl_flags_tbl),
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_openat(arg1,
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
        unlock_user(p, arg2, 0);
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
    case TARGET_NR_fork:
        ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        ret = get_errno(waitpid(arg1, &status, arg3));
        if (!is_error(ret) && arg2
            && put_user_s32(host_to_target_waitstatus(status), arg2))
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        ret = get_errno(waitid(arg1, arg2, &info, arg4));
        if (!is_error(ret) && arg3 && info.si_pid != 0) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &info);
            unlock_user(p, arg3, sizeof(target_siginfo_t));
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(creat(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_link:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(link(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
    case TARGET_NR_linkat:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
            ret = -TARGET_EFAULT;
            ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
        unlock_user(p, arg2, 0);
        unlock_user(p2, arg4, 0);
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
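    /* execve: argv and envp are arrays of guest pointers.  The entries are
       counted first, then each guest string is locked into a host char *[]
       before calling the host execve(); all locked strings are released
       again on the way out. */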
    case TARGET_NR_execve:
        char **argp, **envp;
        abi_ulong guest_argp;
        abi_ulong guest_envp;
        for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))
        for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
            if (get_user_ual(addr, gp))
        argp = alloca((argc + 1) * sizeof(void *));
        envp = alloca((envc + 1) * sizeof(void *));
        for (gp = guest_argp, q = argp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))
        for (gp = guest_envp, q = envp; gp;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp))
            if (!(*q = lock_user_string(addr)))
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(execve(p, argp, envp));
        unlock_user(p, arg1, 0);
        ret = -TARGET_EFAULT;
        for (gp = guest_argp, q = argp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
        for (gp = guest_envp, q = envp; *q;
             gp += sizeof(abi_ulong), q++) {
            if (get_user_ual(addr, gp)
            unlock_user(*q, addr, 0);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        ret = get_errno(time(&host_time));
            && put_user_sal(host_time, arg1))
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_break
    case TARGET_NR_break:
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
    case TARGET_NR_lseek:
        ret = get_errno(lseek(arg1, arg2, arg3));
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
        ret = get_errno(getpid());
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        ret = get_errno(getpid());
    case TARGET_NR_mount:
        /* need to look at the data field */
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
        p3 = lock_user_string(arg3);
        if (!p || !p2 || !p3)
            ret = -TARGET_EFAULT;
            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
                ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
        unlock_user(p, arg1, 0);
        unlock_user(p2, arg2, 0);
        unlock_user(p3, arg3, 0);
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        if (get_user_sal(host_time, arg1))
        ret = get_errno(stime(&host_time));
    case TARGET_NR_ptrace:
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        struct utimbuf tbuf, *host_tbuf;
        struct target_utimbuf *target_tbuf;
            if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
            tbuf.actime = tswapl(target_tbuf->actime);
            tbuf.modtime = tswapl(target_tbuf->modtime);
            unlock_user_struct(target_tbuf, arg2, 0);
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(utime(p, host_tbuf));
        unlock_user(p, arg1, 0);
    case TARGET_NR_utimes:
        struct timeval *tvp, tv[2];
            if (copy_from_user_timeval(&tv[0], arg2)
                || copy_from_user_timeval(&tv[1],
                                          arg2 + sizeof(struct target_timeval)))
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(utimes(p, tvp));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
        struct timeval *tvp, tv[2];
            if (copy_from_user_timeval(&tv[0], arg3)
                || copy_from_user_timeval(&tv[1],
                                          arg3 + sizeof(struct target_timeval)))
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_futimesat(arg1, path(p), tvp));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
    case TARGET_NR_sync:
    case TARGET_NR_kill:
        ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
    case TARGET_NR_rename:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(rename(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
    case TARGET_NR_renameat:
        p = lock_user_string(arg2);
        p2 = lock_user_string(arg4);
            ret = -TARGET_EFAULT;
            ret = get_errno(sys_renameat(arg1, p, arg3, p2));
        unlock_user(p2, arg4, 0);
        unlock_user(p, arg2, 0);
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        ret = get_errno(dup(arg1));
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0, 0);
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        ret = do_pipe(cpu_env, arg1, arg2, 1);
    case TARGET_NR_times:
        struct target_tms *tmsp;
        ret = get_errno(times(&tms));
            tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
            tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
            tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
            tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
            tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
            ret = host_to_target_clock_t(ret);
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
    case TARGET_NR_acct:
            ret = get_errno(acct(NULL));
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
#ifdef TARGET_NR_umount2 /* not on alpha */
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
#ifdef TARGET_NR_mpx
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ustat:
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        ret = get_errno(dup3(arg1, arg2, arg3));
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        ret = get_errno(getppid());
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
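    /* sigaction: the old-style struct sigaction layout differs per target
       (Alpha and MIPS have their own variants), so each branch below converts
       between the target_old_sigaction layout and the internal
       target_sigaction used by do_sigaction(). */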
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
#if defined(TARGET_ALPHA)
        struct target_sigaction act, oact, *pact = 0;
        struct target_old_sigaction *old_act;
            if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
            act._sa_handler = old_act->_sa_handler;
            target_siginitset(&act.sa_mask, old_act->sa_mask);
            act.sa_flags = old_act->sa_flags;
            act.sa_restorer = 0;
            unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_mask = oact.sa_mask.sig[0];
            old_act->sa_flags = oact.sa_flags;
            unlock_user_struct(old_act, arg3, 1);
#elif defined(TARGET_MIPS)
        struct target_sigaction act, oact, *pact, *old_act;
            if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
            act._sa_handler = old_act->_sa_handler;
            target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
            act.sa_flags = old_act->sa_flags;
            unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_flags = oact.sa_flags;
            old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
            old_act->sa_mask.sig[1] = 0;
            old_act->sa_mask.sig[2] = 0;
            old_act->sa_mask.sig[3] = 0;
            unlock_user_struct(old_act, arg3, 1);
        struct target_old_sigaction *old_act;
        struct target_sigaction act, oact, *pact;
            if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
            act._sa_handler = old_act->_sa_handler;
            target_siginitset(&act.sa_mask, old_act->sa_mask);
            act.sa_flags = old_act->sa_flags;
            act.sa_restorer = old_act->sa_restorer;
            unlock_user_struct(old_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
            old_act->_sa_handler = oact._sa_handler;
            old_act->sa_mask = oact.sa_mask.sig[0];
            old_act->sa_flags = oact.sa_flags;
            old_act->sa_restorer = oact.sa_restorer;
            unlock_user_struct(old_act, arg3, 1);
    case TARGET_NR_rt_sigaction:
#if defined(TARGET_ALPHA)
        struct target_sigaction act, oact, *pact = 0;
        struct target_rt_sigaction *rt_act;
        /* ??? arg4 == sizeof(sigset_t).  */
            if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
            act._sa_handler = rt_act->_sa_handler;
            act.sa_mask = rt_act->sa_mask;
            act.sa_flags = rt_act->sa_flags;
            act.sa_restorer = arg5;
            unlock_user_struct(rt_act, arg2, 0);
        ret = get_errno(do_sigaction(arg1, pact, &oact));
        if (!is_error(ret) && arg3) {
            if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
            rt_act->_sa_handler = oact._sa_handler;
            rt_act->sa_mask = oact.sa_mask;
            rt_act->sa_flags = oact.sa_flags;
            unlock_user_struct(rt_act, arg3, 1);
        struct target_sigaction *act;
        struct target_sigaction *oact;
            if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
            if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
                goto rt_sigaction_fail;
        ret = get_errno(do_sigaction(arg1, act, oact));
            unlock_user_struct(act, arg2, 0);
            unlock_user_struct(oact, arg3, 1);
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        abi_ulong target_set;
        sigprocmask(0, NULL, &cur_set);
        host_to_target_old_sigset(&target_set, &cur_set);
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        sigset_t set, oset, cur_set;
        abi_ulong target_set = arg1;
        sigprocmask(0, NULL, &cur_set);
        target_to_host_old_sigset(&set, &target_set);
        sigorset(&set, &set, &cur_set);
        sigprocmask(SIG_SETMASK, &set, &oset);
        host_to_target_old_sigset(&target_set, &oset);
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
#if defined(TARGET_ALPHA)
        sigset_t set, oldset;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        target_to_host_old_sigset(&set, &mask);
        ret = get_errno(sigprocmask(how, &set, &oldset));
        if (!is_error(ret)) {
            host_to_target_old_sigset(&mask, &oldset);
            ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
        sigset_t set, oldset, *set_ptr;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
            if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
            target_to_host_old_sigset(&set, p);
            unlock_user(p, arg2, 0);
        ret = get_errno(sigprocmask(how, set_ptr, &oldset));
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
            host_to_target_old_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigprocmask:
        sigset_t set, oldset, *set_ptr;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
            if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
            target_to_host_sigset(&set, p);
            unlock_user(p, arg2, 0);
        ret = get_errno(sigprocmask(how, set_ptr, &oldset));
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_old_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
#if defined(TARGET_ALPHA)
        abi_ulong mask = arg1;
        target_to_host_old_sigset(&set, &mask);
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_old_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigsuspend:
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigtimedwait:
        struct timespec uts, *puts;
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
            target_to_host_timespec(puts, arg3);
        ret = get_errno(sigtimedwait(&set, &uinfo, puts));
        if (!is_error(ret) && arg2) {
            if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &uinfo);
            unlock_user(p, arg2, sizeof(target_siginfo_t));
    case TARGET_NR_rt_sigqueueinfo:
        if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
        target_to_host_siginfo(&uinfo, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setrlimit:
        int resource = arg1;
        struct target_rlimit *target_rlim;
        if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
        rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
        rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
        unlock_user_struct(target_rlim, arg2, 0);
        ret = get_errno(setrlimit(resource, &rlim));
    case TARGET_NR_getrlimit:
        int resource = arg1;
        struct target_rlimit *target_rlim;
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
    case TARGET_NR_getrusage:
        struct rusage rusage;
        ret = get_errno(getrusage(arg1, &rusage));
        if (!is_error(ret)) {
            host_to_target_rusage(arg2, &rusage);
    case TARGET_NR_gettimeofday:
        ret = get_errno(gettimeofday(&tv, NULL));
        if (!is_error(ret)) {
            if (copy_to_user_timeval(arg1, &tv))
    case TARGET_NR_settimeofday:
        if (copy_from_user_timeval(&tv, arg1))
        ret = get_errno(settimeofday(&tv, NULL));
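    /* Old-style select on these targets passes a single pointer to a block
       holding n/inp/outp/exp/tvp; unpack it and hand the pieces to
       do_select(). */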
#if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
    case TARGET_NR_select:
        struct target_sel_arg_struct *sel;
        abi_ulong inp, outp, exp, tvp;
        if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
        nsel = tswapl(sel->n);
        inp = tswapl(sel->inp);
        outp = tswapl(sel->outp);
        exp = tswapl(sel->exp);
        tvp = tswapl(sel->tvp);
        unlock_user_struct(sel, arg1, 0);
        ret = do_select(nsel, inp, outp, exp, tvp);
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        goto unimplemented_nowarn;
    case TARGET_NR_symlink:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
            ret = get_errno(symlink(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
    case TARGET_NR_symlinkat:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg3);
            ret = -TARGET_EFAULT;
            ret = get_errno(sys_symlinkat(p, arg2, p2));
        unlock_user(p2, arg3, 0);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
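    /* readlink: "/proc/self/exe" is special-cased so that the guest sees the
       path of the emulated binary (exec_path) rather than the qemu
       executable itself. */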
    case TARGET_NR_readlink:
        p = lock_user_string(arg1);
        p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            ret = -TARGET_EFAULT;
        if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
            char real[PATH_MAX];
            temp = realpath(exec_path, real);
            ret = (temp == NULL) ? get_errno(-1) : strlen(real);
            snprintf((char *)p2, arg3, "%s", real);
            ret = get_errno(readlink(path(p), p2, arg3));
        unlock_user(p2, arg2, ret);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
    case TARGET_NR_readlinkat:
        p = lock_user_string(arg2);
        p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            ret = -TARGET_EFAULT;
            ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
        unlock_user(p2, arg3, ret);
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_uselib
    case TARGET_NR_uselib:
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_reboot:
#ifdef TARGET_NR_readdir
    case TARGET_NR_readdir:
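    /* mmap: several 32-bit targets pass the six mmap arguments in a memory
       block pointed to by arg1 rather than in registers; read them out
       first, then call target_mmap() with the flags translated through
       mmap_flags_tbl. */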
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        abi_ulong v1, v2, v3, v4, v5, v6;
        if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
        unlock_user(v, arg1, 0);
        ret = get_errno(target_mmap(v1, v2, v3,
                                    target_to_host_bitmask(v4, mmap_flags_tbl),
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#define MMAP_SHIFT 12
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg6 << MMAP_SHIFT));
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        TaskState *ts = ((CPUState *)cpu_env)->opaque;
        /* Special hack to detect libc making the stack executable.  */
        if ((arg3 & PROT_GROWSDOWN)
            && arg1 >= ts->info->stack_limit
            && arg1 <= ts->info->start_stack) {
            arg3 &= ~PROT_GROWSDOWN;
            arg2 = arg2 + arg1 - ts->info->stack_limit;
            arg1 = ts->info->stack_limit;
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = get_errno(sys_getpriority(arg1, arg2));
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg2, 1);
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg3, 1);
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept(arg1, arg2, arg3);
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
    case TARGET_NR_syslog:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_setitimer:
        struct itimerval value, ovalue, *pvalue;
            if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                || copy_from_user_timeval(&pvalue->it_value,
                                          arg2 + sizeof(struct target_timeval)))
        ret = get_errno(setitimer(arg1, pvalue, &ovalue));
        if (!is_error(ret) && arg3) {
            if (copy_to_user_timeval(arg3,
                                     &ovalue.it_interval)
                || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
    case TARGET_NR_getitimer:
        struct itimerval value;
        ret = get_errno(getitimer(arg1, &value));
        if (!is_error(ret) && arg2) {
            if (copy_to_user_timeval(arg2,
                || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_fstat:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            struct target_stat *target_st;
            if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
            memset(target_st, 0, sizeof(*target_st));
            __put_user(st.st_dev, &target_st->st_dev);
            __put_user(st.st_ino, &target_st->st_ino);
            __put_user(st.st_mode, &target_st->st_mode);
            __put_user(st.st_uid, &target_st->st_uid);
            __put_user(st.st_gid, &target_st->st_gid);
            __put_user(st.st_nlink, &target_st->st_nlink);
            __put_user(st.st_rdev, &target_st->st_rdev);
            __put_user(st.st_size, &target_st->st_size);
            __put_user(st.st_blksize, &target_st->st_blksize);
            __put_user(st.st_blocks, &target_st->st_blocks);
            __put_user(st.st_atime, &target_st->target_st_atime);
            __put_user(st.st_mtime, &target_st->target_st_mtime);
            __put_user(st.st_ctime, &target_st->target_st_ctime);
            unlock_user_struct(target_st, arg2, 1);
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, arg6, 0);
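    /* wait4: the exit status and struct rusage are converted back to the
       guest layout only when the guest supplied pointers for them. */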
    case TARGET_NR_wait4:
        abi_long status_ptr = arg2;
        struct rusage rusage, *rusage_ptr;
        abi_ulong target_rusage = arg4;
            rusage_ptr = &rusage;
        ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
        if (!is_error(ret)) {
            status = host_to_target_waitstatus(status);
            if (put_user_s32(status, status_ptr))
            host_to_target_rusage(target_rusage, &rusage);
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_sysinfo:
        struct target_sysinfo *target_value;
        struct sysinfo value;
        ret = get_errno(sysinfo(&value));
        if (!is_error(ret) && arg1)
            if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
            __put_user(value.uptime, &target_value->uptime);
            __put_user(value.loads[0], &target_value->loads[0]);
            __put_user(value.loads[1], &target_value->loads[1]);
            __put_user(value.loads[2], &target_value->loads[2]);
            __put_user(value.totalram, &target_value->totalram);
            __put_user(value.freeram, &target_value->freeram);
            __put_user(value.sharedram, &target_value->sharedram);
            __put_user(value.bufferram, &target_value->bufferram);
            __put_user(value.totalswap, &target_value->totalswap);
            __put_user(value.freeswap, &target_value->freeswap);
            __put_user(value.procs, &target_value->procs);
            __put_user(value.totalhigh, &target_value->totalhigh);
            __put_user(value.freehigh, &target_value->freehigh);
            __put_user(value.mem_unit, &target_value->mem_unit);
            unlock_user_struct(target_value, arg1, 1);
#ifdef TARGET_NR_ipc
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = get_errno(do_semop(arg1, arg2, arg3));
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        ret = do_shmat(arg1, arg2, arg3);
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
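    /* clone: the ordering of the stack, flags, tls and tid arguments differs
       between targets, so each branch below simply reshuffles arg1..arg5
       before handing them to do_fork(). */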
    case TARGET_NR_clone:
#if defined(TARGET_SH4) || defined(TARGET_ALPHA)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
#elif defined(TARGET_S390X)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#ifdef __NR_exit_group
    /* new thread calls */
    case TARGET_NR_exit_group:
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        struct new_utsname * buf;
        if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
        ret = get_errno(sys_uname(buf));
        if (!is_error(ret)) {
            /* Overwrite the native machine name with whatever is being
            strcpy(buf->machine, cpu_to_uname_machine(cpu_env));
            /* Allow the user to override the reported release.  */
            if (qemu_uname_release && *qemu_uname_release)
                strcpy(buf->release, qemu_uname_release);
        unlock_user_struct(buf, arg1, 1);
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
    case TARGET_NR_adjtimex:
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
    case TARGET_NR_quotactl:
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
#if !defined(__NR_llseek)
        res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
        ret = get_errno(res);
        ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
        if ((ret == 0) && put_user_s64(res, arg4)) {
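    /* getdents: when the target ABI uses 32-bit longs but the host uses
       64-bit ones, each host linux_dirent record is repacked into the
       smaller target_dirent layout; otherwise the records are only
       byte-swapped in place. */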
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        struct target_dirent *target_dirp;
        struct linux_dirent *dirp;
        abi_long count = arg3;
        dirp = malloc(count);
            ret = -TARGET_ENOMEM;
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            struct target_dirent *tde;
            int reclen, treclen;
            int count1, tnamelen;
            if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                reclen = de->d_reclen;
                treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
                tde->d_reclen = tswap16(treclen);
                tde->d_ino = tswapl(de->d_ino);
                tde->d_off = tswapl(de->d_off);
                tnamelen = treclen - (2 * sizeof(abi_long) + 2);
                /* XXX: may not be correct */
                pstrcpy(tde->d_name, tnamelen, de->d_name);
                de = (struct linux_dirent *)((char *)de + reclen);
                tde = (struct target_dirent *)((char *)tde + treclen);
            unlock_user(target_dirp, arg2, ret);
        struct linux_dirent *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
                reclen = de->d_reclen;
                de->d_reclen = tswap16(reclen);
                tswapls(&de->d_ino);
                tswapls(&de->d_off);
                de = (struct linux_dirent *)((char *)de + reclen);
        unlock_user(dirp, arg2, ret);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        struct linux_dirent64 *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent64 *de;
                reclen = de->d_reclen;
                de->d_reclen = tswap16(reclen);
                tswap64s((uint64_t *)&de->d_ino);
                tswap64s((uint64_t *)&de->d_off);
                de = (struct linux_dirent64 *)((char *)de + reclen);
        unlock_user(dirp, arg2, ret);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
    case TARGET_NR_select:
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
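    /* poll/ppoll: the guest pollfd array is converted to host struct pollfd,
       and for ppoll the timeout timespec and signal mask are converted as
       well before calling sys_ppoll(); revents are swapped back afterwards. */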
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for(i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
# ifdef TARGET_NR_ppoll
        if (num == TARGET_NR_ppoll) {
            struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    unlock_user(target_pfd, arg1, 0);
                target_to_host_sigset(set, target_set);
            ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
            if (!is_error(ret) && arg3) {
                host_to_target_timespec(arg3, timeout_ts);
                unlock_user(target_set, arg4, 0);
            ret = get_errno(poll(pfd, nfds, timeout));
        if (!is_error(ret)) {
            for(i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
        unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
        ret = get_errno(flock(arg1, arg2));
    case TARGET_NR_readv:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
        ret = get_errno(readv(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 1);
    case TARGET_NR_writev:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
        ret = get_errno(writev(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 0);
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
        ret = -TARGET_ENOTDIR;
    case TARGET_NR_sched_getaffinity:
        unsigned int mask_size;
        unsigned long *mask;
        /*
         * sched_getaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            ret = -TARGET_EINVAL;
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
        mask = alloca(mask_size);
        ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
        if (!is_error(ret)) {
            if (copy_to_user(arg3, mask, ret)) {
    case TARGET_NR_sched_setaffinity:
        unsigned int mask_size;
        unsigned long *mask;
        /*
         * sched_setaffinity needs multiples of ulong, so need to take
         * care of mismatches between target ulong and host ulong sizes.
         */
        if (arg2 & (sizeof(abi_ulong) - 1)) {
            ret = -TARGET_EINVAL;
        mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
        mask = alloca(mask_size);
        if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
        memcpy(mask, p, arg2);
        unlock_user_struct(p, arg2, 0);
        ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
    case TARGET_NR_sched_setparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg2, 0);
        ret = get_errno(sched_setparam(arg1, &schp));
    case TARGET_NR_sched_getparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        ret = get_errno(sched_getparam(arg1, &schp));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
            target_schp->sched_priority = tswap32(schp.sched_priority);
            unlock_user_struct(target_schp, arg2, 1);
    case TARGET_NR_sched_setscheduler:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg3, 0);
        ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        ret = get_errno(sched_rr_get_interval(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
    case TARGET_NR_nanosleep:
        struct timespec req, rem;
        target_to_host_timespec(&req, arg1);
        ret = get_errno(nanosleep(&req, &rem));
        if (is_error(ret) && arg2) {
            host_to_target_timespec(arg2, &rem);
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
    case TARGET_NR_prctl:
        case PR_GET_PDEATHSIG:
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2))
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
#ifdef TARGET_NR_pread
    case TARGET_NR_pread:
        if (((CPUARMState *)cpu_env)->eabi)
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread(arg1, p, arg3, arg4));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite:
        if (((CPUARMState *)cpu_env)->eabi)
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite64:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
    case TARGET_NR_sendfile:
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
        ret = get_errno(getrlimit(arg1, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
            ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
            ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
            ret = host_to_target_stat64(cpu_env, arg2, &st);
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
    (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
        if (!(p = lock_user_string(arg2)))
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
            ret = host_to_target_stat64(cpu_env, arg3, &st);
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
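    /* getgroups/setgroups: legacy 16-bit group lists; entries are converted
       between target_id and host gid_t with the high2low/low2high helpers. */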
6821 case TARGET_NR_getgroups
:
6823 int gidsetsize
= arg1
;
6824 target_id
*target_grouplist
;
6828 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6829 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6830 if (gidsetsize
== 0)
6832 if (!is_error(ret
)) {
6833 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6834 if (!target_grouplist
)
6836 for(i
= 0;i
< ret
; i
++)
6837 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
6838 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6842 case TARGET_NR_setgroups
:
6844 int gidsetsize
= arg1
;
6845 target_id
*target_grouplist
;
6849 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6850 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6851 if (!target_grouplist
) {
6852 ret
= -TARGET_EFAULT
;
6855 for(i
= 0;i
< gidsetsize
; i
++)
6856 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
6857 unlock_user(target_grouplist
, arg2
, 0);
6858 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6861 case TARGET_NR_fchown
:
6862 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6864 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6865 case TARGET_NR_fchownat
:
6866 if (!(p
= lock_user_string(arg2
)))
6868 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6869 unlock_user(p
, arg2
, 0);
6872 #ifdef TARGET_NR_setresuid
6873 case TARGET_NR_setresuid
:
6874 ret
= get_errno(setresuid(low2highuid(arg1
),
6876 low2highuid(arg3
)));
6879 #ifdef TARGET_NR_getresuid
6880 case TARGET_NR_getresuid
:
6882 uid_t ruid
, euid
, suid
;
6883 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6884 if (!is_error(ret
)) {
6885 if (put_user_u16(high2lowuid(ruid
), arg1
)
6886 || put_user_u16(high2lowuid(euid
), arg2
)
6887 || put_user_u16(high2lowuid(suid
), arg3
))
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowgid(rgid), arg1)
                    || put_user_u16(high2lowgid(egid), arg2)
                    || put_user_u16(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
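    /* 32-bit ID variants: no width conversion is needed, so the values are
       passed straight through to the host calls. */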
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif
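    /* The Alpha getxuid/getxgid calls return two values: the real ID as the
       syscall result and the effective ID in register a4, which is why the
       cases below store into the CPUAlphaState ir[IR_A4] slot before
       returning. */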
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
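    /* osf_getsysinfo/osf_setsysinfo only implement the IEEE FP control
       subfunctions: they repack bits between the hardware FPCR layout and the
       software completion (SWCR) word used by the OSF ABI, mirroring the
       kernel's ieee_fpcr_to_swcr/ieee_swcr_to_fpcr helpers. */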
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr.  */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                         | SWCR_TRAP_ENABLE_DZE
                                         | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                         | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64 (swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2))
                    goto efault;
                orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr (cpu_env, fpcr);
                ret = 0;

                if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                    /* Old exceptions are not signaled.  */
                    fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                    /* If any exceptions set by this call, and are unmasked,
                       send a signal.  */
                    /* ??? FIXME */
                }
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        break;
#endif
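    /* osf_sigprocmask takes the block/unblock/setmask operation in arg1 and
       the mask in arg2, and (as implemented here) hands the previous mask
       back as the syscall return value rather than through a pointer. */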
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
        mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
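    /* posix_fadvise() returns the error number directly instead of setting
       errno, so the result below is negated by hand rather than wrapped in
       get_errno(). */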
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
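    /* fcntl64 on 32-bit ABIs: for the *LK64 commands the guest's 64-bit
       struct flock64 (including the padded ARM EABI layout) is converted to
       the host's struct flock64 and back; every other command is forwarded
       to do_fcntl(). */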
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL)
            return cmd;

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
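    /* readahead: 32-bit ABIs pass the 64-bit offset split across two
       registers; ARM EABI aligns 64-bit arguments to even register pairs, so
       the low/high words arrive one slot later and are shifted down first. */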
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
#ifdef TARGET_ARM
        if (((CPUARMState *)cpu_env)->eabi)
        {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
#endif
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
#endif
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3)
                tsp = NULL;
            else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
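    /* With NPTL emulation enabled, futex is handled by the do_futex()
       helper, which takes care of the individual FUTEX_* operations. */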
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
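    /* Zero-copy pipe syscalls (tee/splice/vmsplice), only compiled in when
       CONFIG_SPLICE is set; pipe offsets are read from guest memory as
       64-bit values when the guest supplies them. */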
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
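    /* epoll family: struct epoll_event is converted field by field between
       guest and host; the epoll_data_t payload is treated as opaque 64-bit
       data (see the comment in the epoll_ctl case below). */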
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
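    /* Syscalls without a case above end up in the default/unimplemented path
       below: the number is logged (silently for the *_nowarn cases) and
       -TARGET_ENOSYS is returned to the guest. */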
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;