qapi: dealloc visitor, fix premature free and iteration logic
[qemu/mdroth.git] / linux-user / syscall.c
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <elf.h>
25 #include <endian.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 #include <time.h>
30 #include <limits.h>
31 #include <sys/types.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/time.h>
36 #include <sys/stat.h>
37 #include <sys/mount.h>
38 #include <sys/prctl.h>
39 #include <sys/resource.h>
40 #include <sys/mman.h>
41 #include <sys/swap.h>
42 #include <signal.h>
43 #include <sched.h>
44 #ifdef __ia64__
45 int __clone2(int (*fn)(void *), void *child_stack_base,
46 size_t stack_size, int flags, void *arg, ...);
47 #endif
48 #include <sys/socket.h>
49 #include <sys/un.h>
50 #include <sys/uio.h>
51 #include <sys/poll.h>
52 #include <sys/times.h>
53 #include <sys/shm.h>
54 #include <sys/sem.h>
55 #include <sys/statfs.h>
56 #include <utime.h>
57 #include <sys/sysinfo.h>
58 #include <sys/utsname.h>
59 //#include <sys/user.h>
60 #include <netinet/ip.h>
61 #include <netinet/tcp.h>
62 #include <linux/wireless.h>
63 #include "qemu-common.h"
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include <attr/xattr.h>
75 #endif
77 #define termios host_termios
78 #define winsize host_winsize
79 #define termio host_termio
80 #define sgttyb host_sgttyb /* same as target */
81 #define tchars host_tchars /* same as target */
82 #define ltchars host_ltchars /* same as target */
84 #include <linux/termios.h>
85 #include <linux/unistd.h>
86 #include <linux/utsname.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include "linux_loop.h"
99 #include "cpu-uname.h"
101 #include "qemu.h"
103 #if defined(CONFIG_USE_NPTL)
104 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
105 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
106 #else
107 /* XXX: Hardcode the above values. */
108 #define CLONE_NPTL_FLAGS2 0
109 #endif
111 //#define DEBUG
113 //#include <linux/msdos_fs.h>
114 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
115 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
118 #undef _syscall0
119 #undef _syscall1
120 #undef _syscall2
121 #undef _syscall3
122 #undef _syscall4
123 #undef _syscall5
124 #undef _syscall6
126 #define _syscall0(type,name) \
127 static type name (void) \
128 { \
129 return syscall(__NR_##name); \
130 }
132 #define _syscall1(type,name,type1,arg1) \
133 static type name (type1 arg1) \
134 { \
135 return syscall(__NR_##name, arg1); \
136 }
138 #define _syscall2(type,name,type1,arg1,type2,arg2) \
139 static type name (type1 arg1,type2 arg2) \
140 { \
141 return syscall(__NR_##name, arg1, arg2); \
142 }
144 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
145 static type name (type1 arg1,type2 arg2,type3 arg3) \
146 { \
147 return syscall(__NR_##name, arg1, arg2, arg3); \
148 }
150 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
151 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
152 { \
153 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
154 }
156 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
157 type5,arg5) \
158 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
159 { \
160 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
161 }
164 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
165 type5,arg5,type6,arg6) \
166 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
167 type6 arg6) \
168 { \
169 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
170 }
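/* Each _syscallN() macro expands to a small static wrapper that enters the
 * host kernel directly through syscall(2), so the declarations below do not
 * depend on the host libc providing a wrapper for every call. */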
173 #define __NR_sys_uname __NR_uname
174 #define __NR_sys_faccessat __NR_faccessat
175 #define __NR_sys_fchmodat __NR_fchmodat
176 #define __NR_sys_fchownat __NR_fchownat
177 #define __NR_sys_fstatat64 __NR_fstatat64
178 #define __NR_sys_futimesat __NR_futimesat
179 #define __NR_sys_getcwd1 __NR_getcwd
180 #define __NR_sys_getdents __NR_getdents
181 #define __NR_sys_getdents64 __NR_getdents64
182 #define __NR_sys_getpriority __NR_getpriority
183 #define __NR_sys_linkat __NR_linkat
184 #define __NR_sys_mkdirat __NR_mkdirat
185 #define __NR_sys_mknodat __NR_mknodat
186 #define __NR_sys_newfstatat __NR_newfstatat
187 #define __NR_sys_openat __NR_openat
188 #define __NR_sys_readlinkat __NR_readlinkat
189 #define __NR_sys_renameat __NR_renameat
190 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
191 #define __NR_sys_symlinkat __NR_symlinkat
192 #define __NR_sys_syslog __NR_syslog
193 #define __NR_sys_tgkill __NR_tgkill
194 #define __NR_sys_tkill __NR_tkill
195 #define __NR_sys_unlinkat __NR_unlinkat
196 #define __NR_sys_utimensat __NR_utimensat
197 #define __NR_sys_futex __NR_futex
198 #define __NR_sys_inotify_init __NR_inotify_init
199 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
200 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
202 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
203 defined(__s390x__)
204 #define __NR__llseek __NR_lseek
205 #endif
207 #ifdef __NR_gettid
208 _syscall0(int, gettid)
209 #else
210 /* This is a replacement for the host gettid() and must return a host
211 errno. */
212 static int gettid(void) {
213 return -ENOSYS;
214 }
215 #endif
216 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
217 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
218 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
219 #endif
220 _syscall2(int, sys_getpriority, int, which, int, who);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
228 _syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
229 #endif
230 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
231 _syscall2(int,sys_tkill,int,tid,int,sig)
232 #endif
233 #ifdef __NR_exit_group
234 _syscall1(int,exit_group,int,error_code)
235 #endif
236 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
237 _syscall1(int,set_tid_address,int *,tidptr)
238 #endif
239 #if defined(CONFIG_USE_NPTL)
240 #if defined(TARGET_NR_futex) && defined(__NR_futex)
241 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
242 const struct timespec *,timeout,int *,uaddr2,int,val3)
243 #endif
244 #endif
245 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
246 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
247 unsigned long *, user_mask_ptr);
248 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
249 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
250 unsigned long *, user_mask_ptr);
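/* Translation table for open(2)/fcntl(2) flags: each row pairs a target-side
 * mask and bit pattern with the corresponding host-side mask and bits, and is
 * consumed by target_to_host_bitmask() (see sys_openat() below). */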
252 static bitmask_transtbl fcntl_flags_tbl[] = {
253 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
254 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
255 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
256 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
257 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
258 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
259 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
260 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
261 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
262 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
263 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
264 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
265 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
266 #if defined(O_DIRECT)
267 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
268 #endif
269 { 0, 0, 0, 0 }
270 };
272 #define COPY_UTSNAME_FIELD(dest, src) \
273 do { \
274 /* __NEW_UTS_LEN doesn't include terminating null */ \
275 (void) strncpy((dest), (src), __NEW_UTS_LEN); \
276 (dest)[__NEW_UTS_LEN] = '\0'; \
277 } while (0)
279 static int sys_uname(struct new_utsname *buf)
280 {
281 struct utsname uts_buf;
283 if (uname(&uts_buf) < 0)
284 return (-1);
286 /*
287 * Just in case these have some differences, we
288 * translate utsname to new_utsname (which is the
289 * struct linux kernel uses).
290 */
292 memset(buf, 0, sizeof(*buf));
293 COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
294 COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
295 COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
296 COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
297 COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
298 #ifdef _GNU_SOURCE
299 COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
300 #endif
301 return (0);
302 }
303 #undef COPY_UTSNAME_FIELD
306 static int sys_getcwd1(char *buf, size_t size)
307 {
308 if (getcwd(buf, size) == NULL) {
309 /* getcwd() sets errno */
310 return (-1);
311 }
312 return strlen(buf)+1;
313 }
315 #ifdef CONFIG_ATFILE
316 /*
317 * Host system seems to have atfile syscall stubs available. We
318 * now enable them one by one as specified by target syscall_nr.h.
319 */
321 #ifdef TARGET_NR_faccessat
322 static int sys_faccessat(int dirfd, const char *pathname, int mode)
324 return (faccessat(dirfd, pathname, mode, 0));
326 #endif
327 #ifdef TARGET_NR_fchmodat
328 static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
330 return (fchmodat(dirfd, pathname, mode, 0));
332 #endif
333 #if defined(TARGET_NR_fchownat)
334 static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
335 gid_t group, int flags)
337 return (fchownat(dirfd, pathname, owner, group, flags));
339 #endif
340 #ifdef __NR_fstatat64
341 static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
342 int flags)
344 return (fstatat(dirfd, pathname, buf, flags));
346 #endif
347 #ifdef __NR_newfstatat
348 static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
349 int flags)
351 return (fstatat(dirfd, pathname, buf, flags));
353 #endif
354 #ifdef TARGET_NR_futimesat
355 static int sys_futimesat(int dirfd, const char *pathname,
356 const struct timeval times[2])
358 return (futimesat(dirfd, pathname, times));
360 #endif
361 #ifdef TARGET_NR_linkat
362 static int sys_linkat(int olddirfd, const char *oldpath,
363 int newdirfd, const char *newpath, int flags)
365 return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
367 #endif
368 #ifdef TARGET_NR_mkdirat
369 static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
371 return (mkdirat(dirfd, pathname, mode));
373 #endif
374 #ifdef TARGET_NR_mknodat
375 static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
376 dev_t dev)
378 return (mknodat(dirfd, pathname, mode, dev));
380 #endif
381 #ifdef TARGET_NR_openat
382 static int sys_openat(int dirfd, const char *pathname, int flags, ...)
383 {
384 /*
385 * open(2) has extra parameter 'mode' when called with
386 * flag O_CREAT.
387 */
388 if ((flags & O_CREAT) != 0) {
389 va_list ap;
390 mode_t mode;
392 /*
393 * Get the 'mode' parameter and translate it to
394 * host bits.
395 */
396 va_start(ap, flags);
397 mode = va_arg(ap, mode_t);
398 mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
399 va_end(ap);
401 return (openat(dirfd, pathname, flags, mode));
402 }
403 return (openat(dirfd, pathname, flags));
404 }
405 #endif
406 #ifdef TARGET_NR_readlinkat
407 static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
409 return (readlinkat(dirfd, pathname, buf, bufsiz));
411 #endif
412 #ifdef TARGET_NR_renameat
413 static int sys_renameat(int olddirfd, const char *oldpath,
414 int newdirfd, const char *newpath)
416 return (renameat(olddirfd, oldpath, newdirfd, newpath));
418 #endif
419 #ifdef TARGET_NR_symlinkat
420 static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
422 return (symlinkat(oldpath, newdirfd, newpath));
424 #endif
425 #ifdef TARGET_NR_unlinkat
426 static int sys_unlinkat(int dirfd, const char *pathname, int flags)
428 return (unlinkat(dirfd, pathname, flags));
430 #endif
431 #else /* !CONFIG_ATFILE */
434 * Try direct syscalls instead
436 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
437 _syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
438 #endif
439 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
440 _syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
441 #endif
442 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
443 _syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
444 uid_t,owner,gid_t,group,int,flags)
445 #endif
446 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
447 defined(__NR_fstatat64)
448 _syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
449 struct stat *,buf,int,flags)
450 #endif
451 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
452 _syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
453 const struct timeval *,times)
454 #endif
455 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
456 defined(__NR_newfstatat)
457 _syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
458 struct stat *,buf,int,flags)
459 #endif
460 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
461 _syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
462 int,newdirfd,const char *,newpath,int,flags)
463 #endif
464 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
465 _syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
466 #endif
467 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
468 _syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
469 mode_t,mode,dev_t,dev)
470 #endif
471 #if defined(TARGET_NR_openat) && defined(__NR_openat)
472 _syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
473 #endif
474 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
475 _syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
476 char *,buf,size_t,bufsize)
477 #endif
478 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
479 _syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
480 int,newdirfd,const char *,newpath)
481 #endif
482 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
483 _syscall3(int,sys_symlinkat,const char *,oldpath,
484 int,newdirfd,const char *,newpath)
485 #endif
486 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
487 _syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
488 #endif
490 #endif /* CONFIG_ATFILE */
492 #ifdef CONFIG_UTIMENSAT
493 static int sys_utimensat(int dirfd, const char *pathname,
494 const struct timespec times[2], int flags)
495 {
496 if (pathname == NULL)
497 return futimens(dirfd, times);
498 else
499 return utimensat(dirfd, pathname, times, flags);
500 }
501 #else
502 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
503 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
504 const struct timespec *,tsp,int,flags)
505 #endif
506 #endif /* CONFIG_UTIMENSAT */
508 #ifdef CONFIG_INOTIFY
509 #include <sys/inotify.h>
511 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
512 static int sys_inotify_init(void)
514 return (inotify_init());
516 #endif
517 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
518 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
520 return (inotify_add_watch(fd, pathname, mask));
522 #endif
523 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
524 static int sys_inotify_rm_watch(int fd, int32_t wd)
526 return (inotify_rm_watch(fd, wd));
528 #endif
529 #ifdef CONFIG_INOTIFY1
530 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
531 static int sys_inotify_init1(int flags)
533 return (inotify_init1(flags));
535 #endif
536 #endif
537 #else
538 /* Userspace can usually survive runtime without inotify */
539 #undef TARGET_NR_inotify_init
540 #undef TARGET_NR_inotify_init1
541 #undef TARGET_NR_inotify_add_watch
542 #undef TARGET_NR_inotify_rm_watch
543 #endif /* CONFIG_INOTIFY */
545 #if defined(TARGET_NR_ppoll)
546 #ifndef __NR_ppoll
547 # define __NR_ppoll -1
548 #endif
549 #define __NR_sys_ppoll __NR_ppoll
550 _syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
551 struct timespec *, timeout, const __sigset_t *, sigmask,
552 size_t, sigsetsize)
553 #endif
555 #if defined(TARGET_NR_pselect6)
556 #ifndef __NR_pselect6
557 # define __NR_pselect6 -1
558 #endif
559 #define __NR_sys_pselect6 __NR_pselect6
560 _syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
561 fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
562 #endif
564 #if defined(TARGET_NR_prlimit64)
565 #ifndef __NR_prlimit64
566 # define __NR_prlimit64 -1
567 #endif
568 #define __NR_sys_prlimit64 __NR_prlimit64
569 /* The glibc rlimit structure may not be that used by the underlying syscall */
570 struct host_rlimit64 {
571 uint64_t rlim_cur;
572 uint64_t rlim_max;
573 };
574 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
575 const struct host_rlimit64 *, new_limit,
576 struct host_rlimit64 *, old_limit)
577 #endif
579 extern int personality(int);
580 extern int flock(int, int);
581 extern int setfsuid(int);
582 extern int setfsgid(int);
583 extern int setgroups(int, gid_t *);
585 /* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
586 #ifdef TARGET_ARM
587 static inline int regpairs_aligned(void *cpu_env) {
588 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
589 }
590 #elif defined(TARGET_MIPS)
591 static inline int regpairs_aligned(void *cpu_env) { return 1; }
592 #else
593 static inline int regpairs_aligned(void *cpu_env) { return 0; }
594 #endif
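/* regpairs_aligned() tells the syscall handlers whether the target ABI wants
 * 64-bit arguments in an even/odd register pair, i.e. whether a padding
 * argument slot has to be skipped before reading the low and high halves. */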
596 #define ERRNO_TABLE_SIZE 1200
598 /* target_to_host_errno_table[] is initialized from
599 * host_to_target_errno_table[] in syscall_init(). */
600 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
601 };
603 /*
604 * This list is the union of errno values overridden in asm-<arch>/errno.h
605 * minus the errnos that are not actually generic to all archs.
606 */
607 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
608 [EIDRM] = TARGET_EIDRM,
609 [ECHRNG] = TARGET_ECHRNG,
610 [EL2NSYNC] = TARGET_EL2NSYNC,
611 [EL3HLT] = TARGET_EL3HLT,
612 [EL3RST] = TARGET_EL3RST,
613 [ELNRNG] = TARGET_ELNRNG,
614 [EUNATCH] = TARGET_EUNATCH,
615 [ENOCSI] = TARGET_ENOCSI,
616 [EL2HLT] = TARGET_EL2HLT,
617 [EDEADLK] = TARGET_EDEADLK,
618 [ENOLCK] = TARGET_ENOLCK,
619 [EBADE] = TARGET_EBADE,
620 [EBADR] = TARGET_EBADR,
621 [EXFULL] = TARGET_EXFULL,
622 [ENOANO] = TARGET_ENOANO,
623 [EBADRQC] = TARGET_EBADRQC,
624 [EBADSLT] = TARGET_EBADSLT,
625 [EBFONT] = TARGET_EBFONT,
626 [ENOSTR] = TARGET_ENOSTR,
627 [ENODATA] = TARGET_ENODATA,
628 [ETIME] = TARGET_ETIME,
629 [ENOSR] = TARGET_ENOSR,
630 [ENONET] = TARGET_ENONET,
631 [ENOPKG] = TARGET_ENOPKG,
632 [EREMOTE] = TARGET_EREMOTE,
633 [ENOLINK] = TARGET_ENOLINK,
634 [EADV] = TARGET_EADV,
635 [ESRMNT] = TARGET_ESRMNT,
636 [ECOMM] = TARGET_ECOMM,
637 [EPROTO] = TARGET_EPROTO,
638 [EDOTDOT] = TARGET_EDOTDOT,
639 [EMULTIHOP] = TARGET_EMULTIHOP,
640 [EBADMSG] = TARGET_EBADMSG,
641 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
642 [EOVERFLOW] = TARGET_EOVERFLOW,
643 [ENOTUNIQ] = TARGET_ENOTUNIQ,
644 [EBADFD] = TARGET_EBADFD,
645 [EREMCHG] = TARGET_EREMCHG,
646 [ELIBACC] = TARGET_ELIBACC,
647 [ELIBBAD] = TARGET_ELIBBAD,
648 [ELIBSCN] = TARGET_ELIBSCN,
649 [ELIBMAX] = TARGET_ELIBMAX,
650 [ELIBEXEC] = TARGET_ELIBEXEC,
651 [EILSEQ] = TARGET_EILSEQ,
652 [ENOSYS] = TARGET_ENOSYS,
653 [ELOOP] = TARGET_ELOOP,
654 [ERESTART] = TARGET_ERESTART,
655 [ESTRPIPE] = TARGET_ESTRPIPE,
656 [ENOTEMPTY] = TARGET_ENOTEMPTY,
657 [EUSERS] = TARGET_EUSERS,
658 [ENOTSOCK] = TARGET_ENOTSOCK,
659 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
660 [EMSGSIZE] = TARGET_EMSGSIZE,
661 [EPROTOTYPE] = TARGET_EPROTOTYPE,
662 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
663 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
664 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
665 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
666 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
667 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
668 [EADDRINUSE] = TARGET_EADDRINUSE,
669 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
670 [ENETDOWN] = TARGET_ENETDOWN,
671 [ENETUNREACH] = TARGET_ENETUNREACH,
672 [ENETRESET] = TARGET_ENETRESET,
673 [ECONNABORTED] = TARGET_ECONNABORTED,
674 [ECONNRESET] = TARGET_ECONNRESET,
675 [ENOBUFS] = TARGET_ENOBUFS,
676 [EISCONN] = TARGET_EISCONN,
677 [ENOTCONN] = TARGET_ENOTCONN,
678 [EUCLEAN] = TARGET_EUCLEAN,
679 [ENOTNAM] = TARGET_ENOTNAM,
680 [ENAVAIL] = TARGET_ENAVAIL,
681 [EISNAM] = TARGET_EISNAM,
682 [EREMOTEIO] = TARGET_EREMOTEIO,
683 [ESHUTDOWN] = TARGET_ESHUTDOWN,
684 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
685 [ETIMEDOUT] = TARGET_ETIMEDOUT,
686 [ECONNREFUSED] = TARGET_ECONNREFUSED,
687 [EHOSTDOWN] = TARGET_EHOSTDOWN,
688 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
689 [EALREADY] = TARGET_EALREADY,
690 [EINPROGRESS] = TARGET_EINPROGRESS,
691 [ESTALE] = TARGET_ESTALE,
692 [ECANCELED] = TARGET_ECANCELED,
693 [ENOMEDIUM] = TARGET_ENOMEDIUM,
694 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
695 #ifdef ENOKEY
696 [ENOKEY] = TARGET_ENOKEY,
697 #endif
698 #ifdef EKEYEXPIRED
699 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
700 #endif
701 #ifdef EKEYREVOKED
702 [EKEYREVOKED] = TARGET_EKEYREVOKED,
703 #endif
704 #ifdef EKEYREJECTED
705 [EKEYREJECTED] = TARGET_EKEYREJECTED,
706 #endif
707 #ifdef EOWNERDEAD
708 [EOWNERDEAD] = TARGET_EOWNERDEAD,
709 #endif
710 #ifdef ENOTRECOVERABLE
711 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
711 #endif
712 };
715 static inline int host_to_target_errno(int err)
716 {
717 if(host_to_target_errno_table[err])
718 return host_to_target_errno_table[err];
719 return err;
720 }
722 static inline int target_to_host_errno(int err)
723 {
724 if (target_to_host_errno_table[err])
725 return target_to_host_errno_table[err];
726 return err;
727 }
729 static inline abi_long get_errno(abi_long ret)
730 {
731 if (ret == -1)
732 return -host_to_target_errno(errno);
733 else
734 return ret;
735 }
737 static inline int is_error(abi_long ret)
738 {
739 return (abi_ulong)ret >= (abi_ulong)(-4096);
740 }
742 char *target_strerror(int err)
743 {
744 return strerror(target_to_host_errno(err));
745 }
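/* Error-handling convention used throughout this file: host calls report
 * failure by returning -1 with errno set, so results are wrapped as
 *     ret = get_errno(host_call(...));
 * which yields a negative TARGET_Exxx value on failure; callers then test
 * is_error(ret) before copying any results back to guest memory. */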
747 static abi_ulong target_brk;
748 static abi_ulong target_original_brk;
749 static abi_ulong brk_page;
751 void target_set_brk(abi_ulong new_brk)
753 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
754 brk_page = HOST_PAGE_ALIGN(target_brk);
757 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
758 #define DEBUGF_BRK(message, args...)
760 /* do_brk() must return target values and target errnos. */
761 abi_long do_brk(abi_ulong new_brk)
763 abi_long mapped_addr;
764 int new_alloc_size;
766 DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);
768 if (!new_brk) {
769 DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
770 return target_brk;
772 if (new_brk < target_original_brk) {
773 DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
774 return target_brk;
777 /* If the new brk is less than the highest page reserved to the
778 * target heap allocation, set it and we're almost done... */
779 if (new_brk <= brk_page) {
780 /* Heap contents are initialized to zero, as for anonymous
781 * mapped pages. */
782 if (new_brk > target_brk) {
783 memset(g2h(target_brk), 0, new_brk - target_brk);
785 target_brk = new_brk;
786 DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
787 return target_brk;
790 /* We need to allocate more memory after the brk... Note that
791 * we don't use MAP_FIXED because that will map over the top of
792 * any existing mapping (like the one with the host libc or qemu
793 * itself); instead we treat "mapped but at wrong address" as
794 * a failure and unmap again.
795 */
796 new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
797 mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
798 PROT_READ|PROT_WRITE,
799 MAP_ANON|MAP_PRIVATE, 0, 0));
801 if (mapped_addr == brk_page) {
802 /* Heap contents are initialized to zero, as for anonymous
803 * mapped pages. Technically the new pages are already
804 * initialized to zero since they *are* anonymous mapped
805 * pages, however we have to take care with the contents that
806 * come from the remaining part of the previous page: it may
807 * contain garbage data due to a previous heap usage (grown
808 * then shrunken). */
809 memset(g2h(target_brk), 0, brk_page - target_brk);
811 target_brk = new_brk;
812 brk_page = HOST_PAGE_ALIGN(target_brk);
813 DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
814 return target_brk;
815 } else if (mapped_addr != -1) {
816 /* Mapped but at wrong address, meaning there wasn't actually
817 * enough space for this brk.
818 */
819 target_munmap(mapped_addr, new_alloc_size);
820 mapped_addr = -1;
821 DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
823 else {
824 DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
827 #if defined(TARGET_ALPHA)
828 /* We (partially) emulate OSF/1 on Alpha, which requires we
829 return a proper errno, not an unchanged brk value. */
830 return -TARGET_ENOMEM;
831 #endif
832 /* For everything else, return the previous break. */
833 return target_brk;
836 static inline abi_long copy_from_user_fdset(fd_set *fds,
837 abi_ulong target_fds_addr,
838 int n)
840 int i, nw, j, k;
841 abi_ulong b, *target_fds;
843 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
844 if (!(target_fds = lock_user(VERIFY_READ,
845 target_fds_addr,
846 sizeof(abi_ulong) * nw,
847 1)))
848 return -TARGET_EFAULT;
850 FD_ZERO(fds);
851 k = 0;
852 for (i = 0; i < nw; i++) {
853 /* grab the abi_ulong */
854 __get_user(b, &target_fds[i]);
855 for (j = 0; j < TARGET_ABI_BITS; j++) {
856 /* check the bit inside the abi_ulong */
857 if ((b >> j) & 1)
858 FD_SET(k, fds);
859 k++;
863 unlock_user(target_fds, target_fds_addr, 0);
865 return 0;
868 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
869 abi_ulong target_fds_addr,
870 int n)
872 if (target_fds_addr) {
873 if (copy_from_user_fdset(fds, target_fds_addr, n))
874 return -TARGET_EFAULT;
875 *fds_ptr = fds;
876 } else {
877 *fds_ptr = NULL;
879 return 0;
882 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
883 const fd_set *fds,
884 int n)
886 int i, nw, j, k;
887 abi_long v;
888 abi_ulong *target_fds;
890 nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
891 if (!(target_fds = lock_user(VERIFY_WRITE,
892 target_fds_addr,
893 sizeof(abi_ulong) * nw,
894 0)))
895 return -TARGET_EFAULT;
897 k = 0;
898 for (i = 0; i < nw; i++) {
899 v = 0;
900 for (j = 0; j < TARGET_ABI_BITS; j++) {
901 v |= ((FD_ISSET(k, fds) != 0) << j);
902 k++;
904 __put_user(v, &target_fds[i]);
907 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
909 return 0;
910 }
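/* The guest fd_set layout differs from the host's: it is an array of
 * abi_ulong words in guest byte order, so the two helpers above unpack and
 * repack the set bit by bit rather than copying it wholesale. */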
912 #if defined(__alpha__)
913 #define HOST_HZ 1024
914 #else
915 #define HOST_HZ 100
916 #endif
918 static inline abi_long host_to_target_clock_t(long ticks)
919 {
920 #if HOST_HZ == TARGET_HZ
921 return ticks;
922 #else
923 return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
924 #endif
925 }
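/* times(2)-style results are reported by the host in HOST_HZ clock ticks
 * while the guest expects TARGET_HZ, so tick counts are rescaled through a
 * 64-bit intermediate to avoid overflow when the rates differ. */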
927 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
928 const struct rusage *rusage)
930 struct target_rusage *target_rusage;
932 if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
933 return -TARGET_EFAULT;
934 target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
935 target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
936 target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
937 target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
938 target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
939 target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
940 target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
941 target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
942 target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
943 target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
944 target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
945 target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
946 target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
947 target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
948 target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
949 target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
950 target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
951 target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
952 unlock_user_struct(target_rusage, target_addr, 1);
954 return 0;
957 static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
958 {
959 target_ulong target_rlim_swap;
960 rlim_t result;
962 target_rlim_swap = tswapl(target_rlim);
963 if (target_rlim_swap == TARGET_RLIM_INFINITY || target_rlim_swap != (rlim_t)target_rlim_swap)
964 result = RLIM_INFINITY;
965 else
966 result = target_rlim_swap;
968 return result;
969 }
971 static inline target_ulong host_to_target_rlim(rlim_t rlim)
972 {
973 target_ulong target_rlim_swap;
974 target_ulong result;
976 if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
977 target_rlim_swap = TARGET_RLIM_INFINITY;
978 else
979 target_rlim_swap = rlim;
980 result = tswapl(target_rlim_swap);
982 return result;
983 }
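/* rlim_t values are converted with saturation in both directions: a value
 * equal to the source's RLIM_INFINITY, or too large for the destination
 * type, becomes the destination's infinity encoding instead of being
 * truncated. */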
985 static inline int target_to_host_resource(int code)
987 switch (code) {
988 case TARGET_RLIMIT_AS:
989 return RLIMIT_AS;
990 case TARGET_RLIMIT_CORE:
991 return RLIMIT_CORE;
992 case TARGET_RLIMIT_CPU:
993 return RLIMIT_CPU;
994 case TARGET_RLIMIT_DATA:
995 return RLIMIT_DATA;
996 case TARGET_RLIMIT_FSIZE:
997 return RLIMIT_FSIZE;
998 case TARGET_RLIMIT_LOCKS:
999 return RLIMIT_LOCKS;
1000 case TARGET_RLIMIT_MEMLOCK:
1001 return RLIMIT_MEMLOCK;
1002 case TARGET_RLIMIT_MSGQUEUE:
1003 return RLIMIT_MSGQUEUE;
1004 case TARGET_RLIMIT_NICE:
1005 return RLIMIT_NICE;
1006 case TARGET_RLIMIT_NOFILE:
1007 return RLIMIT_NOFILE;
1008 case TARGET_RLIMIT_NPROC:
1009 return RLIMIT_NPROC;
1010 case TARGET_RLIMIT_RSS:
1011 return RLIMIT_RSS;
1012 case TARGET_RLIMIT_RTPRIO:
1013 return RLIMIT_RTPRIO;
1014 case TARGET_RLIMIT_SIGPENDING:
1015 return RLIMIT_SIGPENDING;
1016 case TARGET_RLIMIT_STACK:
1017 return RLIMIT_STACK;
1018 default:
1019 return code;
1023 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1024 abi_ulong target_tv_addr)
1026 struct target_timeval *target_tv;
1028 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1029 return -TARGET_EFAULT;
1031 __get_user(tv->tv_sec, &target_tv->tv_sec);
1032 __get_user(tv->tv_usec, &target_tv->tv_usec);
1034 unlock_user_struct(target_tv, target_tv_addr, 0);
1036 return 0;
1039 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1040 const struct timeval *tv)
1042 struct target_timeval *target_tv;
1044 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1045 return -TARGET_EFAULT;
1047 __put_user(tv->tv_sec, &target_tv->tv_sec);
1048 __put_user(tv->tv_usec, &target_tv->tv_usec);
1050 unlock_user_struct(target_tv, target_tv_addr, 1);
1052 return 0;
1055 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1056 #include <mqueue.h>
1058 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1059 abi_ulong target_mq_attr_addr)
1061 struct target_mq_attr *target_mq_attr;
1063 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1064 target_mq_attr_addr, 1))
1065 return -TARGET_EFAULT;
1067 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1068 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1069 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1070 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1072 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1074 return 0;
1077 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1078 const struct mq_attr *attr)
1080 struct target_mq_attr *target_mq_attr;
1082 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1083 target_mq_attr_addr, 0))
1084 return -TARGET_EFAULT;
1086 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1087 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1088 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1089 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1091 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1093 return 0;
1095 #endif
1097 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1098 /* do_select() must return target values and target errnos. */
1099 static abi_long do_select(int n,
1100 abi_ulong rfd_addr, abi_ulong wfd_addr,
1101 abi_ulong efd_addr, abi_ulong target_tv_addr)
1103 fd_set rfds, wfds, efds;
1104 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1105 struct timeval tv, *tv_ptr;
1106 abi_long ret;
1108 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1109 if (ret) {
1110 return ret;
1112 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1113 if (ret) {
1114 return ret;
1116 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1117 if (ret) {
1118 return ret;
1121 if (target_tv_addr) {
1122 if (copy_from_user_timeval(&tv, target_tv_addr))
1123 return -TARGET_EFAULT;
1124 tv_ptr = &tv;
1125 } else {
1126 tv_ptr = NULL;
1129 ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
1131 if (!is_error(ret)) {
1132 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1133 return -TARGET_EFAULT;
1134 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1135 return -TARGET_EFAULT;
1136 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1137 return -TARGET_EFAULT;
1139 if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
1140 return -TARGET_EFAULT;
1143 return ret;
1145 #endif
1147 static abi_long do_pipe2(int host_pipe[], int flags)
1149 #ifdef CONFIG_PIPE2
1150 return pipe2(host_pipe, flags);
1151 #else
1152 return -ENOSYS;
1153 #endif
1156 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1157 int flags, int is_pipe2)
1159 int host_pipe[2];
1160 abi_long ret;
1161 ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1163 if (is_error(ret))
1164 return get_errno(ret);
1166 /* Several targets have special calling conventions for the original
1167 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1168 if (!is_pipe2) {
1169 #if defined(TARGET_ALPHA)
1170 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1171 return host_pipe[0];
1172 #elif defined(TARGET_MIPS)
1173 ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1174 return host_pipe[0];
1175 #elif defined(TARGET_SH4)
1176 ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1177 return host_pipe[0];
1178 #endif
1181 if (put_user_s32(host_pipe[0], pipedes)
1182 || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1183 return -TARGET_EFAULT;
1184 return get_errno(ret);
1187 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1188 abi_ulong target_addr,
1189 socklen_t len)
1191 struct target_ip_mreqn *target_smreqn;
1193 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1194 if (!target_smreqn)
1195 return -TARGET_EFAULT;
1196 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1197 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1198 if (len == sizeof(struct target_ip_mreqn))
1199 mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
1200 unlock_user(target_smreqn, target_addr, 0);
1202 return 0;
1205 static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
1206 abi_ulong target_addr,
1207 socklen_t len)
1209 const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1210 sa_family_t sa_family;
1211 struct target_sockaddr *target_saddr;
1213 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1214 if (!target_saddr)
1215 return -TARGET_EFAULT;
1217 sa_family = tswap16(target_saddr->sa_family);
1219 /* Oops. The caller might send an incomplete sun_path; sun_path
1220 * must be terminated by \0 (see the manual page), but
1221 * unfortunately it is quite common to specify sockaddr_un
1222 * length as "strlen(x->sun_path)" while it should be
1223 * "strlen(...) + 1". We'll fix that here if needed.
1224 * Linux kernel has a similar feature.
1225 */
1227 if (sa_family == AF_UNIX) {
1228 if (len < unix_maxlen && len > 0) {
1229 char *cp = (char*)target_saddr;
1231 if ( cp[len-1] && !cp[len] )
1232 len++;
1234 if (len > unix_maxlen)
1235 len = unix_maxlen;
1238 memcpy(addr, target_saddr, len);
1239 addr->sa_family = sa_family;
1240 unlock_user(target_saddr, target_addr, 0);
1242 return 0;
1245 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1246 struct sockaddr *addr,
1247 socklen_t len)
1249 struct target_sockaddr *target_saddr;
1251 target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1252 if (!target_saddr)
1253 return -TARGET_EFAULT;
1254 memcpy(target_saddr, addr, len);
1255 target_saddr->sa_family = tswap16(addr->sa_family);
1256 unlock_user(target_saddr, target_addr, len);
1258 return 0;
1261 /* ??? Should this also swap msgh->name? */
1262 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1263 struct target_msghdr *target_msgh)
1265 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1266 abi_long msg_controllen;
1267 abi_ulong target_cmsg_addr;
1268 struct target_cmsghdr *target_cmsg;
1269 socklen_t space = 0;
1271 msg_controllen = tswapl(target_msgh->msg_controllen);
1272 if (msg_controllen < sizeof (struct target_cmsghdr))
1273 goto the_end;
1274 target_cmsg_addr = tswapl(target_msgh->msg_control);
1275 target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1276 if (!target_cmsg)
1277 return -TARGET_EFAULT;
1279 while (cmsg && target_cmsg) {
1280 void *data = CMSG_DATA(cmsg);
1281 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1283 int len = tswapl(target_cmsg->cmsg_len)
1284 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
1286 space += CMSG_SPACE(len);
1287 if (space > msgh->msg_controllen) {
1288 space -= CMSG_SPACE(len);
1289 gemu_log("Host cmsg overflow\n");
1290 break;
1293 cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1294 cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1295 cmsg->cmsg_len = CMSG_LEN(len);
1297 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1298 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1299 memcpy(data, target_data, len);
1300 } else {
1301 int *fd = (int *)data;
1302 int *target_fd = (int *)target_data;
1303 int i, numfds = len / sizeof(int);
1305 for (i = 0; i < numfds; i++)
1306 fd[i] = tswap32(target_fd[i]);
1309 cmsg = CMSG_NXTHDR(msgh, cmsg);
1310 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1312 unlock_user(target_cmsg, target_cmsg_addr, 0);
1313 the_end:
1314 msgh->msg_controllen = space;
1315 return 0;
1318 /* ??? Should this also swap msgh->name? */
1319 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1320 struct msghdr *msgh)
1322 struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1323 abi_long msg_controllen;
1324 abi_ulong target_cmsg_addr;
1325 struct target_cmsghdr *target_cmsg;
1326 socklen_t space = 0;
1328 msg_controllen = tswapl(target_msgh->msg_controllen);
1329 if (msg_controllen < sizeof (struct target_cmsghdr))
1330 goto the_end;
1331 target_cmsg_addr = tswapl(target_msgh->msg_control);
1332 target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1333 if (!target_cmsg)
1334 return -TARGET_EFAULT;
1336 while (cmsg && target_cmsg) {
1337 void *data = CMSG_DATA(cmsg);
1338 void *target_data = TARGET_CMSG_DATA(target_cmsg);
1340 int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
1342 space += TARGET_CMSG_SPACE(len);
1343 if (space > msg_controllen) {
1344 space -= TARGET_CMSG_SPACE(len);
1345 gemu_log("Target cmsg overflow\n");
1346 break;
1349 target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1350 target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1351 target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));
1353 if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
1354 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
1355 memcpy(target_data, data, len);
1356 } else {
1357 int *fd = (int *)data;
1358 int *target_fd = (int *)target_data;
1359 int i, numfds = len / sizeof(int);
1361 for (i = 0; i < numfds; i++)
1362 target_fd[i] = tswap32(fd[i]);
1365 cmsg = CMSG_NXTHDR(msgh, cmsg);
1366 target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
1368 unlock_user(target_cmsg, target_cmsg_addr, space);
1369 the_end:
1370 target_msgh->msg_controllen = tswapl(space);
1371 return 0;
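/* Ancillary data is rebuilt rather than copied verbatim: cmsg headers and
 * lengths are converted between target and host layouts, SCM_RIGHTS payloads
 * are handled as arrays of 32-bit file descriptors that need byte swapping,
 * and any other cmsg type is copied through unchanged with a warning. */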
1374 /* do_setsockopt() Must return target values and target errnos. */
1375 static abi_long do_setsockopt(int sockfd, int level, int optname,
1376 abi_ulong optval_addr, socklen_t optlen)
1378 abi_long ret;
1379 int val;
1380 struct ip_mreqn *ip_mreq;
1381 struct ip_mreq_source *ip_mreq_source;
1383 switch(level) {
1384 case SOL_TCP:
1385 /* TCP options all take an 'int' value. */
1386 if (optlen < sizeof(uint32_t))
1387 return -TARGET_EINVAL;
1389 if (get_user_u32(val, optval_addr))
1390 return -TARGET_EFAULT;
1391 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1392 break;
1393 case SOL_IP:
1394 switch(optname) {
1395 case IP_TOS:
1396 case IP_TTL:
1397 case IP_HDRINCL:
1398 case IP_ROUTER_ALERT:
1399 case IP_RECVOPTS:
1400 case IP_RETOPTS:
1401 case IP_PKTINFO:
1402 case IP_MTU_DISCOVER:
1403 case IP_RECVERR:
1404 case IP_RECVTOS:
1405 #ifdef IP_FREEBIND
1406 case IP_FREEBIND:
1407 #endif
1408 case IP_MULTICAST_TTL:
1409 case IP_MULTICAST_LOOP:
1410 val = 0;
1411 if (optlen >= sizeof(uint32_t)) {
1412 if (get_user_u32(val, optval_addr))
1413 return -TARGET_EFAULT;
1414 } else if (optlen >= 1) {
1415 if (get_user_u8(val, optval_addr))
1416 return -TARGET_EFAULT;
1418 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
1419 break;
1420 case IP_ADD_MEMBERSHIP:
1421 case IP_DROP_MEMBERSHIP:
1422 if (optlen < sizeof (struct target_ip_mreq) ||
1423 optlen > sizeof (struct target_ip_mreqn))
1424 return -TARGET_EINVAL;
1426 ip_mreq = (struct ip_mreqn *) alloca(optlen);
1427 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
1428 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
1429 break;
1431 case IP_BLOCK_SOURCE:
1432 case IP_UNBLOCK_SOURCE:
1433 case IP_ADD_SOURCE_MEMBERSHIP:
1434 case IP_DROP_SOURCE_MEMBERSHIP:
1435 if (optlen != sizeof (struct target_ip_mreq_source))
1436 return -TARGET_EINVAL;
1438 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
1439 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
1440 unlock_user (ip_mreq_source, optval_addr, 0);
1441 break;
1443 default:
1444 goto unimplemented;
1446 break;
1447 case TARGET_SOL_SOCKET:
1448 switch (optname) {
1449 /* Options with 'int' argument. */
1450 case TARGET_SO_DEBUG:
1451 optname = SO_DEBUG;
1452 break;
1453 case TARGET_SO_REUSEADDR:
1454 optname = SO_REUSEADDR;
1455 break;
1456 case TARGET_SO_TYPE:
1457 optname = SO_TYPE;
1458 break;
1459 case TARGET_SO_ERROR:
1460 optname = SO_ERROR;
1461 break;
1462 case TARGET_SO_DONTROUTE:
1463 optname = SO_DONTROUTE;
1464 break;
1465 case TARGET_SO_BROADCAST:
1466 optname = SO_BROADCAST;
1467 break;
1468 case TARGET_SO_SNDBUF:
1469 optname = SO_SNDBUF;
1470 break;
1471 case TARGET_SO_RCVBUF:
1472 optname = SO_RCVBUF;
1473 break;
1474 case TARGET_SO_KEEPALIVE:
1475 optname = SO_KEEPALIVE;
1476 break;
1477 case TARGET_SO_OOBINLINE:
1478 optname = SO_OOBINLINE;
1479 break;
1480 case TARGET_SO_NO_CHECK:
1481 optname = SO_NO_CHECK;
1482 break;
1483 case TARGET_SO_PRIORITY:
1484 optname = SO_PRIORITY;
1485 break;
1486 #ifdef SO_BSDCOMPAT
1487 case TARGET_SO_BSDCOMPAT:
1488 optname = SO_BSDCOMPAT;
1489 break;
1490 #endif
1491 case TARGET_SO_PASSCRED:
1492 optname = SO_PASSCRED;
1493 break;
1494 case TARGET_SO_TIMESTAMP:
1495 optname = SO_TIMESTAMP;
1496 break;
1497 case TARGET_SO_RCVLOWAT:
1498 optname = SO_RCVLOWAT;
1499 break;
1500 case TARGET_SO_RCVTIMEO:
1501 optname = SO_RCVTIMEO;
1502 break;
1503 case TARGET_SO_SNDTIMEO:
1504 optname = SO_SNDTIMEO;
1505 break;
1506 break;
1507 default:
1508 goto unimplemented;
1510 if (optlen < sizeof(uint32_t))
1511 return -TARGET_EINVAL;
1513 if (get_user_u32(val, optval_addr))
1514 return -TARGET_EFAULT;
1515 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
1516 break;
1517 default:
1518 unimplemented:
1519 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
1520 ret = -TARGET_ENOPROTOOPT;
1522 return ret;
1525 /* do_getsockopt() Must return target values and target errnos. */
1526 static abi_long do_getsockopt(int sockfd, int level, int optname,
1527 abi_ulong optval_addr, abi_ulong optlen)
1529 abi_long ret;
1530 int len, val;
1531 socklen_t lv;
1533 switch(level) {
1534 case TARGET_SOL_SOCKET:
1535 level = SOL_SOCKET;
1536 switch (optname) {
1537 /* These don't just return a single integer */
1538 case TARGET_SO_LINGER:
1539 case TARGET_SO_RCVTIMEO:
1540 case TARGET_SO_SNDTIMEO:
1541 case TARGET_SO_PEERCRED:
1542 case TARGET_SO_PEERNAME:
1543 goto unimplemented;
1544 /* Options with 'int' argument. */
1545 case TARGET_SO_DEBUG:
1546 optname = SO_DEBUG;
1547 goto int_case;
1548 case TARGET_SO_REUSEADDR:
1549 optname = SO_REUSEADDR;
1550 goto int_case;
1551 case TARGET_SO_TYPE:
1552 optname = SO_TYPE;
1553 goto int_case;
1554 case TARGET_SO_ERROR:
1555 optname = SO_ERROR;
1556 goto int_case;
1557 case TARGET_SO_DONTROUTE:
1558 optname = SO_DONTROUTE;
1559 goto int_case;
1560 case TARGET_SO_BROADCAST:
1561 optname = SO_BROADCAST;
1562 goto int_case;
1563 case TARGET_SO_SNDBUF:
1564 optname = SO_SNDBUF;
1565 goto int_case;
1566 case TARGET_SO_RCVBUF:
1567 optname = SO_RCVBUF;
1568 goto int_case;
1569 case TARGET_SO_KEEPALIVE:
1570 optname = SO_KEEPALIVE;
1571 goto int_case;
1572 case TARGET_SO_OOBINLINE:
1573 optname = SO_OOBINLINE;
1574 goto int_case;
1575 case TARGET_SO_NO_CHECK:
1576 optname = SO_NO_CHECK;
1577 goto int_case;
1578 case TARGET_SO_PRIORITY:
1579 optname = SO_PRIORITY;
1580 goto int_case;
1581 #ifdef SO_BSDCOMPAT
1582 case TARGET_SO_BSDCOMPAT:
1583 optname = SO_BSDCOMPAT;
1584 goto int_case;
1585 #endif
1586 case TARGET_SO_PASSCRED:
1587 optname = SO_PASSCRED;
1588 goto int_case;
1589 case TARGET_SO_TIMESTAMP:
1590 optname = SO_TIMESTAMP;
1591 goto int_case;
1592 case TARGET_SO_RCVLOWAT:
1593 optname = SO_RCVLOWAT;
1594 goto int_case;
1595 default:
1596 goto int_case;
1598 break;
1599 case SOL_TCP:
1600 /* TCP options all take an 'int' value. */
1601 int_case:
1602 if (get_user_u32(len, optlen))
1603 return -TARGET_EFAULT;
1604 if (len < 0)
1605 return -TARGET_EINVAL;
1606 lv = sizeof(lv);
1607 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1608 if (ret < 0)
1609 return ret;
1610 if (len > lv)
1611 len = lv;
1612 if (len == 4) {
1613 if (put_user_u32(val, optval_addr))
1614 return -TARGET_EFAULT;
1615 } else {
1616 if (put_user_u8(val, optval_addr))
1617 return -TARGET_EFAULT;
1619 if (put_user_u32(len, optlen))
1620 return -TARGET_EFAULT;
1621 break;
1622 case SOL_IP:
1623 switch(optname) {
1624 case IP_TOS:
1625 case IP_TTL:
1626 case IP_HDRINCL:
1627 case IP_ROUTER_ALERT:
1628 case IP_RECVOPTS:
1629 case IP_RETOPTS:
1630 case IP_PKTINFO:
1631 case IP_MTU_DISCOVER:
1632 case IP_RECVERR:
1633 case IP_RECVTOS:
1634 #ifdef IP_FREEBIND
1635 case IP_FREEBIND:
1636 #endif
1637 case IP_MULTICAST_TTL:
1638 case IP_MULTICAST_LOOP:
1639 if (get_user_u32(len, optlen))
1640 return -TARGET_EFAULT;
1641 if (len < 0)
1642 return -TARGET_EINVAL;
1643 lv = sizeof(lv);
1644 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
1645 if (ret < 0)
1646 return ret;
1647 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
1648 len = 1;
1649 if (put_user_u32(len, optlen)
1650 || put_user_u8(val, optval_addr))
1651 return -TARGET_EFAULT;
1652 } else {
1653 if (len > sizeof(int))
1654 len = sizeof(int);
1655 if (put_user_u32(len, optlen)
1656 || put_user_u32(val, optval_addr))
1657 return -TARGET_EFAULT;
1659 break;
1660 default:
1661 ret = -TARGET_ENOPROTOOPT;
1662 break;
1664 break;
1665 default:
1666 unimplemented:
1667 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1668 level, optname);
1669 ret = -TARGET_EOPNOTSUPP;
1670 break;
1672 return ret;
1675 /* FIXME
1676 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1677 * other lock functions have a return code of 0 for failure.
1678 */
1679 static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
1680 int count, int copy)
1682 struct target_iovec *target_vec;
1683 abi_ulong base;
1684 int i;
1686 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1687 if (!target_vec)
1688 return -TARGET_EFAULT;
1689 for(i = 0;i < count; i++) {
1690 base = tswapl(target_vec[i].iov_base);
1691 vec[i].iov_len = tswapl(target_vec[i].iov_len);
1692 if (vec[i].iov_len != 0) {
1693 vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
1694 /* Don't check lock_user return value. We must call writev even
1695 if an element has an invalid base address. */
1696 } else {
1697 /* zero length pointer is ignored */
1698 vec[i].iov_base = NULL;
1701 unlock_user (target_vec, target_addr, 0);
1702 return 0;
1705 static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
1706 int count, int copy)
1708 struct target_iovec *target_vec;
1709 abi_ulong base;
1710 int i;
1712 target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
1713 if (!target_vec)
1714 return -TARGET_EFAULT;
1715 for(i = 0;i < count; i++) {
1716 if (target_vec[i].iov_base) {
1717 base = tswapl(target_vec[i].iov_base);
1718 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
1721 unlock_user (target_vec, target_addr, 0);
1723 return 0;
1726 /* do_socket() Must return target values and target errnos. */
1727 static abi_long do_socket(int domain, int type, int protocol)
1729 #if defined(TARGET_MIPS)
1730 switch(type) {
1731 case TARGET_SOCK_DGRAM:
1732 type = SOCK_DGRAM;
1733 break;
1734 case TARGET_SOCK_STREAM:
1735 type = SOCK_STREAM;
1736 break;
1737 case TARGET_SOCK_RAW:
1738 type = SOCK_RAW;
1739 break;
1740 case TARGET_SOCK_RDM:
1741 type = SOCK_RDM;
1742 break;
1743 case TARGET_SOCK_SEQPACKET:
1744 type = SOCK_SEQPACKET;
1745 break;
1746 case TARGET_SOCK_PACKET:
1747 type = SOCK_PACKET;
1748 break;
1750 #endif
1751 if (domain == PF_NETLINK)
1752 return -EAFNOSUPPORT; /* NETLINK socket connections are not supported */
1753 return get_errno(socket(domain, type, protocol));
1756 /* do_bind() Must return target values and target errnos. */
1757 static abi_long do_bind(int sockfd, abi_ulong target_addr,
1758 socklen_t addrlen)
1760 void *addr;
1761 abi_long ret;
1763 if ((int)addrlen < 0) {
1764 return -TARGET_EINVAL;
1767 addr = alloca(addrlen+1);
1769 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1770 if (ret)
1771 return ret;
1773 return get_errno(bind(sockfd, addr, addrlen));
1776 /* do_connect() Must return target values and target errnos. */
1777 static abi_long do_connect(int sockfd, abi_ulong target_addr,
1778 socklen_t addrlen)
1780 void *addr;
1781 abi_long ret;
1783 if ((int)addrlen < 0) {
1784 return -TARGET_EINVAL;
1787 addr = alloca(addrlen);
1789 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1790 if (ret)
1791 return ret;
1793 return get_errno(connect(sockfd, addr, addrlen));
1796 /* do_sendrecvmsg() Must return target values and target errnos. */
1797 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
1798 int flags, int send)
1800 abi_long ret, len;
1801 struct target_msghdr *msgp;
1802 struct msghdr msg;
1803 int count;
1804 struct iovec *vec;
1805 abi_ulong target_vec;
1807 /* FIXME */
1808 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
1809 msgp,
1810 target_msg,
1811 send ? 1 : 0))
1812 return -TARGET_EFAULT;
1813 if (msgp->msg_name) {
1814 msg.msg_namelen = tswap32(msgp->msg_namelen);
1815 msg.msg_name = alloca(msg.msg_namelen);
1816 ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
1817 msg.msg_namelen);
1818 if (ret) {
1819 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1820 return ret;
1822 } else {
1823 msg.msg_name = NULL;
1824 msg.msg_namelen = 0;
1826 msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
1827 msg.msg_control = alloca(msg.msg_controllen);
1828 msg.msg_flags = tswap32(msgp->msg_flags);
1830 count = tswapl(msgp->msg_iovlen);
1831 vec = alloca(count * sizeof(struct iovec));
1832 target_vec = tswapl(msgp->msg_iov);
1833 lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
1834 msg.msg_iovlen = count;
1835 msg.msg_iov = vec;
1837 if (send) {
1838 ret = target_to_host_cmsg(&msg, msgp);
1839 if (ret == 0)
1840 ret = get_errno(sendmsg(fd, &msg, flags));
1841 } else {
1842 ret = get_errno(recvmsg(fd, &msg, flags));
1843 if (!is_error(ret)) {
1844 len = ret;
1845 ret = host_to_target_cmsg(msgp, &msg);
1846 if (!is_error(ret))
1847 ret = len;
1850 unlock_iovec(vec, target_vec, count, !send);
1851 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
1852 return ret;
1855 /* do_accept() Must return target values and target errnos. */
1856 static abi_long do_accept(int fd, abi_ulong target_addr,
1857 abi_ulong target_addrlen_addr)
1859 socklen_t addrlen;
1860 void *addr;
1861 abi_long ret;
1863 if (target_addr == 0)
1864 return get_errno(accept(fd, NULL, NULL));
1866 /* linux returns EINVAL if addrlen pointer is invalid */
1867 if (get_user_u32(addrlen, target_addrlen_addr))
1868 return -TARGET_EINVAL;
1870 if ((int)addrlen < 0) {
1871 return -TARGET_EINVAL;
1874 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1875 return -TARGET_EINVAL;
1877 addr = alloca(addrlen);
1879 ret = get_errno(accept(fd, addr, &addrlen));
1880 if (!is_error(ret)) {
1881 host_to_target_sockaddr(target_addr, addr, addrlen);
1882 if (put_user_u32(addrlen, target_addrlen_addr))
1883 ret = -TARGET_EFAULT;
1885 return ret;
1888 /* do_getpeername() Must return target values and target errnos. */
1889 static abi_long do_getpeername(int fd, abi_ulong target_addr,
1890 abi_ulong target_addrlen_addr)
1892 socklen_t addrlen;
1893 void *addr;
1894 abi_long ret;
1896 if (get_user_u32(addrlen, target_addrlen_addr))
1897 return -TARGET_EFAULT;
1899 if ((int)addrlen < 0) {
1900 return -TARGET_EINVAL;
1903 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1904 return -TARGET_EFAULT;
1906 addr = alloca(addrlen);
1908 ret = get_errno(getpeername(fd, addr, &addrlen));
1909 if (!is_error(ret)) {
1910 host_to_target_sockaddr(target_addr, addr, addrlen);
1911 if (put_user_u32(addrlen, target_addrlen_addr))
1912 ret = -TARGET_EFAULT;
1914 return ret;
1917 /* do_getsockname() Must return target values and target errnos. */
1918 static abi_long do_getsockname(int fd, abi_ulong target_addr,
1919 abi_ulong target_addrlen_addr)
1921 socklen_t addrlen;
1922 void *addr;
1923 abi_long ret;
1925 if (get_user_u32(addrlen, target_addrlen_addr))
1926 return -TARGET_EFAULT;
1928 if ((int)addrlen < 0) {
1929 return -TARGET_EINVAL;
1932 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
1933 return -TARGET_EFAULT;
1935 addr = alloca(addrlen);
1937 ret = get_errno(getsockname(fd, addr, &addrlen));
1938 if (!is_error(ret)) {
1939 host_to_target_sockaddr(target_addr, addr, addrlen);
1940 if (put_user_u32(addrlen, target_addrlen_addr))
1941 ret = -TARGET_EFAULT;
1943 return ret;
1946 /* do_socketpair() Must return target values and target errnos. */
1947 static abi_long do_socketpair(int domain, int type, int protocol,
1948 abi_ulong target_tab_addr)
1950 int tab[2];
1951 abi_long ret;
1953 ret = get_errno(socketpair(domain, type, protocol, tab));
1954 if (!is_error(ret)) {
1955 if (put_user_s32(tab[0], target_tab_addr)
1956 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
1957 ret = -TARGET_EFAULT;
1959 return ret;
1962 /* do_sendto() must return target values and target errnos. */
1963 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
1964 abi_ulong target_addr, socklen_t addrlen)
1966 void *addr;
1967 void *host_msg;
1968 abi_long ret;
1970 if ((int)addrlen < 0) {
1971 return -TARGET_EINVAL;
1974 host_msg = lock_user(VERIFY_READ, msg, len, 1);
1975 if (!host_msg)
1976 return -TARGET_EFAULT;
1977 if (target_addr) {
1978 addr = alloca(addrlen);
1979 ret = target_to_host_sockaddr(addr, target_addr, addrlen);
1980 if (ret) {
1981 unlock_user(host_msg, msg, 0);
1982 return ret;
1984 ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
1985 } else {
1986 ret = get_errno(send(fd, host_msg, len, flags));
1988 unlock_user(host_msg, msg, 0);
1989 return ret;
1992 /* do_recvfrom() must return target values and target errnos. */
1993 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
1994 abi_ulong target_addr,
1995 abi_ulong target_addrlen)
1997 socklen_t addrlen;
1998 void *addr;
1999 void *host_msg;
2000 abi_long ret;
2002 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
2003 if (!host_msg)
2004 return -TARGET_EFAULT;
2005 if (target_addr) {
2006 if (get_user_u32(addrlen, target_addrlen)) {
2007 ret = -TARGET_EFAULT;
2008 goto fail;
2010 if ((int)addrlen < 0) {
2011 ret = -TARGET_EINVAL;
2012 goto fail;
2014 addr = alloca(addrlen);
2015 ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
2016 } else {
2017 addr = NULL; /* To keep compiler quiet. */
2018 ret = get_errno(qemu_recv(fd, host_msg, len, flags));
2020 if (!is_error(ret)) {
2021 if (target_addr) {
2022 host_to_target_sockaddr(target_addr, addr, addrlen);
2023 if (put_user_u32(addrlen, target_addrlen)) {
2024 ret = -TARGET_EFAULT;
2025 goto fail;
2028 unlock_user(host_msg, msg, len);
2029 } else {
2030 fail:
2031 unlock_user(host_msg, msg, 0);
2033 return ret;
2036 #ifdef TARGET_NR_socketcall
2037 /* do_socketcall() must return target values and target errnos. */
2038 static abi_long do_socketcall(int num, abi_ulong vptr)
2040 abi_long ret;
2041 const int n = sizeof(abi_ulong);
2043 switch(num) {
2044 case SOCKOP_socket:
2046 abi_ulong domain, type, protocol;
2048 if (get_user_ual(domain, vptr)
2049 || get_user_ual(type, vptr + n)
2050 || get_user_ual(protocol, vptr + 2 * n))
2051 return -TARGET_EFAULT;
2053 ret = do_socket(domain, type, protocol);
2055 break;
2056 case SOCKOP_bind:
2058 abi_ulong sockfd;
2059 abi_ulong target_addr;
2060 socklen_t addrlen;
2062 if (get_user_ual(sockfd, vptr)
2063 || get_user_ual(target_addr, vptr + n)
2064 || get_user_ual(addrlen, vptr + 2 * n))
2065 return -TARGET_EFAULT;
2067 ret = do_bind(sockfd, target_addr, addrlen);
2069 break;
2070 case SOCKOP_connect:
2072 abi_ulong sockfd;
2073 abi_ulong target_addr;
2074 socklen_t addrlen;
2076 if (get_user_ual(sockfd, vptr)
2077 || get_user_ual(target_addr, vptr + n)
2078 || get_user_ual(addrlen, vptr + 2 * n))
2079 return -TARGET_EFAULT;
2081 ret = do_connect(sockfd, target_addr, addrlen);
2083 break;
2084 case SOCKOP_listen:
2086 abi_ulong sockfd, backlog;
2088 if (get_user_ual(sockfd, vptr)
2089 || get_user_ual(backlog, vptr + n))
2090 return -TARGET_EFAULT;
2092 ret = get_errno(listen(sockfd, backlog));
2094 break;
2095 case SOCKOP_accept:
2097 abi_ulong sockfd;
2098 abi_ulong target_addr, target_addrlen;
2100 if (get_user_ual(sockfd, vptr)
2101 || get_user_ual(target_addr, vptr + n)
2102 || get_user_ual(target_addrlen, vptr + 2 * n))
2103 return -TARGET_EFAULT;
2105 ret = do_accept(sockfd, target_addr, target_addrlen);
2107 break;
2108 case SOCKOP_getsockname:
2110 abi_ulong sockfd;
2111 abi_ulong target_addr, target_addrlen;
2113 if (get_user_ual(sockfd, vptr)
2114 || get_user_ual(target_addr, vptr + n)
2115 || get_user_ual(target_addrlen, vptr + 2 * n))
2116 return -TARGET_EFAULT;
2118 ret = do_getsockname(sockfd, target_addr, target_addrlen);
2120 break;
2121 case SOCKOP_getpeername:
2123 abi_ulong sockfd;
2124 abi_ulong target_addr, target_addrlen;
2126 if (get_user_ual(sockfd, vptr)
2127 || get_user_ual(target_addr, vptr + n)
2128 || get_user_ual(target_addrlen, vptr + 2 * n))
2129 return -TARGET_EFAULT;
2131 ret = do_getpeername(sockfd, target_addr, target_addrlen);
2133 break;
2134 case SOCKOP_socketpair:
2136 abi_ulong domain, type, protocol;
2137 abi_ulong tab;
2139 if (get_user_ual(domain, vptr)
2140 || get_user_ual(type, vptr + n)
2141 || get_user_ual(protocol, vptr + 2 * n)
2142 || get_user_ual(tab, vptr + 3 * n))
2143 return -TARGET_EFAULT;
2145 ret = do_socketpair(domain, type, protocol, tab);
2147 break;
2148 case SOCKOP_send:
2150 abi_ulong sockfd;
2151 abi_ulong msg;
2152 size_t len;
2153 abi_ulong flags;
2155 if (get_user_ual(sockfd, vptr)
2156 || get_user_ual(msg, vptr + n)
2157 || get_user_ual(len, vptr + 2 * n)
2158 || get_user_ual(flags, vptr + 3 * n))
2159 return -TARGET_EFAULT;
2161 ret = do_sendto(sockfd, msg, len, flags, 0, 0);
2163 break;
2164 case SOCKOP_recv:
2166 abi_ulong sockfd;
2167 abi_ulong msg;
2168 size_t len;
2169 abi_ulong flags;
2171 if (get_user_ual(sockfd, vptr)
2172 || get_user_ual(msg, vptr + n)
2173 || get_user_ual(len, vptr + 2 * n)
2174 || get_user_ual(flags, vptr + 3 * n))
2175 return -TARGET_EFAULT;
2177 ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
2179 break;
2180 case SOCKOP_sendto:
2182 abi_ulong sockfd;
2183 abi_ulong msg;
2184 size_t len;
2185 abi_ulong flags;
2186 abi_ulong addr;
2187 socklen_t addrlen;
2189 if (get_user_ual(sockfd, vptr)
2190 || get_user_ual(msg, vptr + n)
2191 || get_user_ual(len, vptr + 2 * n)
2192 || get_user_ual(flags, vptr + 3 * n)
2193 || get_user_ual(addr, vptr + 4 * n)
2194 || get_user_ual(addrlen, vptr + 5 * n))
2195 return -TARGET_EFAULT;
2197 ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
2199 break;
2200 case SOCKOP_recvfrom:
2202 abi_ulong sockfd;
2203 abi_ulong msg;
2204 size_t len;
2205 abi_ulong flags;
2206 abi_ulong addr;
2207 socklen_t addrlen;
2209 if (get_user_ual(sockfd, vptr)
2210 || get_user_ual(msg, vptr + n)
2211 || get_user_ual(len, vptr + 2 * n)
2212 || get_user_ual(flags, vptr + 3 * n)
2213 || get_user_ual(addr, vptr + 4 * n)
2214 || get_user_ual(addrlen, vptr + 5 * n))
2215 return -TARGET_EFAULT;
2217 ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
2219 break;
2220 case SOCKOP_shutdown:
2222 abi_ulong sockfd, how;
2224 if (get_user_ual(sockfd, vptr)
2225 || get_user_ual(how, vptr + n))
2226 return -TARGET_EFAULT;
2228 ret = get_errno(shutdown(sockfd, how));
2230 break;
2231 case SOCKOP_sendmsg:
2232 case SOCKOP_recvmsg:
2234 abi_ulong fd;
2235 abi_ulong target_msg;
2236 abi_ulong flags;
2238 if (get_user_ual(fd, vptr)
2239 || get_user_ual(target_msg, vptr + n)
2240 || get_user_ual(flags, vptr + 2 * n))
2241 return -TARGET_EFAULT;
2243 ret = do_sendrecvmsg(fd, target_msg, flags,
2244 (num == SOCKOP_sendmsg));
2246 break;
2247 case SOCKOP_setsockopt:
2249 abi_ulong sockfd;
2250 abi_ulong level;
2251 abi_ulong optname;
2252 abi_ulong optval;
2253 socklen_t optlen;
2255 if (get_user_ual(sockfd, vptr)
2256 || get_user_ual(level, vptr + n)
2257 || get_user_ual(optname, vptr + 2 * n)
2258 || get_user_ual(optval, vptr + 3 * n)
2259 || get_user_ual(optlen, vptr + 4 * n))
2260 return -TARGET_EFAULT;
2262 ret = do_setsockopt(sockfd, level, optname, optval, optlen);
2264 break;
2265 case SOCKOP_getsockopt:
2267 abi_ulong sockfd;
2268 abi_ulong level;
2269 abi_ulong optname;
2270 abi_ulong optval;
2271 socklen_t optlen;
2273 if (get_user_ual(sockfd, vptr)
2274 || get_user_ual(level, vptr + n)
2275 || get_user_ual(optname, vptr + 2 * n)
2276 || get_user_ual(optval, vptr + 3 * n)
2277 || get_user_ual(optlen, vptr + 4 * n))
2278 return -TARGET_EFAULT;
2280 ret = do_getsockopt(sockfd, level, optname, optval, optlen);
2282 break;
2283 default:
2284 gemu_log("Unsupported socketcall: %d\n", num);
2285 ret = -TARGET_ENOSYS;
2286 break;
2288 return ret;
2290 #endif
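/* Illustrative aside (not part of the original file): do_socketcall() mirrors
 * the kernel's socketcall(2) multiplexer, so each case above fetches its
 * operands as abi_ulong values from the guest-supplied argument array at
 * vptr + i * n.  A guest-side invocation therefore looks roughly like the
 * hypothetical sketch below; SYS_CONNECT comes from <linux/net.h> and is
 * expected to line up with SOCKOP_connect above.
 *
 *     unsigned long args[3] = { fd, (unsigned long)sa, salen };
 *     syscall(__NR_socketcall, SYS_CONNECT, args);
 */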
2292 #define N_SHM_REGIONS 32
2294 static struct shm_region {
2295 abi_ulong start;
2296 abi_ulong size;
2297 } shm_regions[N_SHM_REGIONS];
2299 struct target_ipc_perm
2301 abi_long __key;
2302 abi_ulong uid;
2303 abi_ulong gid;
2304 abi_ulong cuid;
2305 abi_ulong cgid;
2306 unsigned short int mode;
2307 unsigned short int __pad1;
2308 unsigned short int __seq;
2309 unsigned short int __pad2;
2310 abi_ulong __unused1;
2311 abi_ulong __unused2;
2314 struct target_semid_ds
2316 struct target_ipc_perm sem_perm;
2317 abi_ulong sem_otime;
2318 abi_ulong __unused1;
2319 abi_ulong sem_ctime;
2320 abi_ulong __unused2;
2321 abi_ulong sem_nsems;
2322 abi_ulong __unused3;
2323 abi_ulong __unused4;
2326 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
2327 abi_ulong target_addr)
2329 struct target_ipc_perm *target_ip;
2330 struct target_semid_ds *target_sd;
2332 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2333 return -TARGET_EFAULT;
2334 target_ip = &(target_sd->sem_perm);
2335 host_ip->__key = tswapl(target_ip->__key);
2336 host_ip->uid = tswapl(target_ip->uid);
2337 host_ip->gid = tswapl(target_ip->gid);
2338 host_ip->cuid = tswapl(target_ip->cuid);
2339 host_ip->cgid = tswapl(target_ip->cgid);
2340 host_ip->mode = tswapl(target_ip->mode);
2341 unlock_user_struct(target_sd, target_addr, 0);
2342 return 0;
2345 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
2346 struct ipc_perm *host_ip)
2348 struct target_ipc_perm *target_ip;
2349 struct target_semid_ds *target_sd;
2351 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2352 return -TARGET_EFAULT;
2353 target_ip = &(target_sd->sem_perm);
2354 target_ip->__key = tswapl(host_ip->__key);
2355 target_ip->uid = tswapl(host_ip->uid);
2356 target_ip->gid = tswapl(host_ip->gid);
2357 target_ip->cuid = tswapl(host_ip->cuid);
2358 target_ip->cgid = tswapl(host_ip->cgid);
2359 target_ip->mode = tswapl(host_ip->mode);
2360 unlock_user_struct(target_sd, target_addr, 1);
2361 return 0;
2364 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
2365 abi_ulong target_addr)
2367 struct target_semid_ds *target_sd;
2369 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2370 return -TARGET_EFAULT;
2371 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
2372 return -TARGET_EFAULT;
2373 host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
2374 host_sd->sem_otime = tswapl(target_sd->sem_otime);
2375 host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
2376 unlock_user_struct(target_sd, target_addr, 0);
2377 return 0;
2380 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
2381 struct semid_ds *host_sd)
2383 struct target_semid_ds *target_sd;
2385 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2386 return -TARGET_EFAULT;
2387 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
2388 return -TARGET_EFAULT;
2389 target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
2390 target_sd->sem_otime = tswapl(host_sd->sem_otime);
2391 target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
2392 unlock_user_struct(target_sd, target_addr, 1);
2393 return 0;
2396 struct target_seminfo {
2397 int semmap;
2398 int semmni;
2399 int semmns;
2400 int semmnu;
2401 int semmsl;
2402 int semopm;
2403 int semume;
2404 int semusz;
2405 int semvmx;
2406 int semaem;
2409 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
2410 struct seminfo *host_seminfo)
2412 struct target_seminfo *target_seminfo;
2413 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
2414 return -TARGET_EFAULT;
2415 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
2416 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
2417 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
2418 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
2419 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
2420 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
2421 __put_user(host_seminfo->semume, &target_seminfo->semume);
2422 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
2423 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
2424 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
2425 unlock_user_struct(target_seminfo, target_addr, 1);
2426 return 0;
2429 union semun {
2430 int val;
2431 struct semid_ds *buf;
2432 unsigned short *array;
2433 struct seminfo *__buf;
2436 union target_semun {
2437 int val;
2438 abi_ulong buf;
2439 abi_ulong array;
2440 abi_ulong __buf;
2443 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
2444 abi_ulong target_addr)
2446 int nsems;
2447 unsigned short *array;
2448 union semun semun;
2449 struct semid_ds semid_ds;
2450 int i, ret;
2452 semun.buf = &semid_ds;
2454 ret = semctl(semid, 0, IPC_STAT, semun);
2455 if (ret == -1)
2456 return get_errno(ret);
2458 nsems = semid_ds.sem_nsems;
2460 *host_array = malloc(nsems*sizeof(unsigned short));
2461 array = lock_user(VERIFY_READ, target_addr,
2462 nsems*sizeof(unsigned short), 1);
2463 if (!array)
2464 return -TARGET_EFAULT;
2466 for(i=0; i<nsems; i++) {
2467 __get_user((*host_array)[i], &array[i]);
2469 unlock_user(array, target_addr, 0);
2471 return 0;
2474 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
2475 unsigned short **host_array)
2477 int nsems;
2478 unsigned short *array;
2479 union semun semun;
2480 struct semid_ds semid_ds;
2481 int i, ret;
2483 semun.buf = &semid_ds;
2485 ret = semctl(semid, 0, IPC_STAT, semun);
2486 if (ret == -1)
2487 return get_errno(ret);
2489 nsems = semid_ds.sem_nsems;
2491 array = lock_user(VERIFY_WRITE, target_addr,
2492 nsems*sizeof(unsigned short), 0);
2493 if (!array)
2494 return -TARGET_EFAULT;
2496 for(i=0; i<nsems; i++) {
2497 __put_user((*host_array)[i], &array[i]);
2499 free(*host_array);
2500 unlock_user(array, target_addr, 1);
2502 return 0;
2505 static inline abi_long do_semctl(int semid, int semnum, int cmd,
2506 union target_semun target_su)
2508 union semun arg;
2509 struct semid_ds dsarg;
2510 unsigned short *array = NULL;
2511 struct seminfo seminfo;
2512 abi_long ret = -TARGET_EINVAL;
2513 abi_long err;
2514 cmd &= 0xff;
2516 switch( cmd ) {
2517 case GETVAL:
2518 case SETVAL:
2519 arg.val = tswapl(target_su.val);
2520 ret = get_errno(semctl(semid, semnum, cmd, arg));
2521 target_su.val = tswapl(arg.val);
2522 break;
2523 case GETALL:
2524 case SETALL:
2525 err = target_to_host_semarray(semid, &array, target_su.array);
2526 if (err)
2527 return err;
2528 arg.array = array;
2529 ret = get_errno(semctl(semid, semnum, cmd, arg));
2530 err = host_to_target_semarray(semid, target_su.array, &array);
2531 if (err)
2532 return err;
2533 break;
2534 case IPC_STAT:
2535 case IPC_SET:
2536 case SEM_STAT:
2537 err = target_to_host_semid_ds(&dsarg, target_su.buf);
2538 if (err)
2539 return err;
2540 arg.buf = &dsarg;
2541 ret = get_errno(semctl(semid, semnum, cmd, arg));
2542 err = host_to_target_semid_ds(target_su.buf, &dsarg);
2543 if (err)
2544 return err;
2545 break;
2546 case IPC_INFO:
2547 case SEM_INFO:
2548 arg.__buf = &seminfo;
2549 ret = get_errno(semctl(semid, semnum, cmd, arg));
2550 err = host_to_target_seminfo(target_su.__buf, &seminfo);
2551 if (err)
2552 return err;
2553 break;
2554 case IPC_RMID:
2555 case GETPID:
2556 case GETNCNT:
2557 case GETZCNT:
2558 ret = get_errno(semctl(semid, semnum, cmd, NULL));
2559 break;
2562 return ret;
2565 struct target_sembuf {
2566 unsigned short sem_num;
2567 short sem_op;
2568 short sem_flg;
2571 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
2572 abi_ulong target_addr,
2573 unsigned nsops)
2575 struct target_sembuf *target_sembuf;
2576 int i;
2578 target_sembuf = lock_user(VERIFY_READ, target_addr,
2579 nsops*sizeof(struct target_sembuf), 1);
2580 if (!target_sembuf)
2581 return -TARGET_EFAULT;
2583 for(i=0; i<nsops; i++) {
2584 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
2585 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
2586 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
2589 unlock_user(target_sembuf, target_addr, 0);
2591 return 0;
2594 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
2596 struct sembuf sops[nsops];
2598 if (target_to_host_sembuf(sops, ptr, nsops))
2599 return -TARGET_EFAULT;
2601 return semop(semid, sops, nsops);
2604 struct target_msqid_ds
2606 struct target_ipc_perm msg_perm;
2607 abi_ulong msg_stime;
2608 #if TARGET_ABI_BITS == 32
2609 abi_ulong __unused1;
2610 #endif
2611 abi_ulong msg_rtime;
2612 #if TARGET_ABI_BITS == 32
2613 abi_ulong __unused2;
2614 #endif
2615 abi_ulong msg_ctime;
2616 #if TARGET_ABI_BITS == 32
2617 abi_ulong __unused3;
2618 #endif
2619 abi_ulong __msg_cbytes;
2620 abi_ulong msg_qnum;
2621 abi_ulong msg_qbytes;
2622 abi_ulong msg_lspid;
2623 abi_ulong msg_lrpid;
2624 abi_ulong __unused4;
2625 abi_ulong __unused5;
2628 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
2629 abi_ulong target_addr)
2631 struct target_msqid_ds *target_md;
2633 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
2634 return -TARGET_EFAULT;
2635 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
2636 return -TARGET_EFAULT;
2637 host_md->msg_stime = tswapl(target_md->msg_stime);
2638 host_md->msg_rtime = tswapl(target_md->msg_rtime);
2639 host_md->msg_ctime = tswapl(target_md->msg_ctime);
2640 host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
2641 host_md->msg_qnum = tswapl(target_md->msg_qnum);
2642 host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
2643 host_md->msg_lspid = tswapl(target_md->msg_lspid);
2644 host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
2645 unlock_user_struct(target_md, target_addr, 0);
2646 return 0;
2649 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
2650 struct msqid_ds *host_md)
2652 struct target_msqid_ds *target_md;
2654 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
2655 return -TARGET_EFAULT;
2656 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
2657 return -TARGET_EFAULT;
2658 target_md->msg_stime = tswapl(host_md->msg_stime);
2659 target_md->msg_rtime = tswapl(host_md->msg_rtime);
2660 target_md->msg_ctime = tswapl(host_md->msg_ctime);
2661 target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
2662 target_md->msg_qnum = tswapl(host_md->msg_qnum);
2663 target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
2664 target_md->msg_lspid = tswapl(host_md->msg_lspid);
2665 target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
2666 unlock_user_struct(target_md, target_addr, 1);
2667 return 0;
2670 struct target_msginfo {
2671 int msgpool;
2672 int msgmap;
2673 int msgmax;
2674 int msgmnb;
2675 int msgmni;
2676 int msgssz;
2677 int msgtql;
2678 unsigned short int msgseg;
2681 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
2682 struct msginfo *host_msginfo)
2684 struct target_msginfo *target_msginfo;
2685 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
2686 return -TARGET_EFAULT;
2687 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
2688 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
2689 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
2690 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
2691 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
2692 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
2693 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
2694 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
2695 unlock_user_struct(target_msginfo, target_addr, 1);
2696 return 0;
2699 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
2701 struct msqid_ds dsarg;
2702 struct msginfo msginfo;
2703 abi_long ret = -TARGET_EINVAL;
2705 cmd &= 0xff;
2707 switch (cmd) {
2708 case IPC_STAT:
2709 case IPC_SET:
2710 case MSG_STAT:
2711 if (target_to_host_msqid_ds(&dsarg,ptr))
2712 return -TARGET_EFAULT;
2713 ret = get_errno(msgctl(msgid, cmd, &dsarg));
2714 if (host_to_target_msqid_ds(ptr,&dsarg))
2715 return -TARGET_EFAULT;
2716 break;
2717 case IPC_RMID:
2718 ret = get_errno(msgctl(msgid, cmd, NULL));
2719 break;
2720 case IPC_INFO:
2721 case MSG_INFO:
2722 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
2723 if (host_to_target_msginfo(ptr, &msginfo))
2724 return -TARGET_EFAULT;
2725 break;
2728 return ret;
2731 struct target_msgbuf {
2732 abi_long mtype;
2733 char mtext[1];
2736 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
2737 unsigned int msgsz, int msgflg)
2739 struct target_msgbuf *target_mb;
2740 struct msgbuf *host_mb;
2741 abi_long ret = 0;
2743 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
2744 return -TARGET_EFAULT;
2745 host_mb = malloc(msgsz+sizeof(long));
2746 host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
2747 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
2748 ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
2749 free(host_mb);
2750 unlock_user_struct(target_mb, msgp, 0);
2752 return ret;
2755 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
2756 unsigned int msgsz, abi_long msgtyp,
2757 int msgflg)
2759 struct target_msgbuf *target_mb;
2760 char *target_mtext;
2761 struct msgbuf *host_mb;
2762 abi_long ret = 0;
2764 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
2765 return -TARGET_EFAULT;
2767 host_mb = malloc(msgsz+sizeof(long));
2768 ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
2770 if (ret > 0) {
2771 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
2772 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
2773 if (!target_mtext) {
2774 ret = -TARGET_EFAULT;
2775 goto end;
2777 memcpy(target_mb->mtext, host_mb->mtext, ret);
2778 unlock_user(target_mtext, target_mtext_addr, ret);
2781 target_mb->mtype = tswapl(host_mb->mtype);
2782 free(host_mb);
2784 end:
2785 if (target_mb)
2786 unlock_user_struct(target_mb, msgp, 1);
2787 return ret;
2790 struct target_shmid_ds
2792 struct target_ipc_perm shm_perm;
2793 abi_ulong shm_segsz;
2794 abi_ulong shm_atime;
2795 #if TARGET_ABI_BITS == 32
2796 abi_ulong __unused1;
2797 #endif
2798 abi_ulong shm_dtime;
2799 #if TARGET_ABI_BITS == 32
2800 abi_ulong __unused2;
2801 #endif
2802 abi_ulong shm_ctime;
2803 #if TARGET_ABI_BITS == 32
2804 abi_ulong __unused3;
2805 #endif
2806 int shm_cpid;
2807 int shm_lpid;
2808 abi_ulong shm_nattch;
2809 unsigned long int __unused4;
2810 unsigned long int __unused5;
2813 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
2814 abi_ulong target_addr)
2816 struct target_shmid_ds *target_sd;
2818 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
2819 return -TARGET_EFAULT;
2820 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
2821 return -TARGET_EFAULT;
2822 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2823 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
2824 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2825 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2826 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2827 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2828 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2829 unlock_user_struct(target_sd, target_addr, 0);
2830 return 0;
2833 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
2834 struct shmid_ds *host_sd)
2836 struct target_shmid_ds *target_sd;
2838 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
2839 return -TARGET_EFAULT;
2840 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
2841 return -TARGET_EFAULT;
2842 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
2843 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
2844 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
2845 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
2846 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
2847 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
2848 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
2849 unlock_user_struct(target_sd, target_addr, 1);
2850 return 0;
2853 struct target_shminfo {
2854 abi_ulong shmmax;
2855 abi_ulong shmmin;
2856 abi_ulong shmmni;
2857 abi_ulong shmseg;
2858 abi_ulong shmall;
2861 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
2862 struct shminfo *host_shminfo)
2864 struct target_shminfo *target_shminfo;
2865 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
2866 return -TARGET_EFAULT;
2867 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
2868 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
2869 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
2870 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
2871 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
2872 unlock_user_struct(target_shminfo, target_addr, 1);
2873 return 0;
2876 struct target_shm_info {
2877 int used_ids;
2878 abi_ulong shm_tot;
2879 abi_ulong shm_rss;
2880 abi_ulong shm_swp;
2881 abi_ulong swap_attempts;
2882 abi_ulong swap_successes;
2885 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
2886 struct shm_info *host_shm_info)
2888 struct target_shm_info *target_shm_info;
2889 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
2890 return -TARGET_EFAULT;
2891 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
2892 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
2893 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
2894 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
2895 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
2896 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
2897 unlock_user_struct(target_shm_info, target_addr, 1);
2898 return 0;
2901 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
2903 struct shmid_ds dsarg;
2904 struct shminfo shminfo;
2905 struct shm_info shm_info;
2906 abi_long ret = -TARGET_EINVAL;
2908 cmd &= 0xff;
2910 switch(cmd) {
2911 case IPC_STAT:
2912 case IPC_SET:
2913 case SHM_STAT:
2914 if (target_to_host_shmid_ds(&dsarg, buf))
2915 return -TARGET_EFAULT;
2916 ret = get_errno(shmctl(shmid, cmd, &dsarg));
2917 if (host_to_target_shmid_ds(buf, &dsarg))
2918 return -TARGET_EFAULT;
2919 break;
2920 case IPC_INFO:
2921 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
2922 if (host_to_target_shminfo(buf, &shminfo))
2923 return -TARGET_EFAULT;
2924 break;
2925 case SHM_INFO:
2926 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
2927 if (host_to_target_shm_info(buf, &shm_info))
2928 return -TARGET_EFAULT;
2929 break;
2930 case IPC_RMID:
2931 case SHM_LOCK:
2932 case SHM_UNLOCK:
2933 ret = get_errno(shmctl(shmid, cmd, NULL));
2934 break;
2937 return ret;
2940 static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
2942 abi_long raddr;
2943 void *host_raddr;
2944 struct shmid_ds shm_info;
2945 int i,ret;
2947 /* find out the length of the shared memory segment */
2948 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
2949 if (is_error(ret)) {
2950 /* can't get length, bail out */
2951 return ret;
2954 mmap_lock();
2956 if (shmaddr)
2957 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
2958 else {
2959 abi_ulong mmap_start;
2961 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
2963 if (mmap_start == -1) {
2964 errno = ENOMEM;
2965 host_raddr = (void *)-1;
2966 } else
2967 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
2970 if (host_raddr == (void *)-1) {
2971 mmap_unlock();
2972 return get_errno((long)host_raddr);
2974 raddr=h2g((unsigned long)host_raddr);
2976 page_set_flags(raddr, raddr + shm_info.shm_segsz,
2977 PAGE_VALID | PAGE_READ |
2978 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
2980 for (i = 0; i < N_SHM_REGIONS; i++) {
2981 if (shm_regions[i].start == 0) {
2982 shm_regions[i].start = raddr;
2983 shm_regions[i].size = shm_info.shm_segsz;
2984 break;
2988 mmap_unlock();
2989 return raddr;
2993 static inline abi_long do_shmdt(abi_ulong shmaddr)
2995 int i;
2997 for (i = 0; i < N_SHM_REGIONS; ++i) {
2998 if (shm_regions[i].start == shmaddr) {
2999 shm_regions[i].start = 0;
3000 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
3001 break;
3005 return get_errno(shmdt(g2h(shmaddr)));
3008 #ifdef TARGET_NR_ipc
3009 /* ??? This only works with linear mappings. */
3010 /* do_ipc() must return target values and target errnos. */
3011 static abi_long do_ipc(unsigned int call, int first,
3012 int second, int third,
3013 abi_long ptr, abi_long fifth)
3015 int version;
3016 abi_long ret = 0;
3018 version = call >> 16;
3019 call &= 0xffff;
3021 switch (call) {
3022 case IPCOP_semop:
3023 ret = do_semop(first, ptr, second);
3024 break;
3026 case IPCOP_semget:
3027 ret = get_errno(semget(first, second, third));
3028 break;
3030 case IPCOP_semctl:
3031 ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
3032 break;
3034 case IPCOP_msgget:
3035 ret = get_errno(msgget(first, second));
3036 break;
3038 case IPCOP_msgsnd:
3039 ret = do_msgsnd(first, ptr, second, third);
3040 break;
3042 case IPCOP_msgctl:
3043 ret = do_msgctl(first, second, ptr);
3044 break;
3046 case IPCOP_msgrcv:
3047 switch (version) {
3048 case 0:
3050 struct target_ipc_kludge {
3051 abi_long msgp;
3052 abi_long msgtyp;
3053 } *tmp;
3055 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
3056 ret = -TARGET_EFAULT;
3057 break;
3060 ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
3062 unlock_user_struct(tmp, ptr, 0);
3063 break;
3065 default:
3066 ret = do_msgrcv(first, ptr, second, fifth, third);
3068 break;
3070 case IPCOP_shmat:
3071 switch (version) {
3072 default:
3074 abi_ulong raddr;
3075 raddr = do_shmat(first, ptr, second);
3076 if (is_error(raddr))
3077 return get_errno(raddr);
3078 if (put_user_ual(raddr, third))
3079 return -TARGET_EFAULT;
3080 break;
3082 case 1:
3083 ret = -TARGET_EINVAL;
3084 break;
3086 break;
3087 case IPCOP_shmdt:
3088 ret = do_shmdt(ptr);
3089 break;
3091 case IPCOP_shmget:
3092 /* IPC_* flag values are the same on all Linux platforms */
3093 ret = get_errno(shmget(first, second, third));
3094 break;
3096 /* IPC_* and SHM_* command values are the same on all Linux platforms */
3097 case IPCOP_shmctl:
3098 ret = do_shmctl(first, second, third);
3099 break;
3100 default:
3101 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
3102 ret = -TARGET_ENOSYS;
3103 break;
3105 return ret;
3107 #endif
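/* Illustrative aside (not part of the original file): like socketcall(2), the
 * ipc(2) multiplexer packs its operands into generic arguments, so do_ipc()
 * forwards (first, second, third, ptr, fifth) unchanged to the per-operation
 * helpers above -- e.g. IPCOP_semctl treats ptr as a union target_semun,
 * while IPCOP_msgrcv in version 0 treats it as a pointer to the
 * {msgp, msgtyp} kludge struct shown in that case.
 */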
3109 /* kernel structure types definitions */
3111 #define STRUCT(name, ...) STRUCT_ ## name,
3112 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3113 enum {
3114 #include "syscall_types.h"
3116 #undef STRUCT
3117 #undef STRUCT_SPECIAL
3119 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3120 #define STRUCT_SPECIAL(name)
3121 #include "syscall_types.h"
3122 #undef STRUCT
3123 #undef STRUCT_SPECIAL
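/* Illustrative aside (hypothetical entry, not quoted from syscall_types.h):
 * with the two-pass inclusion above, an entry such as
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 * first contributes an enum member
 *     STRUCT_winsize,
 * and then a layout description used by the thunk conversion code:
 *     static const argtype struct_winsize_def[] =
 *         { TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL };
 */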
3125 typedef struct IOCTLEntry IOCTLEntry;
3127 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
3128 int fd, abi_long cmd, abi_long arg);
3130 struct IOCTLEntry {
3131 unsigned int target_cmd;
3132 unsigned int host_cmd;
3133 const char *name;
3134 int access;
3135 do_ioctl_fn *do_ioctl;
3136 const argtype arg_type[5];
3139 #define IOC_R 0x0001
3140 #define IOC_W 0x0002
3141 #define IOC_RW (IOC_R | IOC_W)
3143 #define MAX_STRUCT_SIZE 4096
3145 #ifdef CONFIG_FIEMAP
3146 /* So fiemap access checks don't overflow on 32-bit systems.
3147 * This is very slightly smaller than the limit imposed by
3148 * the underlying kernel.
3150 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3151 / sizeof(struct fiemap_extent))
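/* Illustrative aside (not part of the original file): the bound above keeps
 * the later size computation
 *     outbufsz = sizeof(*fm) + fm->fm_extent_count * sizeof(struct fiemap_extent)
 * from wrapping in 32-bit arithmetic, because by construction
 *     FIEMAP_MAX_EXTENTS * sizeof(struct fiemap_extent)
 *         <= UINT_MAX - sizeof(struct fiemap).
 */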
3153 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
3154 int fd, abi_long cmd, abi_long arg)
3156 /* The parameter for this ioctl is a struct fiemap followed
3157 * by an array of struct fiemap_extent whose size is set
3158 * in fiemap->fm_extent_count. The array is filled in by the
3159 * ioctl.
3161 int target_size_in, target_size_out;
3162 struct fiemap *fm;
3163 const argtype *arg_type = ie->arg_type;
3164 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
3165 void *argptr, *p;
3166 abi_long ret;
3167 int i, extent_size = thunk_type_size(extent_arg_type, 0);
3168 uint32_t outbufsz;
3169 int free_fm = 0;
3171 assert(arg_type[0] == TYPE_PTR);
3172 assert(ie->access == IOC_RW);
3173 arg_type++;
3174 target_size_in = thunk_type_size(arg_type, 0);
3175 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
3176 if (!argptr) {
3177 return -TARGET_EFAULT;
3179 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3180 unlock_user(argptr, arg, 0);
3181 fm = (struct fiemap *)buf_temp;
3182 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
3183 return -TARGET_EINVAL;
3186 outbufsz = sizeof (*fm) +
3187 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
3189 if (outbufsz > MAX_STRUCT_SIZE) {
3190 /* We can't fit all the extents into the fixed-size buffer.
3191 * Allocate one that is large enough and use it instead.
3193 fm = malloc(outbufsz);
3194 if (!fm) {
3195 return -TARGET_ENOMEM;
3197 memcpy(fm, buf_temp, sizeof(struct fiemap));
3198 free_fm = 1;
3200 ret = get_errno(ioctl(fd, ie->host_cmd, fm));
3201 if (!is_error(ret)) {
3202 target_size_out = target_size_in;
3203 /* An extent_count of 0 means we were only counting the extents
3204 * so there are no structs to copy
3206 if (fm->fm_extent_count != 0) {
3207 target_size_out += fm->fm_mapped_extents * extent_size;
3209 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
3210 if (!argptr) {
3211 ret = -TARGET_EFAULT;
3212 } else {
3213 /* Convert the struct fiemap */
3214 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
3215 if (fm->fm_extent_count != 0) {
3216 p = argptr + target_size_in;
3217 /* ...and then all the struct fiemap_extents */
3218 for (i = 0; i < fm->fm_mapped_extents; i++) {
3219 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
3220 THUNK_TARGET);
3221 p += extent_size;
3224 unlock_user(argptr, arg, target_size_out);
3227 if (free_fm) {
3228 free(fm);
3230 return ret;
3232 #endif
3234 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
3235 int fd, abi_long cmd, abi_long arg)
3237 const argtype *arg_type = ie->arg_type;
3238 int target_size;
3239 void *argptr;
3240 int ret;
3241 struct ifconf *host_ifconf;
3242 uint32_t outbufsz;
3243 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
3244 int target_ifreq_size;
3245 int nb_ifreq;
3246 int free_buf = 0;
3247 int i;
3248 int target_ifc_len;
3249 abi_long target_ifc_buf;
3250 int host_ifc_len;
3251 char *host_ifc_buf;
3253 assert(arg_type[0] == TYPE_PTR);
3254 assert(ie->access == IOC_RW);
3256 arg_type++;
3257 target_size = thunk_type_size(arg_type, 0);
3259 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3260 if (!argptr)
3261 return -TARGET_EFAULT;
3262 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3263 unlock_user(argptr, arg, 0);
3265 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
3266 target_ifc_len = host_ifconf->ifc_len;
3267 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
3269 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
3270 nb_ifreq = target_ifc_len / target_ifreq_size;
3271 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
3273 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
3274 if (outbufsz > MAX_STRUCT_SIZE) {
3275 /* We can't fit all the ifreq entries into the fixed-size buffer.
3276 * Allocate one that is large enough and use it instead.
3278 host_ifconf = malloc(outbufsz);
3279 if (!host_ifconf) {
3280 return -TARGET_ENOMEM;
3282 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
3283 free_buf = 1;
3285 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
3287 host_ifconf->ifc_len = host_ifc_len;
3288 host_ifconf->ifc_buf = host_ifc_buf;
3290 ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
3291 if (!is_error(ret)) {
3292 /* convert host ifc_len to target ifc_len */
3294 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
3295 target_ifc_len = nb_ifreq * target_ifreq_size;
3296 host_ifconf->ifc_len = target_ifc_len;
3298 /* restore target ifc_buf */
3300 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
3302 /* copy struct ifconf to target user */
3304 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3305 if (!argptr)
3306 return -TARGET_EFAULT;
3307 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
3308 unlock_user(argptr, arg, target_size);
3310 /* copy ifreq[] to target user */
3312 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
3313 for (i = 0; i < nb_ifreq ; i++) {
3314 thunk_convert(argptr + i * target_ifreq_size,
3315 host_ifc_buf + i * sizeof(struct ifreq),
3316 ifreq_arg_type, THUNK_TARGET);
3318 unlock_user(argptr, target_ifc_buf, target_ifc_len);
3321 if (free_buf) {
3322 free(host_ifconf);
3325 return ret;
3328 static IOCTLEntry ioctl_entries[] = {
3329 #define IOCTL(cmd, access, ...) \
3330 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3331 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3332 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3333 #include "ioctls.h"
3334 { 0, 0, },
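/* Illustrative aside (hypothetical entry, not quoted from ioctls.h): a line
 * such as
 *     IOCTL(FIONREAD, IOC_R, MK_PTR(TYPE_INT))
 * expands via the IOCTL() macro above into
 *     { TARGET_FIONREAD, FIONREAD, "FIONREAD", IOC_R, 0, { MK_PTR(TYPE_INT) } },
 * while IOCTL_SPECIAL() additionally wires in a do_ioctl_fn callback, as the
 * fiemap and ifconf handlers above require.
 */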
3337 /* ??? Implement proper locking for ioctls. */
3338 /* do_ioctl() must return target values and target errnos. */
3339 static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
3341 const IOCTLEntry *ie;
3342 const argtype *arg_type;
3343 abi_long ret;
3344 uint8_t buf_temp[MAX_STRUCT_SIZE];
3345 int target_size;
3346 void *argptr;
3348 ie = ioctl_entries;
3349 for(;;) {
3350 if (ie->target_cmd == 0) {
3351 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
3352 return -TARGET_ENOSYS;
3354 if (ie->target_cmd == cmd)
3355 break;
3356 ie++;
3358 arg_type = ie->arg_type;
3359 #if defined(DEBUG)
3360 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
3361 #endif
3362 if (ie->do_ioctl) {
3363 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
3366 switch(arg_type[0]) {
3367 case TYPE_NULL:
3368 /* no argument */
3369 ret = get_errno(ioctl(fd, ie->host_cmd));
3370 break;
3371 case TYPE_PTRVOID:
3372 case TYPE_INT:
3373 /* int argument */
3374 ret = get_errno(ioctl(fd, ie->host_cmd, arg));
3375 break;
3376 case TYPE_PTR:
3377 arg_type++;
3378 target_size = thunk_type_size(arg_type, 0);
3379 switch(ie->access) {
3380 case IOC_R:
3381 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3382 if (!is_error(ret)) {
3383 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3384 if (!argptr)
3385 return -TARGET_EFAULT;
3386 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3387 unlock_user(argptr, arg, target_size);
3389 break;
3390 case IOC_W:
3391 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3392 if (!argptr)
3393 return -TARGET_EFAULT;
3394 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3395 unlock_user(argptr, arg, 0);
3396 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3397 break;
3398 default:
3399 case IOC_RW:
3400 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
3401 if (!argptr)
3402 return -TARGET_EFAULT;
3403 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
3404 unlock_user(argptr, arg, 0);
3405 ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
3406 if (!is_error(ret)) {
3407 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
3408 if (!argptr)
3409 return -TARGET_EFAULT;
3410 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
3411 unlock_user(argptr, arg, target_size);
3413 break;
3415 break;
3416 default:
3417 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3418 (long)cmd, arg_type[0]);
3419 ret = -TARGET_ENOSYS;
3420 break;
3422 return ret;
3425 static const bitmask_transtbl iflag_tbl[] = {
3426 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
3427 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
3428 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
3429 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
3430 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
3431 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
3432 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
3433 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
3434 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
3435 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
3436 { TARGET_IXON, TARGET_IXON, IXON, IXON },
3437 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
3438 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
3439 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
3440 { 0, 0, 0, 0 }
3443 static const bitmask_transtbl oflag_tbl[] = {
3444 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
3445 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
3446 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
3447 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
3448 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
3449 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
3450 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
3451 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
3452 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
3453 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
3454 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
3455 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
3456 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
3457 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
3458 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
3459 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
3460 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
3461 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
3462 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
3463 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
3464 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
3465 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
3466 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
3467 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
3468 { 0, 0, 0, 0 }
3471 static const bitmask_transtbl cflag_tbl[] = {
3472 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
3473 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
3474 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
3475 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
3476 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
3477 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
3478 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
3479 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
3480 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
3481 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
3482 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
3483 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
3484 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
3485 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
3486 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
3487 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
3488 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
3489 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
3490 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
3491 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
3492 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
3493 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
3494 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
3495 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
3496 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
3497 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
3498 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
3499 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
3500 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
3501 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
3502 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
3503 { 0, 0, 0, 0 }
3506 static const bitmask_transtbl lflag_tbl[] = {
3507 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
3508 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
3509 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
3510 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
3511 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
3512 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
3513 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
3514 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
3515 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
3516 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
3517 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
3518 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
3519 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
3520 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
3521 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
3522 { 0, 0, 0, 0 }
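/* Illustrative aside (simplified sketch; field names here are illustrative,
 * not the real struct members): each bitmask_transtbl row above reads as
 * { target_mask, target_bits, host_mask, host_bits }, and the
 * target_to_host_bitmask()/host_to_target_bitmask() helpers used below walk
 * the table until the all-zero sentinel row, OR-ing in the host bits for
 * every row whose masked input matches:
 *
 *     out = 0;
 *     for (e = tbl; e->target_mask; e++)
 *         if ((in & e->target_mask) == e->target_bits)
 *             out |= e->host_bits;
 */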
3525 static void target_to_host_termios (void *dst, const void *src)
3527 struct host_termios *host = dst;
3528 const struct target_termios *target = src;
3530 host->c_iflag =
3531 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
3532 host->c_oflag =
3533 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
3534 host->c_cflag =
3535 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
3536 host->c_lflag =
3537 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
3538 host->c_line = target->c_line;
3540 memset(host->c_cc, 0, sizeof(host->c_cc));
3541 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
3542 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
3543 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
3544 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
3545 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
3546 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
3547 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
3548 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
3549 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
3550 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
3551 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
3552 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
3553 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
3554 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
3555 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
3556 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
3557 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
3560 static void host_to_target_termios (void *dst, const void *src)
3562 struct target_termios *target = dst;
3563 const struct host_termios *host = src;
3565 target->c_iflag =
3566 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
3567 target->c_oflag =
3568 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
3569 target->c_cflag =
3570 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
3571 target->c_lflag =
3572 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
3573 target->c_line = host->c_line;
3575 memset(target->c_cc, 0, sizeof(target->c_cc));
3576 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
3577 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
3578 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
3579 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
3580 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
3581 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
3582 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
3583 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
3584 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
3585 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
3586 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
3587 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
3588 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
3589 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
3590 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
3591 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
3592 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
3595 static const StructEntry struct_termios_def = {
3596 .convert = { host_to_target_termios, target_to_host_termios },
3597 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
3598 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
3601 static bitmask_transtbl mmap_flags_tbl[] = {
3602 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
3603 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
3604 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
3605 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
3606 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
3607 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
3608 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
3609 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
3610 { 0, 0, 0, 0 }
3613 #if defined(TARGET_I386)
3615 /* NOTE: there is really only one LDT shared by all the threads */
3616 static uint8_t *ldt_table;
3618 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
3620 int size;
3621 void *p;
3623 if (!ldt_table)
3624 return 0;
3625 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
3626 if (size > bytecount)
3627 size = bytecount;
3628 p = lock_user(VERIFY_WRITE, ptr, size, 0);
3629 if (!p)
3630 return -TARGET_EFAULT;
3631 /* ??? Should this be byteswapped? */
3632 memcpy(p, ldt_table, size);
3633 unlock_user(p, ptr, size);
3634 return size;
3637 /* XXX: add locking support */
3638 static abi_long write_ldt(CPUX86State *env,
3639 abi_ulong ptr, unsigned long bytecount, int oldmode)
3641 struct target_modify_ldt_ldt_s ldt_info;
3642 struct target_modify_ldt_ldt_s *target_ldt_info;
3643 int seg_32bit, contents, read_exec_only, limit_in_pages;
3644 int seg_not_present, useable, lm;
3645 uint32_t *lp, entry_1, entry_2;
3647 if (bytecount != sizeof(ldt_info))
3648 return -TARGET_EINVAL;
3649 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
3650 return -TARGET_EFAULT;
3651 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3652 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3653 ldt_info.limit = tswap32(target_ldt_info->limit);
3654 ldt_info.flags = tswap32(target_ldt_info->flags);
3655 unlock_user_struct(target_ldt_info, ptr, 0);
3657 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
3658 return -TARGET_EINVAL;
3659 seg_32bit = ldt_info.flags & 1;
3660 contents = (ldt_info.flags >> 1) & 3;
3661 read_exec_only = (ldt_info.flags >> 3) & 1;
3662 limit_in_pages = (ldt_info.flags >> 4) & 1;
3663 seg_not_present = (ldt_info.flags >> 5) & 1;
3664 useable = (ldt_info.flags >> 6) & 1;
3665 #ifdef TARGET_ABI32
3666 lm = 0;
3667 #else
3668 lm = (ldt_info.flags >> 7) & 1;
3669 #endif
3670 if (contents == 3) {
3671 if (oldmode)
3672 return -TARGET_EINVAL;
3673 if (seg_not_present == 0)
3674 return -TARGET_EINVAL;
3676 /* allocate the LDT */
3677 if (!ldt_table) {
3678 env->ldt.base = target_mmap(0,
3679 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
3680 PROT_READ|PROT_WRITE,
3681 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
3682 if (env->ldt.base == -1)
3683 return -TARGET_ENOMEM;
3684 memset(g2h(env->ldt.base), 0,
3685 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
3686 env->ldt.limit = 0xffff;
3687 ldt_table = g2h(env->ldt.base);
3690 /* NOTE: same code as Linux kernel */
3691 /* Allow LDTs to be cleared by the user. */
3692 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3693 if (oldmode ||
3694 (contents == 0 &&
3695 read_exec_only == 1 &&
3696 seg_32bit == 0 &&
3697 limit_in_pages == 0 &&
3698 seg_not_present == 1 &&
3699 useable == 0 )) {
3700 entry_1 = 0;
3701 entry_2 = 0;
3702 goto install;
3706 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3707 (ldt_info.limit & 0x0ffff);
3708 entry_2 = (ldt_info.base_addr & 0xff000000) |
3709 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3710 (ldt_info.limit & 0xf0000) |
3711 ((read_exec_only ^ 1) << 9) |
3712 (contents << 10) |
3713 ((seg_not_present ^ 1) << 15) |
3714 (seg_32bit << 22) |
3715 (limit_in_pages << 23) |
3716 (lm << 21) |
3717 0x7000;
3718 if (!oldmode)
3719 entry_2 |= (useable << 20);
3721 /* Install the new entry ... */
3722 install:
3723 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
3724 lp[0] = tswap32(entry_1);
3725 lp[1] = tswap32(entry_2);
3726 return 0;
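/* Illustrative aside (not part of the original file): entry_1/entry_2 are the
 * two 32-bit halves of an x86 segment descriptor.  entry_1 packs
 * base[15:0] in its high half and limit[15:0] in its low half; entry_2
 * holds base[31:24] and base[23:16], limit[19:16], the access bits derived
 * from ldt_info.flags (readable/writable, type, present, 32-bit,
 * 4K-granularity, long mode), and the constant 0x7000 (S=1 code/data
 * descriptor at DPL 3).
 */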
3729 /* specific and weird i386 syscalls */
3730 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
3731 unsigned long bytecount)
3733 abi_long ret;
3735 switch (func) {
3736 case 0:
3737 ret = read_ldt(ptr, bytecount);
3738 break;
3739 case 1:
3740 ret = write_ldt(env, ptr, bytecount, 1);
3741 break;
3742 case 0x11:
3743 ret = write_ldt(env, ptr, bytecount, 0);
3744 break;
3745 default:
3746 ret = -TARGET_ENOSYS;
3747 break;
3749 return ret;
3752 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3753 static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
3755 uint64_t *gdt_table = g2h(env->gdt.base);
3756 struct target_modify_ldt_ldt_s ldt_info;
3757 struct target_modify_ldt_ldt_s *target_ldt_info;
3758 int seg_32bit, contents, read_exec_only, limit_in_pages;
3759 int seg_not_present, useable, lm;
3760 uint32_t *lp, entry_1, entry_2;
3761 int i;
3763 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3764 if (!target_ldt_info)
3765 return -TARGET_EFAULT;
3766 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
3767 ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
3768 ldt_info.limit = tswap32(target_ldt_info->limit);
3769 ldt_info.flags = tswap32(target_ldt_info->flags);
3770 if (ldt_info.entry_number == -1) {
3771 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
3772 if (gdt_table[i] == 0) {
3773 ldt_info.entry_number = i;
3774 target_ldt_info->entry_number = tswap32(i);
3775 break;
3779 unlock_user_struct(target_ldt_info, ptr, 1);
3781 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
3782 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
3783 return -TARGET_EINVAL;
3784 seg_32bit = ldt_info.flags & 1;
3785 contents = (ldt_info.flags >> 1) & 3;
3786 read_exec_only = (ldt_info.flags >> 3) & 1;
3787 limit_in_pages = (ldt_info.flags >> 4) & 1;
3788 seg_not_present = (ldt_info.flags >> 5) & 1;
3789 useable = (ldt_info.flags >> 6) & 1;
3790 #ifdef TARGET_ABI32
3791 lm = 0;
3792 #else
3793 lm = (ldt_info.flags >> 7) & 1;
3794 #endif
3796 if (contents == 3) {
3797 if (seg_not_present == 0)
3798 return -TARGET_EINVAL;
3801 /* NOTE: same code as Linux kernel */
3802 /* Allow LDTs to be cleared by the user. */
3803 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
3804 if ((contents == 0 &&
3805 read_exec_only == 1 &&
3806 seg_32bit == 0 &&
3807 limit_in_pages == 0 &&
3808 seg_not_present == 1 &&
3809 useable == 0 )) {
3810 entry_1 = 0;
3811 entry_2 = 0;
3812 goto install;
3816 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
3817 (ldt_info.limit & 0x0ffff);
3818 entry_2 = (ldt_info.base_addr & 0xff000000) |
3819 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
3820 (ldt_info.limit & 0xf0000) |
3821 ((read_exec_only ^ 1) << 9) |
3822 (contents << 10) |
3823 ((seg_not_present ^ 1) << 15) |
3824 (seg_32bit << 22) |
3825 (limit_in_pages << 23) |
3826 (useable << 20) |
3827 (lm << 21) |
3828 0x7000;
3830 /* Install the new entry ... */
3831 install:
3832 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
3833 lp[0] = tswap32(entry_1);
3834 lp[1] = tswap32(entry_2);
3835 return 0;
3838 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
3840 struct target_modify_ldt_ldt_s *target_ldt_info;
3841 uint64_t *gdt_table = g2h(env->gdt.base);
3842 uint32_t base_addr, limit, flags;
3843 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
3844 int seg_not_present, useable, lm;
3845 uint32_t *lp, entry_1, entry_2;
3847 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
3848 if (!target_ldt_info)
3849 return -TARGET_EFAULT;
3850 idx = tswap32(target_ldt_info->entry_number);
3851 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
3852 idx > TARGET_GDT_ENTRY_TLS_MAX) {
3853 unlock_user_struct(target_ldt_info, ptr, 1);
3854 return -TARGET_EINVAL;
3856 lp = (uint32_t *)(gdt_table + idx);
3857 entry_1 = tswap32(lp[0]);
3858 entry_2 = tswap32(lp[1]);
3860 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
3861 contents = (entry_2 >> 10) & 3;
3862 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
3863 seg_32bit = (entry_2 >> 22) & 1;
3864 limit_in_pages = (entry_2 >> 23) & 1;
3865 useable = (entry_2 >> 20) & 1;
3866 #ifdef TARGET_ABI32
3867 lm = 0;
3868 #else
3869 lm = (entry_2 >> 21) & 1;
3870 #endif
3871 flags = (seg_32bit << 0) | (contents << 1) |
3872 (read_exec_only << 3) | (limit_in_pages << 4) |
3873 (seg_not_present << 5) | (useable << 6) | (lm << 7);
3874 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
3875 base_addr = (entry_1 >> 16) |
3876 (entry_2 & 0xff000000) |
3877 ((entry_2 & 0xff) << 16);
3878 target_ldt_info->base_addr = tswapl(base_addr);
3879 target_ldt_info->limit = tswap32(limit);
3880 target_ldt_info->flags = tswap32(flags);
3881 unlock_user_struct(target_ldt_info, ptr, 1);
3882 return 0;
3884 #endif /* TARGET_I386 && TARGET_ABI32 */
3886 #ifndef TARGET_ABI32
3887 static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
3889 abi_long ret = 0;
3890 abi_ulong val;
3891 int idx;
3893 switch(code) {
3894 case TARGET_ARCH_SET_GS:
3895 case TARGET_ARCH_SET_FS:
3896 if (code == TARGET_ARCH_SET_GS)
3897 idx = R_GS;
3898 else
3899 idx = R_FS;
3900 cpu_x86_load_seg(env, idx, 0);
3901 env->segs[idx].base = addr;
3902 break;
3903 case TARGET_ARCH_GET_GS:
3904 case TARGET_ARCH_GET_FS:
3905 if (code == TARGET_ARCH_GET_GS)
3906 idx = R_GS;
3907 else
3908 idx = R_FS;
3909 val = env->segs[idx].base;
3910 if (put_user(val, addr, abi_ulong))
3911 ret = -TARGET_EFAULT;
3912 break;
3913 default:
3914 ret = -TARGET_EINVAL;
3915 break;
3917 return ret;
3919 #endif
3921 #endif /* defined(TARGET_I386) */
3923 #define NEW_STACK_SIZE 0x40000
3925 #if defined(CONFIG_USE_NPTL)
3927 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
3928 typedef struct {
3929 CPUState *env;
3930 pthread_mutex_t mutex;
3931 pthread_cond_t cond;
3932 pthread_t thread;
3933 uint32_t tid;
3934 abi_ulong child_tidptr;
3935 abi_ulong parent_tidptr;
3936 sigset_t sigmask;
3937 } new_thread_info;
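/* Handshake data passed from do_fork() to clone_func(): the parent waits on
   `cond' until the child has published its TID, and `clone_lock' keeps the
   child out of cpu_loop() until the parent has finished setting it up. */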
3939 static void *clone_func(void *arg)
3941 new_thread_info *info = arg;
3942 CPUState *env;
3943 TaskState *ts;
3945 env = info->env;
3946 thread_env = env;
3947 ts = (TaskState *)thread_env->opaque;
3948 info->tid = gettid();
3949 env->host_tid = info->tid;
3950 task_settid(ts);
3951 if (info->child_tidptr)
3952 put_user_u32(info->tid, info->child_tidptr);
3953 if (info->parent_tidptr)
3954 put_user_u32(info->tid, info->parent_tidptr);
3955 /* Enable signals. */
3956 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
3957 /* Signal to the parent that we're ready. */
3958 pthread_mutex_lock(&info->mutex);
3959 pthread_cond_broadcast(&info->cond);
3960 pthread_mutex_unlock(&info->mutex);
3961 /* Wait until the parent has finished initializing the TLS state. */
3962 pthread_mutex_lock(&clone_lock);
3963 pthread_mutex_unlock(&clone_lock);
3964 cpu_loop(env);
3965 /* never exits */
3966 return NULL;
3968 #else
3970 static int clone_func(void *arg)
3972 CPUState *env = arg;
3973 cpu_loop(env);
3974 /* never exits */
3975 return 0;
3977 #endif
3979 /* do_fork() must return host values and target errnos (unlike most
3980 do_*() functions). */
3981 static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
3982 abi_ulong parent_tidptr, target_ulong newtls,
3983 abi_ulong child_tidptr)
3985 int ret;
3986 TaskState *ts;
3987 CPUState *new_env;
3988 #if defined(CONFIG_USE_NPTL)
3989 unsigned int nptl_flags;
3990 sigset_t sigmask;
3991 #else
3992 uint8_t *new_stack;
3993 #endif
3995 /* Emulate vfork() with fork() */
3996 if (flags & CLONE_VFORK)
3997 flags &= ~(CLONE_VFORK | CLONE_VM);
3999 if (flags & CLONE_VM) {
4000 TaskState *parent_ts = (TaskState *)env->opaque;
4001 #if defined(CONFIG_USE_NPTL)
4002 new_thread_info info;
4003 pthread_attr_t attr;
4004 #endif
4005 ts = g_malloc0(sizeof(TaskState));
4006 init_task_state(ts);
4007 /* we create a new CPU instance. */
4008 new_env = cpu_copy(env);
4009 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4010 cpu_reset(new_env);
4011 #endif
4012 /* Init regs that differ from the parent. */
4013 cpu_clone_regs(new_env, newsp);
4014 new_env->opaque = ts;
4015 ts->bprm = parent_ts->bprm;
4016 ts->info = parent_ts->info;
4017 #if defined(CONFIG_USE_NPTL)
4018 nptl_flags = flags;
4019 flags &= ~CLONE_NPTL_FLAGS2;
4021 if (nptl_flags & CLONE_CHILD_CLEARTID) {
4022 ts->child_tidptr = child_tidptr;
4025 if (nptl_flags & CLONE_SETTLS)
4026 cpu_set_tls (new_env, newtls);
4028 /* Grab a mutex so that thread setup appears atomic. */
4029 pthread_mutex_lock(&clone_lock);
4031 memset(&info, 0, sizeof(info));
4032 pthread_mutex_init(&info.mutex, NULL);
4033 pthread_mutex_lock(&info.mutex);
4034 pthread_cond_init(&info.cond, NULL);
4035 info.env = new_env;
4036 if (nptl_flags & CLONE_CHILD_SETTID)
4037 info.child_tidptr = child_tidptr;
4038 if (nptl_flags & CLONE_PARENT_SETTID)
4039 info.parent_tidptr = parent_tidptr;
4041 ret = pthread_attr_init(&attr);
4042 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
4043 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
4044 /* It is not safe to deliver signals until the child has finished
4045 initializing, so temporarily block all signals. */
4046 sigfillset(&sigmask);
4047 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
4049 ret = pthread_create(&info.thread, &attr, clone_func, &info);
4050 /* TODO: Free new CPU state if thread creation failed. */
4052 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
4053 pthread_attr_destroy(&attr);
4054 if (ret == 0) {
4055 /* Wait for the child to initialize. */
4056 pthread_cond_wait(&info.cond, &info.mutex);
4057 ret = info.tid;
4058 if (flags & CLONE_PARENT_SETTID)
4059 put_user_u32(ret, parent_tidptr);
4060 } else {
4061 ret = -1;
4063 pthread_mutex_unlock(&info.mutex);
4064 pthread_cond_destroy(&info.cond);
4065 pthread_mutex_destroy(&info.mutex);
4066 pthread_mutex_unlock(&clone_lock);
4067 #else
4068 if (flags & CLONE_NPTL_FLAGS2)
4069 return -EINVAL;
4070 /* This is probably going to die very quickly, but do it anyway. */
4071 new_stack = g_malloc0 (NEW_STACK_SIZE);
4072 #ifdef __ia64__
4073 ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
4074 #else
4075 ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
4076 #endif
4077 #endif
4078 } else {
4079 /* if no CLONE_VM, we consider it is a fork */
4080 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
4081 return -EINVAL;
4082 fork_start();
4083 ret = fork();
4084 if (ret == 0) {
4085 /* Child Process. */
4086 cpu_clone_regs(env, newsp);
4087 fork_end(1);
4088 #if defined(CONFIG_USE_NPTL)
4089 /* There is a race condition here. The parent process could
4090 theoretically read the TID in the child process before the child
4091 tid is set. This would require using either ptrace
4092 (not implemented) or having *_tidptr point at a shared memory
4093 mapping. We can't repeat the spinlock hack used above because
4094 the child process gets its own copy of the lock. */
4095 if (flags & CLONE_CHILD_SETTID)
4096 put_user_u32(gettid(), child_tidptr);
4097 if (flags & CLONE_PARENT_SETTID)
4098 put_user_u32(gettid(), parent_tidptr);
4099 ts = (TaskState *)env->opaque;
4100 if (flags & CLONE_SETTLS)
4101 cpu_set_tls (env, newtls);
4102 if (flags & CLONE_CHILD_CLEARTID)
4103 ts->child_tidptr = child_tidptr;
4104 #endif
4105 } else {
4106 fork_end(0);
4109 return ret;
4112 /* warning: doesn't handle Linux-specific flags... */
4113 static int target_to_host_fcntl_cmd(int cmd)
4115 switch(cmd) {
4116 case TARGET_F_DUPFD:
4117 case TARGET_F_GETFD:
4118 case TARGET_F_SETFD:
4119 case TARGET_F_GETFL:
4120 case TARGET_F_SETFL:
4121 return cmd;
4122 case TARGET_F_GETLK:
4123 return F_GETLK;
4124 case TARGET_F_SETLK:
4125 return F_SETLK;
4126 case TARGET_F_SETLKW:
4127 return F_SETLKW;
4128 case TARGET_F_GETOWN:
4129 return F_GETOWN;
4130 case TARGET_F_SETOWN:
4131 return F_SETOWN;
4132 case TARGET_F_GETSIG:
4133 return F_GETSIG;
4134 case TARGET_F_SETSIG:
4135 return F_SETSIG;
4136 #if TARGET_ABI_BITS == 32
4137 case TARGET_F_GETLK64:
4138 return F_GETLK64;
4139 case TARGET_F_SETLK64:
4140 return F_SETLK64;
4141 case TARGET_F_SETLKW64:
4142 return F_SETLKW64;
4143 #endif
4144 case TARGET_F_SETLEASE:
4145 return F_SETLEASE;
4146 case TARGET_F_GETLEASE:
4147 return F_GETLEASE;
4148 #ifdef F_DUPFD_CLOEXEC
4149 case TARGET_F_DUPFD_CLOEXEC:
4150 return F_DUPFD_CLOEXEC;
4151 #endif
4152 case TARGET_F_NOTIFY:
4153 return F_NOTIFY;
4154 default:
4155 return -TARGET_EINVAL;
4157 return -TARGET_EINVAL;
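/* Convert the guest flock/flock64 layout to the host one (and back again for
   the *_GETLK commands), byte-swapping each field along the way. */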
4160 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
4162 struct flock fl;
4163 struct target_flock *target_fl;
4164 struct flock64 fl64;
4165 struct target_flock64 *target_fl64;
4166 abi_long ret;
4167 int host_cmd = target_to_host_fcntl_cmd(cmd);
4169 if (host_cmd == -TARGET_EINVAL)
4170 return host_cmd;
4172 switch(cmd) {
4173 case TARGET_F_GETLK:
4174 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4175 return -TARGET_EFAULT;
4176 fl.l_type = tswap16(target_fl->l_type);
4177 fl.l_whence = tswap16(target_fl->l_whence);
4178 fl.l_start = tswapl(target_fl->l_start);
4179 fl.l_len = tswapl(target_fl->l_len);
4180 fl.l_pid = tswap32(target_fl->l_pid);
4181 unlock_user_struct(target_fl, arg, 0);
4182 ret = get_errno(fcntl(fd, host_cmd, &fl));
4183 if (ret == 0) {
4184 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
4185 return -TARGET_EFAULT;
4186 target_fl->l_type = tswap16(fl.l_type);
4187 target_fl->l_whence = tswap16(fl.l_whence);
4188 target_fl->l_start = tswapl(fl.l_start);
4189 target_fl->l_len = tswapl(fl.l_len);
4190 target_fl->l_pid = tswap32(fl.l_pid);
4191 unlock_user_struct(target_fl, arg, 1);
4193 break;
4195 case TARGET_F_SETLK:
4196 case TARGET_F_SETLKW:
4197 if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
4198 return -TARGET_EFAULT;
4199 fl.l_type = tswap16(target_fl->l_type);
4200 fl.l_whence = tswap16(target_fl->l_whence);
4201 fl.l_start = tswapl(target_fl->l_start);
4202 fl.l_len = tswapl(target_fl->l_len);
4203 fl.l_pid = tswap32(target_fl->l_pid);
4204 unlock_user_struct(target_fl, arg, 0);
4205 ret = get_errno(fcntl(fd, host_cmd, &fl));
4206 break;
4208 case TARGET_F_GETLK64:
4209 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4210 return -TARGET_EFAULT;
4211 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4212 fl64.l_whence = tswap16(target_fl64->l_whence);
4213 fl64.l_start = tswapl(target_fl64->l_start);
4214 fl64.l_len = tswapl(target_fl64->l_len);
4215 fl64.l_pid = tswap32(target_fl64->l_pid);
4216 unlock_user_struct(target_fl64, arg, 0);
4217 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4218 if (ret == 0) {
4219 if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
4220 return -TARGET_EFAULT;
4221 target_fl64->l_type = tswap16(fl64.l_type) >> 1;
4222 target_fl64->l_whence = tswap16(fl64.l_whence);
4223 target_fl64->l_start = tswapl(fl64.l_start);
4224 target_fl64->l_len = tswapl(fl64.l_len);
4225 target_fl64->l_pid = tswap32(fl64.l_pid);
4226 unlock_user_struct(target_fl64, arg, 1);
4228 break;
4229 case TARGET_F_SETLK64:
4230 case TARGET_F_SETLKW64:
4231 if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
4232 return -TARGET_EFAULT;
4233 fl64.l_type = tswap16(target_fl64->l_type) >> 1;
4234 fl64.l_whence = tswap16(target_fl64->l_whence);
4235 fl64.l_start = tswapl(target_fl64->l_start);
4236 fl64.l_len = tswapl(target_fl64->l_len);
4237 fl64.l_pid = tswap32(target_fl64->l_pid);
4238 unlock_user_struct(target_fl64, arg, 0);
4239 ret = get_errno(fcntl(fd, host_cmd, &fl64));
4240 break;
4242 case TARGET_F_GETFL:
4243 ret = get_errno(fcntl(fd, host_cmd, arg));
4244 if (ret >= 0) {
4245 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
4247 break;
4249 case TARGET_F_SETFL:
4250 ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
4251 break;
4253 case TARGET_F_SETOWN:
4254 case TARGET_F_GETOWN:
4255 case TARGET_F_SETSIG:
4256 case TARGET_F_GETSIG:
4257 case TARGET_F_SETLEASE:
4258 case TARGET_F_GETLEASE:
4259 ret = get_errno(fcntl(fd, host_cmd, arg));
4260 break;
4262 default:
4263 ret = get_errno(fcntl(fd, cmd, arg));
4264 break;
4266 return ret;
4269 #ifdef USE_UID16
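/* Helpers for the legacy 16-bit UID/GID syscalls: IDs above 65535 are clamped
   to the traditional overflow ID 65534, and -1 must survive the narrowing so
   the 'leave unchanged' convention of chown()/set*id() keeps working. */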
4271 static inline int high2lowuid(int uid)
4273 if (uid > 65535)
4274 return 65534;
4275 else
4276 return uid;
4279 static inline int high2lowgid(int gid)
4281 if (gid > 65535)
4282 return 65534;
4283 else
4284 return gid;
4287 static inline int low2highuid(int uid)
4289 if ((int16_t)uid == -1)
4290 return -1;
4291 else
4292 return uid;
4295 static inline int low2highgid(int gid)
4297 if ((int16_t)gid == -1)
4298 return -1;
4299 else
4300 return gid;
4302 static inline int tswapid(int id)
4304 return tswap16(id);
4306 #else /* !USE_UID16 */
4307 static inline int high2lowuid(int uid)
4309 return uid;
4311 static inline int high2lowgid(int gid)
4313 return gid;
4315 static inline int low2highuid(int uid)
4317 return uid;
4319 static inline int low2highgid(int gid)
4321 return gid;
4323 static inline int tswapid(int id)
4325 return tswap32(id);
4327 #endif /* USE_UID16 */
4329 void syscall_init(void)
4331 IOCTLEntry *ie;
4332 const argtype *arg_type;
4333 int size;
4334 int i;
4336 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4337 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4338 #include "syscall_types.h"
4339 #undef STRUCT
4340 #undef STRUCT_SPECIAL
4342 /* we patch the ioctl size if necessary. We rely on the fact that
4343 no ioctl has all the bits at '1' in the size field */
4344 ie = ioctl_entries;
4345 while (ie->target_cmd != 0) {
4346 if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
4347 TARGET_IOC_SIZEMASK) {
4348 arg_type = ie->arg_type;
4349 if (arg_type[0] != TYPE_PTR) {
4350 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
4351 ie->target_cmd);
4352 exit(1);
4354 arg_type++;
4355 size = thunk_type_size(arg_type, 0);
4356 ie->target_cmd = (ie->target_cmd &
4357 ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
4358 (size << TARGET_IOC_SIZESHIFT);
4361 /* Build target_to_host_errno_table[] table from
4362 * host_to_target_errno_table[]. */
4363 for (i=0; i < ERRNO_TABLE_SIZE; i++)
4364 target_to_host_errno_table[host_to_target_errno_table[i]] = i;
4366 /* automatic consistency check if same arch */
4367 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4368 (defined(__x86_64__) && defined(TARGET_X86_64))
4369 if (unlikely(ie->target_cmd != ie->host_cmd)) {
4370 fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4371 ie->name, ie->target_cmd, ie->host_cmd);
4373 #endif
4374 ie++;
4378 #if TARGET_ABI_BITS == 32
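/* On 32-bit ABIs a 64-bit file offset arrives split across two syscall
   arguments; which half holds the high word depends on target endianness. */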
4379 static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
4381 #ifdef TARGET_WORDS_BIGENDIAN
4382 return ((uint64_t)word0 << 32) | word1;
4383 #else
4384 return ((uint64_t)word1 << 32) | word0;
4385 #endif
4387 #else /* TARGET_ABI_BITS == 32 */
4388 static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
4390 return word0;
4392 #endif /* TARGET_ABI_BITS != 32 */
4394 #ifdef TARGET_NR_truncate64
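/* Some 32-bit targets (e.g. ARM EABI) pass 64-bit values in aligned register
   pairs, which inserts a padding argument before the offset; when
   regpairs_aligned() says so, shift the arguments down to skip it. */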
4395 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
4396 abi_long arg2,
4397 abi_long arg3,
4398 abi_long arg4)
4400 if (regpairs_aligned(cpu_env)) {
4401 arg2 = arg3;
4402 arg3 = arg4;
4404 return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
4406 #endif
4408 #ifdef TARGET_NR_ftruncate64
4409 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
4410 abi_long arg2,
4411 abi_long arg3,
4412 abi_long arg4)
4414 if (regpairs_aligned(cpu_env)) {
4415 arg2 = arg3;
4416 arg3 = arg4;
4418 return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
4420 #endif
4422 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
4423 abi_ulong target_addr)
4425 struct target_timespec *target_ts;
4427 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
4428 return -TARGET_EFAULT;
4429 host_ts->tv_sec = tswapl(target_ts->tv_sec);
4430 host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
4431 unlock_user_struct(target_ts, target_addr, 0);
4432 return 0;
4435 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
4436 struct timespec *host_ts)
4438 struct target_timespec *target_ts;
4440 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
4441 return -TARGET_EFAULT;
4442 target_ts->tv_sec = tswapl(host_ts->tv_sec);
4443 target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
4444 unlock_user_struct(target_ts, target_addr, 1);
4445 return 0;
4448 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
4449 static inline abi_long host_to_target_stat64(void *cpu_env,
4450 abi_ulong target_addr,
4451 struct stat *host_st)
4453 #ifdef TARGET_ARM
4454 if (((CPUARMState *)cpu_env)->eabi) {
4455 struct target_eabi_stat64 *target_st;
4457 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4458 return -TARGET_EFAULT;
4459 memset(target_st, 0, sizeof(struct target_eabi_stat64));
4460 __put_user(host_st->st_dev, &target_st->st_dev);
4461 __put_user(host_st->st_ino, &target_st->st_ino);
4462 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4463 __put_user(host_st->st_ino, &target_st->__st_ino);
4464 #endif
4465 __put_user(host_st->st_mode, &target_st->st_mode);
4466 __put_user(host_st->st_nlink, &target_st->st_nlink);
4467 __put_user(host_st->st_uid, &target_st->st_uid);
4468 __put_user(host_st->st_gid, &target_st->st_gid);
4469 __put_user(host_st->st_rdev, &target_st->st_rdev);
4470 __put_user(host_st->st_size, &target_st->st_size);
4471 __put_user(host_st->st_blksize, &target_st->st_blksize);
4472 __put_user(host_st->st_blocks, &target_st->st_blocks);
4473 __put_user(host_st->st_atime, &target_st->target_st_atime);
4474 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4475 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4476 unlock_user_struct(target_st, target_addr, 1);
4477 } else
4478 #endif
4480 #if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
4481 struct target_stat *target_st;
4482 #else
4483 struct target_stat64 *target_st;
4484 #endif
4486 if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
4487 return -TARGET_EFAULT;
4488 memset(target_st, 0, sizeof(*target_st));
4489 __put_user(host_st->st_dev, &target_st->st_dev);
4490 __put_user(host_st->st_ino, &target_st->st_ino);
4491 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
4492 __put_user(host_st->st_ino, &target_st->__st_ino);
4493 #endif
4494 __put_user(host_st->st_mode, &target_st->st_mode);
4495 __put_user(host_st->st_nlink, &target_st->st_nlink);
4496 __put_user(host_st->st_uid, &target_st->st_uid);
4497 __put_user(host_st->st_gid, &target_st->st_gid);
4498 __put_user(host_st->st_rdev, &target_st->st_rdev);
4499 /* XXX: better use of kernel struct */
4500 __put_user(host_st->st_size, &target_st->st_size);
4501 __put_user(host_st->st_blksize, &target_st->st_blksize);
4502 __put_user(host_st->st_blocks, &target_st->st_blocks);
4503 __put_user(host_st->st_atime, &target_st->target_st_atime);
4504 __put_user(host_st->st_mtime, &target_st->target_st_mtime);
4505 __put_user(host_st->st_ctime, &target_st->target_st_ctime);
4506 unlock_user_struct(target_st, target_addr, 1);
4509 return 0;
4511 #endif
4513 #if defined(CONFIG_USE_NPTL)
4514 /* ??? Using host futex calls even when target atomic operations
4515 are not really atomic probably breaks things. However, implementing
4516 futexes locally would make futexes shared between multiple processes
4517 tricky; they are probably useless anyway, because guest atomic
4518 operations won't work either. */
4519 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
4520 target_ulong uaddr2, int val3)
4522 struct timespec ts, *pts;
4523 int base_op;
4525 /* ??? We assume FUTEX_* constants are the same on both host
4526 and target. */
4527 #ifdef FUTEX_CMD_MASK
4528 base_op = op & FUTEX_CMD_MASK;
4529 #else
4530 base_op = op;
4531 #endif
4532 switch (base_op) {
4533 case FUTEX_WAIT:
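/* The kernel compares val against the futex word at uaddr, which is stored
   in guest byte order, so convert val to match before the host call. */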
4534 if (timeout) {
4535 pts = &ts;
4536 target_to_host_timespec(pts, timeout);
4537 } else {
4538 pts = NULL;
4540 return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
4541 pts, NULL, 0));
4542 case FUTEX_WAKE:
4543 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4544 case FUTEX_FD:
4545 return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
4546 case FUTEX_REQUEUE:
4547 case FUTEX_CMP_REQUEUE:
4548 case FUTEX_WAKE_OP:
4549 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
4550 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
4551 But the prototype takes a `struct timespec *'; insert casts
4552 to satisfy the compiler. We do not need to tswap TIMEOUT
4553 since it's not compared to guest memory. */
4554 pts = (struct timespec *)(uintptr_t) timeout;
4555 return get_errno(sys_futex(g2h(uaddr), op, val, pts,
4556 g2h(uaddr2),
4557 (base_op == FUTEX_CMP_REQUEUE
4558 ? tswap32(val3)
4559 : val3)));
4560 default:
4561 return -TARGET_ENOSYS;
4564 #endif
4566 /* Map host to target signal numbers for the wait family of syscalls.
4567 Assume all other status bits are the same. */
4568 static int host_to_target_waitstatus(int status)
4570 if (WIFSIGNALED(status)) {
4571 return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
4573 if (WIFSTOPPED(status)) {
4574 return (host_to_target_signal(WSTOPSIG(status)) << 8)
4575 | (status & 0xff);
4577 return status;
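/* Parse the kernel release string ("major.minor.patch") into a packed
   KERNEL_VERSION-style number so callers can compare versions numerically. */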
4580 int get_osversion(void)
4582 static int osversion;
4583 struct new_utsname buf;
4584 const char *s;
4585 int i, n, tmp;
4586 if (osversion)
4587 return osversion;
4588 if (qemu_uname_release && *qemu_uname_release) {
4589 s = qemu_uname_release;
4590 } else {
4591 if (sys_uname(&buf))
4592 return 0;
4593 s = buf.release;
4595 tmp = 0;
4596 for (i = 0; i < 3; i++) {
4597 n = 0;
4598 while (*s >= '0' && *s <= '9') {
4599 n *= 10;
4600 n += *s - '0';
4601 s++;
4603 tmp = (tmp << 8) + n;
4604 if (*s == '.')
4605 s++;
4607 osversion = tmp;
4608 return osversion;
4611 /* do_syscall() should always have a single exit point at the end so
4612 that actions, such as logging of syscall results, can be performed.
4613 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4614 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
4615 abi_long arg2, abi_long arg3, abi_long arg4,
4616 abi_long arg5, abi_long arg6, abi_long arg7,
4617 abi_long arg8)
4619 abi_long ret;
4620 struct stat st;
4621 struct statfs stfs;
4622 void *p;
4624 #ifdef DEBUG
4625 gemu_log("syscall %d", num);
4626 #endif
4627 if(do_strace)
4628 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
4630 switch(num) {
4631 case TARGET_NR_exit:
4632 #ifdef CONFIG_USE_NPTL
4633 /* In old applications this may be used to implement _exit(2).
4634 However, in threaded applications it is used for thread termination,
4635 and _exit_group is used for application termination.
4636 Do thread termination if we have more than one thread. */
4637 /* FIXME: This probably breaks if a signal arrives. We should probably
4638 be disabling signals. */
4639 if (first_cpu->next_cpu) {
4640 TaskState *ts;
4641 CPUState **lastp;
4642 CPUState *p;
4644 cpu_list_lock();
4645 lastp = &first_cpu;
4646 p = first_cpu;
4647 while (p && p != (CPUState *)cpu_env) {
4648 lastp = &p->next_cpu;
4649 p = p->next_cpu;
4651 /* If we didn't find the CPU for this thread then something is
4652 horribly wrong. */
4653 if (!p)
4654 abort();
4655 /* Remove the CPU from the list. */
4656 *lastp = p->next_cpu;
4657 cpu_list_unlock();
4658 ts = ((CPUState *)cpu_env)->opaque;
4659 if (ts->child_tidptr) {
4660 put_user_u32(0, ts->child_tidptr);
4661 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
4662 NULL, NULL, 0);
4664 thread_env = NULL;
4665 g_free(cpu_env);
4666 g_free(ts);
4667 pthread_exit(NULL);
4669 #endif
4670 #ifdef TARGET_GPROF
4671 _mcleanup();
4672 #endif
4673 gdb_exit(cpu_env, arg1);
4674 _exit(arg1);
4675 ret = 0; /* avoid warning */
4676 break;
4677 case TARGET_NR_read:
4678 if (arg3 == 0)
4679 ret = 0;
4680 else {
4681 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
4682 goto efault;
4683 ret = get_errno(read(arg1, p, arg3));
4684 unlock_user(p, arg2, ret);
4686 break;
4687 case TARGET_NR_write:
4688 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
4689 goto efault;
4690 ret = get_errno(write(arg1, p, arg3));
4691 unlock_user(p, arg2, 0);
4692 break;
4693 case TARGET_NR_open:
4694 if (!(p = lock_user_string(arg1)))
4695 goto efault;
4696 ret = get_errno(open(path(p),
4697 target_to_host_bitmask(arg2, fcntl_flags_tbl),
4698 arg3));
4699 unlock_user(p, arg1, 0);
4700 break;
4701 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4702 case TARGET_NR_openat:
4703 if (!(p = lock_user_string(arg2)))
4704 goto efault;
4705 ret = get_errno(sys_openat(arg1,
4706 path(p),
4707 target_to_host_bitmask(arg3, fcntl_flags_tbl),
4708 arg4));
4709 unlock_user(p, arg2, 0);
4710 break;
4711 #endif
4712 case TARGET_NR_close:
4713 ret = get_errno(close(arg1));
4714 break;
4715 case TARGET_NR_brk:
4716 ret = do_brk(arg1);
4717 break;
4718 case TARGET_NR_fork:
4719 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
4720 break;
4721 #ifdef TARGET_NR_waitpid
4722 case TARGET_NR_waitpid:
4724 int status;
4725 ret = get_errno(waitpid(arg1, &status, arg3));
4726 if (!is_error(ret) && arg2
4727 && put_user_s32(host_to_target_waitstatus(status), arg2))
4728 goto efault;
4730 break;
4731 #endif
4732 #ifdef TARGET_NR_waitid
4733 case TARGET_NR_waitid:
4735 siginfo_t info;
4736 info.si_pid = 0;
4737 ret = get_errno(waitid(arg1, arg2, &info, arg4));
4738 if (!is_error(ret) && arg3 && info.si_pid != 0) {
4739 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
4740 goto efault;
4741 host_to_target_siginfo(p, &info);
4742 unlock_user(p, arg3, sizeof(target_siginfo_t));
4745 break;
4746 #endif
4747 #ifdef TARGET_NR_creat /* not on alpha */
4748 case TARGET_NR_creat:
4749 if (!(p = lock_user_string(arg1)))
4750 goto efault;
4751 ret = get_errno(creat(p, arg2));
4752 unlock_user(p, arg1, 0);
4753 break;
4754 #endif
4755 case TARGET_NR_link:
4757 void * p2;
4758 p = lock_user_string(arg1);
4759 p2 = lock_user_string(arg2);
4760 if (!p || !p2)
4761 ret = -TARGET_EFAULT;
4762 else
4763 ret = get_errno(link(p, p2));
4764 unlock_user(p2, arg2, 0);
4765 unlock_user(p, arg1, 0);
4767 break;
4768 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4769 case TARGET_NR_linkat:
4771 void * p2 = NULL;
4772 if (!arg2 || !arg4)
4773 goto efault;
4774 p = lock_user_string(arg2);
4775 p2 = lock_user_string(arg4);
4776 if (!p || !p2)
4777 ret = -TARGET_EFAULT;
4778 else
4779 ret = get_errno(sys_linkat(arg1, p, arg3, p2, arg5));
4780 unlock_user(p, arg2, 0);
4781 unlock_user(p2, arg4, 0);
4783 break;
4784 #endif
4785 case TARGET_NR_unlink:
4786 if (!(p = lock_user_string(arg1)))
4787 goto efault;
4788 ret = get_errno(unlink(p));
4789 unlock_user(p, arg1, 0);
4790 break;
4791 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4792 case TARGET_NR_unlinkat:
4793 if (!(p = lock_user_string(arg2)))
4794 goto efault;
4795 ret = get_errno(sys_unlinkat(arg1, p, arg3));
4796 unlock_user(p, arg2, 0);
4797 break;
4798 #endif
4799 case TARGET_NR_execve:
4801 char **argp, **envp;
4802 int argc, envc;
4803 abi_ulong gp;
4804 abi_ulong guest_argp;
4805 abi_ulong guest_envp;
4806 abi_ulong addr;
4807 char **q;
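/* First pass: count the guest argv/envp entries so the host arrays can be
   sized; a second pass below locks each string into host memory. */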
4809 argc = 0;
4810 guest_argp = arg2;
4811 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
4812 if (get_user_ual(addr, gp))
4813 goto efault;
4814 if (!addr)
4815 break;
4816 argc++;
4818 envc = 0;
4819 guest_envp = arg3;
4820 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
4821 if (get_user_ual(addr, gp))
4822 goto efault;
4823 if (!addr)
4824 break;
4825 envc++;
4828 argp = alloca((argc + 1) * sizeof(void *));
4829 envp = alloca((envc + 1) * sizeof(void *));
4831 for (gp = guest_argp, q = argp; gp;
4832 gp += sizeof(abi_ulong), q++) {
4833 if (get_user_ual(addr, gp))
4834 goto execve_efault;
4835 if (!addr)
4836 break;
4837 if (!(*q = lock_user_string(addr)))
4838 goto execve_efault;
4840 *q = NULL;
4842 for (gp = guest_envp, q = envp; gp;
4843 gp += sizeof(abi_ulong), q++) {
4844 if (get_user_ual(addr, gp))
4845 goto execve_efault;
4846 if (!addr)
4847 break;
4848 if (!(*q = lock_user_string(addr)))
4849 goto execve_efault;
4851 *q = NULL;
4853 if (!(p = lock_user_string(arg1)))
4854 goto execve_efault;
4855 ret = get_errno(execve(p, argp, envp));
4856 unlock_user(p, arg1, 0);
4858 goto execve_end;
4860 execve_efault:
4861 ret = -TARGET_EFAULT;
4863 execve_end:
4864 for (gp = guest_argp, q = argp; *q;
4865 gp += sizeof(abi_ulong), q++) {
4866 if (get_user_ual(addr, gp)
4867 || !addr)
4868 break;
4869 unlock_user(*q, addr, 0);
4871 for (gp = guest_envp, q = envp; *q;
4872 gp += sizeof(abi_ulong), q++) {
4873 if (get_user_ual(addr, gp)
4874 || !addr)
4875 break;
4876 unlock_user(*q, addr, 0);
4879 break;
4880 case TARGET_NR_chdir:
4881 if (!(p = lock_user_string(arg1)))
4882 goto efault;
4883 ret = get_errno(chdir(p));
4884 unlock_user(p, arg1, 0);
4885 break;
4886 #ifdef TARGET_NR_time
4887 case TARGET_NR_time:
4889 time_t host_time;
4890 ret = get_errno(time(&host_time));
4891 if (!is_error(ret)
4892 && arg1
4893 && put_user_sal(host_time, arg1))
4894 goto efault;
4896 break;
4897 #endif
4898 case TARGET_NR_mknod:
4899 if (!(p = lock_user_string(arg1)))
4900 goto efault;
4901 ret = get_errno(mknod(p, arg2, arg3));
4902 unlock_user(p, arg1, 0);
4903 break;
4904 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4905 case TARGET_NR_mknodat:
4906 if (!(p = lock_user_string(arg2)))
4907 goto efault;
4908 ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
4909 unlock_user(p, arg2, 0);
4910 break;
4911 #endif
4912 case TARGET_NR_chmod:
4913 if (!(p = lock_user_string(arg1)))
4914 goto efault;
4915 ret = get_errno(chmod(p, arg2));
4916 unlock_user(p, arg1, 0);
4917 break;
4918 #ifdef TARGET_NR_break
4919 case TARGET_NR_break:
4920 goto unimplemented;
4921 #endif
4922 #ifdef TARGET_NR_oldstat
4923 case TARGET_NR_oldstat:
4924 goto unimplemented;
4925 #endif
4926 case TARGET_NR_lseek:
4927 ret = get_errno(lseek(arg1, arg2, arg3));
4928 break;
4929 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4930 /* Alpha specific */
4931 case TARGET_NR_getxpid:
4932 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
4933 ret = get_errno(getpid());
4934 break;
4935 #endif
4936 #ifdef TARGET_NR_getpid
4937 case TARGET_NR_getpid:
4938 ret = get_errno(getpid());
4939 break;
4940 #endif
4941 case TARGET_NR_mount:
4943 /* need to look at the data field */
4944 void *p2, *p3;
4945 p = lock_user_string(arg1);
4946 p2 = lock_user_string(arg2);
4947 p3 = lock_user_string(arg3);
4948 if (!p || !p2 || !p3)
4949 ret = -TARGET_EFAULT;
4950 else {
4951 /* FIXME - arg5 should be locked, but it isn't clear how to
4952 * do that since it's not guaranteed to be a NULL-terminated
4953 * string.
4954 */
4955 if ( ! arg5 )
4956 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
4957 else
4958 ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
4960 unlock_user(p, arg1, 0);
4961 unlock_user(p2, arg2, 0);
4962 unlock_user(p3, arg3, 0);
4963 break;
4965 #ifdef TARGET_NR_umount
4966 case TARGET_NR_umount:
4967 if (!(p = lock_user_string(arg1)))
4968 goto efault;
4969 ret = get_errno(umount(p));
4970 unlock_user(p, arg1, 0);
4971 break;
4972 #endif
4973 #ifdef TARGET_NR_stime /* not on alpha */
4974 case TARGET_NR_stime:
4976 time_t host_time;
4977 if (get_user_sal(host_time, arg1))
4978 goto efault;
4979 ret = get_errno(stime(&host_time));
4981 break;
4982 #endif
4983 case TARGET_NR_ptrace:
4984 goto unimplemented;
4985 #ifdef TARGET_NR_alarm /* not on alpha */
4986 case TARGET_NR_alarm:
4987 ret = alarm(arg1);
4988 break;
4989 #endif
4990 #ifdef TARGET_NR_oldfstat
4991 case TARGET_NR_oldfstat:
4992 goto unimplemented;
4993 #endif
4994 #ifdef TARGET_NR_pause /* not on alpha */
4995 case TARGET_NR_pause:
4996 ret = get_errno(pause());
4997 break;
4998 #endif
4999 #ifdef TARGET_NR_utime
5000 case TARGET_NR_utime:
5002 struct utimbuf tbuf, *host_tbuf;
5003 struct target_utimbuf *target_tbuf;
5004 if (arg2) {
5005 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
5006 goto efault;
5007 tbuf.actime = tswapl(target_tbuf->actime);
5008 tbuf.modtime = tswapl(target_tbuf->modtime);
5009 unlock_user_struct(target_tbuf, arg2, 0);
5010 host_tbuf = &tbuf;
5011 } else {
5012 host_tbuf = NULL;
5014 if (!(p = lock_user_string(arg1)))
5015 goto efault;
5016 ret = get_errno(utime(p, host_tbuf));
5017 unlock_user(p, arg1, 0);
5019 break;
5020 #endif
5021 case TARGET_NR_utimes:
5023 struct timeval *tvp, tv[2];
5024 if (arg2) {
5025 if (copy_from_user_timeval(&tv[0], arg2)
5026 || copy_from_user_timeval(&tv[1],
5027 arg2 + sizeof(struct target_timeval)))
5028 goto efault;
5029 tvp = tv;
5030 } else {
5031 tvp = NULL;
5033 if (!(p = lock_user_string(arg1)))
5034 goto efault;
5035 ret = get_errno(utimes(p, tvp));
5036 unlock_user(p, arg1, 0);
5038 break;
5039 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5040 case TARGET_NR_futimesat:
5042 struct timeval *tvp, tv[2];
5043 if (arg3) {
5044 if (copy_from_user_timeval(&tv[0], arg3)
5045 || copy_from_user_timeval(&tv[1],
5046 arg3 + sizeof(struct target_timeval)))
5047 goto efault;
5048 tvp = tv;
5049 } else {
5050 tvp = NULL;
5052 if (!(p = lock_user_string(arg2)))
5053 goto efault;
5054 ret = get_errno(sys_futimesat(arg1, path(p), tvp));
5055 unlock_user(p, arg2, 0);
5057 break;
5058 #endif
5059 #ifdef TARGET_NR_stty
5060 case TARGET_NR_stty:
5061 goto unimplemented;
5062 #endif
5063 #ifdef TARGET_NR_gtty
5064 case TARGET_NR_gtty:
5065 goto unimplemented;
5066 #endif
5067 case TARGET_NR_access:
5068 if (!(p = lock_user_string(arg1)))
5069 goto efault;
5070 ret = get_errno(access(path(p), arg2));
5071 unlock_user(p, arg1, 0);
5072 break;
5073 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5074 case TARGET_NR_faccessat:
5075 if (!(p = lock_user_string(arg2)))
5076 goto efault;
5077 ret = get_errno(sys_faccessat(arg1, p, arg3));
5078 unlock_user(p, arg2, 0);
5079 break;
5080 #endif
5081 #ifdef TARGET_NR_nice /* not on alpha */
5082 case TARGET_NR_nice:
5083 ret = get_errno(nice(arg1));
5084 break;
5085 #endif
5086 #ifdef TARGET_NR_ftime
5087 case TARGET_NR_ftime:
5088 goto unimplemented;
5089 #endif
5090 case TARGET_NR_sync:
5091 sync();
5092 ret = 0;
5093 break;
5094 case TARGET_NR_kill:
5095 ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
5096 break;
5097 case TARGET_NR_rename:
5099 void *p2;
5100 p = lock_user_string(arg1);
5101 p2 = lock_user_string(arg2);
5102 if (!p || !p2)
5103 ret = -TARGET_EFAULT;
5104 else
5105 ret = get_errno(rename(p, p2));
5106 unlock_user(p2, arg2, 0);
5107 unlock_user(p, arg1, 0);
5109 break;
5110 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5111 case TARGET_NR_renameat:
5113 void *p2;
5114 p = lock_user_string(arg2);
5115 p2 = lock_user_string(arg4);
5116 if (!p || !p2)
5117 ret = -TARGET_EFAULT;
5118 else
5119 ret = get_errno(sys_renameat(arg1, p, arg3, p2));
5120 unlock_user(p2, arg4, 0);
5121 unlock_user(p, arg2, 0);
5123 break;
5124 #endif
5125 case TARGET_NR_mkdir:
5126 if (!(p = lock_user_string(arg1)))
5127 goto efault;
5128 ret = get_errno(mkdir(p, arg2));
5129 unlock_user(p, arg1, 0);
5130 break;
5131 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5132 case TARGET_NR_mkdirat:
5133 if (!(p = lock_user_string(arg2)))
5134 goto efault;
5135 ret = get_errno(sys_mkdirat(arg1, p, arg3));
5136 unlock_user(p, arg2, 0);
5137 break;
5138 #endif
5139 case TARGET_NR_rmdir:
5140 if (!(p = lock_user_string(arg1)))
5141 goto efault;
5142 ret = get_errno(rmdir(p));
5143 unlock_user(p, arg1, 0);
5144 break;
5145 case TARGET_NR_dup:
5146 ret = get_errno(dup(arg1));
5147 break;
5148 case TARGET_NR_pipe:
5149 ret = do_pipe(cpu_env, arg1, 0, 0);
5150 break;
5151 #ifdef TARGET_NR_pipe2
5152 case TARGET_NR_pipe2:
5153 ret = do_pipe(cpu_env, arg1, arg2, 1);
5154 break;
5155 #endif
5156 case TARGET_NR_times:
5158 struct target_tms *tmsp;
5159 struct tms tms;
5160 ret = get_errno(times(&tms));
5161 if (arg1) {
5162 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
5163 if (!tmsp)
5164 goto efault;
5165 tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
5166 tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
5167 tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
5168 tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
5170 if (!is_error(ret))
5171 ret = host_to_target_clock_t(ret);
5173 break;
5174 #ifdef TARGET_NR_prof
5175 case TARGET_NR_prof:
5176 goto unimplemented;
5177 #endif
5178 #ifdef TARGET_NR_signal
5179 case TARGET_NR_signal:
5180 goto unimplemented;
5181 #endif
5182 case TARGET_NR_acct:
5183 if (arg1 == 0) {
5184 ret = get_errno(acct(NULL));
5185 } else {
5186 if (!(p = lock_user_string(arg1)))
5187 goto efault;
5188 ret = get_errno(acct(path(p)));
5189 unlock_user(p, arg1, 0);
5191 break;
5192 #ifdef TARGET_NR_umount2 /* not on alpha */
5193 case TARGET_NR_umount2:
5194 if (!(p = lock_user_string(arg1)))
5195 goto efault;
5196 ret = get_errno(umount2(p, arg2));
5197 unlock_user(p, arg1, 0);
5198 break;
5199 #endif
5200 #ifdef TARGET_NR_lock
5201 case TARGET_NR_lock:
5202 goto unimplemented;
5203 #endif
5204 case TARGET_NR_ioctl:
5205 ret = do_ioctl(arg1, arg2, arg3);
5206 break;
5207 case TARGET_NR_fcntl:
5208 ret = do_fcntl(arg1, arg2, arg3);
5209 break;
5210 #ifdef TARGET_NR_mpx
5211 case TARGET_NR_mpx:
5212 goto unimplemented;
5213 #endif
5214 case TARGET_NR_setpgid:
5215 ret = get_errno(setpgid(arg1, arg2));
5216 break;
5217 #ifdef TARGET_NR_ulimit
5218 case TARGET_NR_ulimit:
5219 goto unimplemented;
5220 #endif
5221 #ifdef TARGET_NR_oldolduname
5222 case TARGET_NR_oldolduname:
5223 goto unimplemented;
5224 #endif
5225 case TARGET_NR_umask:
5226 ret = get_errno(umask(arg1));
5227 break;
5228 case TARGET_NR_chroot:
5229 if (!(p = lock_user_string(arg1)))
5230 goto efault;
5231 ret = get_errno(chroot(p));
5232 unlock_user(p, arg1, 0);
5233 break;
5234 case TARGET_NR_ustat:
5235 goto unimplemented;
5236 case TARGET_NR_dup2:
5237 ret = get_errno(dup2(arg1, arg2));
5238 break;
5239 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5240 case TARGET_NR_dup3:
5241 ret = get_errno(dup3(arg1, arg2, arg3));
5242 break;
5243 #endif
5244 #ifdef TARGET_NR_getppid /* not on alpha */
5245 case TARGET_NR_getppid:
5246 ret = get_errno(getppid());
5247 break;
5248 #endif
5249 case TARGET_NR_getpgrp:
5250 ret = get_errno(getpgrp());
5251 break;
5252 case TARGET_NR_setsid:
5253 ret = get_errno(setsid());
5254 break;
5255 #ifdef TARGET_NR_sigaction
5256 case TARGET_NR_sigaction:
5258 #if defined(TARGET_ALPHA)
5259 struct target_sigaction act, oact, *pact = 0;
5260 struct target_old_sigaction *old_act;
5261 if (arg2) {
5262 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5263 goto efault;
5264 act._sa_handler = old_act->_sa_handler;
5265 target_siginitset(&act.sa_mask, old_act->sa_mask);
5266 act.sa_flags = old_act->sa_flags;
5267 act.sa_restorer = 0;
5268 unlock_user_struct(old_act, arg2, 0);
5269 pact = &act;
5271 ret = get_errno(do_sigaction(arg1, pact, &oact));
5272 if (!is_error(ret) && arg3) {
5273 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5274 goto efault;
5275 old_act->_sa_handler = oact._sa_handler;
5276 old_act->sa_mask = oact.sa_mask.sig[0];
5277 old_act->sa_flags = oact.sa_flags;
5278 unlock_user_struct(old_act, arg3, 1);
5280 #elif defined(TARGET_MIPS)
5281 struct target_sigaction act, oact, *pact, *old_act;
5283 if (arg2) {
5284 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5285 goto efault;
5286 act._sa_handler = old_act->_sa_handler;
5287 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
5288 act.sa_flags = old_act->sa_flags;
5289 unlock_user_struct(old_act, arg2, 0);
5290 pact = &act;
5291 } else {
5292 pact = NULL;
5295 ret = get_errno(do_sigaction(arg1, pact, &oact));
5297 if (!is_error(ret) && arg3) {
5298 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5299 goto efault;
5300 old_act->_sa_handler = oact._sa_handler;
5301 old_act->sa_flags = oact.sa_flags;
5302 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
5303 old_act->sa_mask.sig[1] = 0;
5304 old_act->sa_mask.sig[2] = 0;
5305 old_act->sa_mask.sig[3] = 0;
5306 unlock_user_struct(old_act, arg3, 1);
5308 #else
5309 struct target_old_sigaction *old_act;
5310 struct target_sigaction act, oact, *pact;
5311 if (arg2) {
5312 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
5313 goto efault;
5314 act._sa_handler = old_act->_sa_handler;
5315 target_siginitset(&act.sa_mask, old_act->sa_mask);
5316 act.sa_flags = old_act->sa_flags;
5317 act.sa_restorer = old_act->sa_restorer;
5318 unlock_user_struct(old_act, arg2, 0);
5319 pact = &act;
5320 } else {
5321 pact = NULL;
5323 ret = get_errno(do_sigaction(arg1, pact, &oact));
5324 if (!is_error(ret) && arg3) {
5325 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
5326 goto efault;
5327 old_act->_sa_handler = oact._sa_handler;
5328 old_act->sa_mask = oact.sa_mask.sig[0];
5329 old_act->sa_flags = oact.sa_flags;
5330 old_act->sa_restorer = oact.sa_restorer;
5331 unlock_user_struct(old_act, arg3, 1);
5333 #endif
5335 break;
5336 #endif
5337 case TARGET_NR_rt_sigaction:
5339 #if defined(TARGET_ALPHA)
5340 struct target_sigaction act, oact, *pact = 0;
5341 struct target_rt_sigaction *rt_act;
5342 /* ??? arg4 == sizeof(sigset_t). */
5343 if (arg2) {
5344 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
5345 goto efault;
5346 act._sa_handler = rt_act->_sa_handler;
5347 act.sa_mask = rt_act->sa_mask;
5348 act.sa_flags = rt_act->sa_flags;
5349 act.sa_restorer = arg5;
5350 unlock_user_struct(rt_act, arg2, 0);
5351 pact = &act;
5353 ret = get_errno(do_sigaction(arg1, pact, &oact));
5354 if (!is_error(ret) && arg3) {
5355 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
5356 goto efault;
5357 rt_act->_sa_handler = oact._sa_handler;
5358 rt_act->sa_mask = oact.sa_mask;
5359 rt_act->sa_flags = oact.sa_flags;
5360 unlock_user_struct(rt_act, arg3, 1);
5362 #else
5363 struct target_sigaction *act;
5364 struct target_sigaction *oact;
5366 if (arg2) {
5367 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
5368 goto efault;
5369 } else
5370 act = NULL;
5371 if (arg3) {
5372 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
5373 ret = -TARGET_EFAULT;
5374 goto rt_sigaction_fail;
5376 } else
5377 oact = NULL;
5378 ret = get_errno(do_sigaction(arg1, act, oact));
5379 rt_sigaction_fail:
5380 if (act)
5381 unlock_user_struct(act, arg2, 0);
5382 if (oact)
5383 unlock_user_struct(oact, arg3, 1);
5384 #endif
5386 break;
5387 #ifdef TARGET_NR_sgetmask /* not on alpha */
5388 case TARGET_NR_sgetmask:
5390 sigset_t cur_set;
5391 abi_ulong target_set;
5392 sigprocmask(0, NULL, &cur_set);
5393 host_to_target_old_sigset(&target_set, &cur_set);
5394 ret = target_set;
5396 break;
5397 #endif
5398 #ifdef TARGET_NR_ssetmask /* not on alpha */
5399 case TARGET_NR_ssetmask:
5401 sigset_t set, oset, cur_set;
5402 abi_ulong target_set = arg1;
5403 sigprocmask(0, NULL, &cur_set);
5404 target_to_host_old_sigset(&set, &target_set);
5405 sigorset(&set, &set, &cur_set);
5406 sigprocmask(SIG_SETMASK, &set, &oset);
5407 host_to_target_old_sigset(&target_set, &oset);
5408 ret = target_set;
5410 break;
5411 #endif
5412 #ifdef TARGET_NR_sigprocmask
5413 case TARGET_NR_sigprocmask:
5415 #if defined(TARGET_ALPHA)
5416 sigset_t set, oldset;
5417 abi_ulong mask;
5418 int how;
5420 switch (arg1) {
5421 case TARGET_SIG_BLOCK:
5422 how = SIG_BLOCK;
5423 break;
5424 case TARGET_SIG_UNBLOCK:
5425 how = SIG_UNBLOCK;
5426 break;
5427 case TARGET_SIG_SETMASK:
5428 how = SIG_SETMASK;
5429 break;
5430 default:
5431 ret = -TARGET_EINVAL;
5432 goto fail;
5434 mask = arg2;
5435 target_to_host_old_sigset(&set, &mask);
5437 ret = get_errno(sigprocmask(how, &set, &oldset));
5439 if (!is_error(ret)) {
5440 host_to_target_old_sigset(&mask, &oldset);
5441 ret = mask;
5442 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5444 #else
5445 sigset_t set, oldset, *set_ptr;
5446 int how;
5448 if (arg2) {
5449 switch (arg1) {
5450 case TARGET_SIG_BLOCK:
5451 how = SIG_BLOCK;
5452 break;
5453 case TARGET_SIG_UNBLOCK:
5454 how = SIG_UNBLOCK;
5455 break;
5456 case TARGET_SIG_SETMASK:
5457 how = SIG_SETMASK;
5458 break;
5459 default:
5460 ret = -TARGET_EINVAL;
5461 goto fail;
5463 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5464 goto efault;
5465 target_to_host_old_sigset(&set, p);
5466 unlock_user(p, arg2, 0);
5467 set_ptr = &set;
5468 } else {
5469 how = 0;
5470 set_ptr = NULL;
5472 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5473 if (!is_error(ret) && arg3) {
5474 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5475 goto efault;
5476 host_to_target_old_sigset(p, &oldset);
5477 unlock_user(p, arg3, sizeof(target_sigset_t));
5479 #endif
5481 break;
5482 #endif
5483 case TARGET_NR_rt_sigprocmask:
5485 int how = arg1;
5486 sigset_t set, oldset, *set_ptr;
5488 if (arg2) {
5489 switch(how) {
5490 case TARGET_SIG_BLOCK:
5491 how = SIG_BLOCK;
5492 break;
5493 case TARGET_SIG_UNBLOCK:
5494 how = SIG_UNBLOCK;
5495 break;
5496 case TARGET_SIG_SETMASK:
5497 how = SIG_SETMASK;
5498 break;
5499 default:
5500 ret = -TARGET_EINVAL;
5501 goto fail;
5503 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
5504 goto efault;
5505 target_to_host_sigset(&set, p);
5506 unlock_user(p, arg2, 0);
5507 set_ptr = &set;
5508 } else {
5509 how = 0;
5510 set_ptr = NULL;
5512 ret = get_errno(sigprocmask(how, set_ptr, &oldset));
5513 if (!is_error(ret) && arg3) {
5514 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
5515 goto efault;
5516 host_to_target_sigset(p, &oldset);
5517 unlock_user(p, arg3, sizeof(target_sigset_t));
5520 break;
5521 #ifdef TARGET_NR_sigpending
5522 case TARGET_NR_sigpending:
5524 sigset_t set;
5525 ret = get_errno(sigpending(&set));
5526 if (!is_error(ret)) {
5527 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5528 goto efault;
5529 host_to_target_old_sigset(p, &set);
5530 unlock_user(p, arg1, sizeof(target_sigset_t));
5533 break;
5534 #endif
5535 case TARGET_NR_rt_sigpending:
5537 sigset_t set;
5538 ret = get_errno(sigpending(&set));
5539 if (!is_error(ret)) {
5540 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
5541 goto efault;
5542 host_to_target_sigset(p, &set);
5543 unlock_user(p, arg1, sizeof(target_sigset_t));
5546 break;
5547 #ifdef TARGET_NR_sigsuspend
5548 case TARGET_NR_sigsuspend:
5550 sigset_t set;
5551 #if defined(TARGET_ALPHA)
5552 abi_ulong mask = arg1;
5553 target_to_host_old_sigset(&set, &mask);
5554 #else
5555 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5556 goto efault;
5557 target_to_host_old_sigset(&set, p);
5558 unlock_user(p, arg1, 0);
5559 #endif
5560 ret = get_errno(sigsuspend(&set));
5562 break;
5563 #endif
5564 case TARGET_NR_rt_sigsuspend:
5566 sigset_t set;
5567 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5568 goto efault;
5569 target_to_host_sigset(&set, p);
5570 unlock_user(p, arg1, 0);
5571 ret = get_errno(sigsuspend(&set));
5573 break;
5574 case TARGET_NR_rt_sigtimedwait:
5576 sigset_t set;
5577 struct timespec uts, *puts;
5578 siginfo_t uinfo;
5580 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
5581 goto efault;
5582 target_to_host_sigset(&set, p);
5583 unlock_user(p, arg1, 0);
5584 if (arg3) {
5585 puts = &uts;
5586 target_to_host_timespec(puts, arg3);
5587 } else {
5588 puts = NULL;
5590 ret = get_errno(sigtimedwait(&set, &uinfo, puts));
5591 if (!is_error(ret) && arg2) {
5592 if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
5593 goto efault;
5594 host_to_target_siginfo(p, &uinfo);
5595 unlock_user(p, arg2, sizeof(target_siginfo_t));
5598 break;
5599 case TARGET_NR_rt_sigqueueinfo:
5601 siginfo_t uinfo;
5602 if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
5603 goto efault;
5604 target_to_host_siginfo(&uinfo, p);
5605 unlock_user(p, arg1, 0);
5606 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
5608 break;
5609 #ifdef TARGET_NR_sigreturn
5610 case TARGET_NR_sigreturn:
5611 /* NOTE: ret is eax, so no transcoding needs to be done */
5612 ret = do_sigreturn(cpu_env);
5613 break;
5614 #endif
5615 case TARGET_NR_rt_sigreturn:
5616 /* NOTE: ret is eax, so no transcoding needs to be done */
5617 ret = do_rt_sigreturn(cpu_env);
5618 break;
5619 case TARGET_NR_sethostname:
5620 if (!(p = lock_user_string(arg1)))
5621 goto efault;
5622 ret = get_errno(sethostname(p, arg2));
5623 unlock_user(p, arg1, 0);
5624 break;
5625 case TARGET_NR_setrlimit:
5627 int resource = target_to_host_resource(arg1);
5628 struct target_rlimit *target_rlim;
5629 struct rlimit rlim;
5630 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
5631 goto efault;
5632 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
5633 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
5634 unlock_user_struct(target_rlim, arg2, 0);
5635 ret = get_errno(setrlimit(resource, &rlim));
5637 break;
5638 case TARGET_NR_getrlimit:
5640 int resource = target_to_host_resource(arg1);
5641 struct target_rlimit *target_rlim;
5642 struct rlimit rlim;
5644 ret = get_errno(getrlimit(resource, &rlim));
5645 if (!is_error(ret)) {
5646 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
5647 goto efault;
5648 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
5649 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
5650 unlock_user_struct(target_rlim, arg2, 1);
5653 break;
5654 case TARGET_NR_getrusage:
5656 struct rusage rusage;
5657 ret = get_errno(getrusage(arg1, &rusage));
5658 if (!is_error(ret)) {
5659 host_to_target_rusage(arg2, &rusage);
5662 break;
5663 case TARGET_NR_gettimeofday:
5665 struct timeval tv;
5666 ret = get_errno(gettimeofday(&tv, NULL));
5667 if (!is_error(ret)) {
5668 if (copy_to_user_timeval(arg1, &tv))
5669 goto efault;
5672 break;
5673 case TARGET_NR_settimeofday:
5675 struct timeval tv;
5676 if (copy_from_user_timeval(&tv, arg1))
5677 goto efault;
5678 ret = get_errno(settimeofday(&tv, NULL));
5680 break;
5681 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5682 case TARGET_NR_select:
5684 struct target_sel_arg_struct *sel;
5685 abi_ulong inp, outp, exp, tvp;
5686 long nsel;
5688 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
5689 goto efault;
5690 nsel = tswapl(sel->n);
5691 inp = tswapl(sel->inp);
5692 outp = tswapl(sel->outp);
5693 exp = tswapl(sel->exp);
5694 tvp = tswapl(sel->tvp);
5695 unlock_user_struct(sel, arg1, 0);
5696 ret = do_select(nsel, inp, outp, exp, tvp);
5698 break;
5699 #endif
5700 #ifdef TARGET_NR_pselect6
5701 case TARGET_NR_pselect6:
5703 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
5704 fd_set rfds, wfds, efds;
5705 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
5706 struct timespec ts, *ts_ptr;
5708 /*
5709 * The 6th arg is actually two args smashed together,
5710 * so we cannot use the C library.
5711 */
5712 sigset_t set;
5713 struct {
5714 sigset_t *set;
5715 size_t size;
5716 } sig, *sig_ptr;
5718 abi_ulong arg_sigset, arg_sigsize, *arg7;
5719 target_sigset_t *target_sigset;
5721 n = arg1;
5722 rfd_addr = arg2;
5723 wfd_addr = arg3;
5724 efd_addr = arg4;
5725 ts_addr = arg5;
5727 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
5728 if (ret) {
5729 goto fail;
5731 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
5732 if (ret) {
5733 goto fail;
5735 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
5736 if (ret) {
5737 goto fail;
5740 /*
5741 * This takes a timespec, and not a timeval, so we cannot
5742 * use the do_select() helper ...
5743 */
5744 if (ts_addr) {
5745 if (target_to_host_timespec(&ts, ts_addr)) {
5746 goto efault;
5748 ts_ptr = &ts;
5749 } else {
5750 ts_ptr = NULL;
5753 /* Extract the two packed args for the sigset */
5754 if (arg6) {
5755 sig_ptr = &sig;
5756 sig.size = _NSIG / 8;
5758 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
5759 if (!arg7) {
5760 goto efault;
5762 arg_sigset = tswapl(arg7[0]);
5763 arg_sigsize = tswapl(arg7[1]);
5764 unlock_user(arg7, arg6, 0);
5766 if (arg_sigset) {
5767 sig.set = &set;
5768 if (arg_sigsize != sizeof(*target_sigset)) {
5769 /* Like the kernel, we enforce correct size sigsets */
5770 ret = -TARGET_EINVAL;
5771 goto fail;
5773 target_sigset = lock_user(VERIFY_READ, arg_sigset,
5774 sizeof(*target_sigset), 1);
5775 if (!target_sigset) {
5776 goto efault;
5778 target_to_host_sigset(&set, target_sigset);
5779 unlock_user(target_sigset, arg_sigset, 0);
5780 } else {
5781 sig.set = NULL;
5783 } else {
5784 sig_ptr = NULL;
5787 ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
5788 ts_ptr, sig_ptr));
5790 if (!is_error(ret)) {
5791 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
5792 goto efault;
5793 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
5794 goto efault;
5795 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
5796 goto efault;
5798 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
5799 goto efault;
5802 break;
5803 #endif
5804 case TARGET_NR_symlink:
5806 void *p2;
5807 p = lock_user_string(arg1);
5808 p2 = lock_user_string(arg2);
5809 if (!p || !p2)
5810 ret = -TARGET_EFAULT;
5811 else
5812 ret = get_errno(symlink(p, p2));
5813 unlock_user(p2, arg2, 0);
5814 unlock_user(p, arg1, 0);
5816 break;
5817 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5818 case TARGET_NR_symlinkat:
5820 void *p2;
5821 p = lock_user_string(arg1);
5822 p2 = lock_user_string(arg3);
5823 if (!p || !p2)
5824 ret = -TARGET_EFAULT;
5825 else
5826 ret = get_errno(sys_symlinkat(p, arg2, p2));
5827 unlock_user(p2, arg3, 0);
5828 unlock_user(p, arg1, 0);
5830 break;
5831 #endif
5832 #ifdef TARGET_NR_oldlstat
5833 case TARGET_NR_oldlstat:
5834 goto unimplemented;
5835 #endif
5836 case TARGET_NR_readlink:
5838 void *p2, *temp;
5839 p = lock_user_string(arg1);
5840 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
5841 if (!p || !p2)
5842 ret = -TARGET_EFAULT;
5843 else {
5844 if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
5845 char real[PATH_MAX];
5846 temp = realpath(exec_path,real);
5847 ret = (temp == NULL) ? get_errno(-1) : strlen(real);
5848 snprintf((char *)p2, arg3, "%s", real);
5850 else
5851 ret = get_errno(readlink(path(p), p2, arg3));
5853 unlock_user(p2, arg2, ret);
5854 unlock_user(p, arg1, 0);
5856 break;
5857 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5858 case TARGET_NR_readlinkat:
5860 void *p2;
5861 p = lock_user_string(arg2);
5862 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
5863 if (!p || !p2)
5864 ret = -TARGET_EFAULT;
5865 else
5866 ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
5867 unlock_user(p2, arg3, ret);
5868 unlock_user(p, arg2, 0);
5870 break;
5871 #endif
5872 #ifdef TARGET_NR_uselib
5873 case TARGET_NR_uselib:
5874 goto unimplemented;
5875 #endif
5876 #ifdef TARGET_NR_swapon
5877 case TARGET_NR_swapon:
5878 if (!(p = lock_user_string(arg1)))
5879 goto efault;
5880 ret = get_errno(swapon(p, arg2));
5881 unlock_user(p, arg1, 0);
5882 break;
5883 #endif
5884 case TARGET_NR_reboot:
5885 goto unimplemented;
5886 #ifdef TARGET_NR_readdir
5887 case TARGET_NR_readdir:
5888 goto unimplemented;
5889 #endif
5890 #ifdef TARGET_NR_mmap
5891 case TARGET_NR_mmap:
5892 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
5893 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
5894 || defined(TARGET_S390X)
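/* These targets use the old mmap calling convention: arg1 points at a block
   of six arguments in guest memory rather than passing them in registers. */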
5896 abi_ulong *v;
5897 abi_ulong v1, v2, v3, v4, v5, v6;
5898 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
5899 goto efault;
5900 v1 = tswapl(v[0]);
5901 v2 = tswapl(v[1]);
5902 v3 = tswapl(v[2]);
5903 v4 = tswapl(v[3]);
5904 v5 = tswapl(v[4]);
5905 v6 = tswapl(v[5]);
5906 unlock_user(v, arg1, 0);
5907 ret = get_errno(target_mmap(v1, v2, v3,
5908 target_to_host_bitmask(v4, mmap_flags_tbl),
5909 v5, v6));
5911 #else
5912 ret = get_errno(target_mmap(arg1, arg2, arg3,
5913 target_to_host_bitmask(arg4, mmap_flags_tbl),
5914 arg5,
5915 arg6));
5916 #endif
5917 break;
5918 #endif
5919 #ifdef TARGET_NR_mmap2
5920 case TARGET_NR_mmap2:
5921 #ifndef MMAP_SHIFT
5922 #define MMAP_SHIFT 12
5923 #endif
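/* mmap2 passes the file offset in units of 1 << MMAP_SHIFT (4096-byte pages),
   so scale it back to a byte offset for target_mmap(). */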
5924 ret = get_errno(target_mmap(arg1, arg2, arg3,
5925 target_to_host_bitmask(arg4, mmap_flags_tbl),
5926 arg5,
5927 arg6 << MMAP_SHIFT));
5928 break;
5929 #endif
5930 case TARGET_NR_munmap:
5931 ret = get_errno(target_munmap(arg1, arg2));
5932 break;
5933 case TARGET_NR_mprotect:
5935 TaskState *ts = ((CPUState *)cpu_env)->opaque;
5936 /* Special hack to detect libc making the stack executable. */
5937 if ((arg3 & PROT_GROWSDOWN)
5938 && arg1 >= ts->info->stack_limit
5939 && arg1 <= ts->info->start_stack) {
5940 arg3 &= ~PROT_GROWSDOWN;
5941 arg2 = arg2 + arg1 - ts->info->stack_limit;
5942 arg1 = ts->info->stack_limit;
5945 ret = get_errno(target_mprotect(arg1, arg2, arg3));
5946 break;
5947 #ifdef TARGET_NR_mremap
5948 case TARGET_NR_mremap:
5949 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
5950 break;
5951 #endif
5952 /* ??? msync/mlock/munlock are broken for softmmu. */
5953 #ifdef TARGET_NR_msync
5954 case TARGET_NR_msync:
5955 ret = get_errno(msync(g2h(arg1), arg2, arg3));
5956 break;
5957 #endif
5958 #ifdef TARGET_NR_mlock
5959 case TARGET_NR_mlock:
5960 ret = get_errno(mlock(g2h(arg1), arg2));
5961 break;
5962 #endif
5963 #ifdef TARGET_NR_munlock
5964 case TARGET_NR_munlock:
5965 ret = get_errno(munlock(g2h(arg1), arg2));
5966 break;
5967 #endif
5968 #ifdef TARGET_NR_mlockall
5969 case TARGET_NR_mlockall:
5970 ret = get_errno(mlockall(arg1));
5971 break;
5972 #endif
5973 #ifdef TARGET_NR_munlockall
5974 case TARGET_NR_munlockall:
5975 ret = get_errno(munlockall());
5976 break;
5977 #endif
5978 case TARGET_NR_truncate:
5979 if (!(p = lock_user_string(arg1)))
5980 goto efault;
5981 ret = get_errno(truncate(p, arg2));
5982 unlock_user(p, arg1, 0);
5983 break;
5984 case TARGET_NR_ftruncate:
5985 ret = get_errno(ftruncate(arg1, arg2));
5986 break;
5987 case TARGET_NR_fchmod:
5988 ret = get_errno(fchmod(arg1, arg2));
5989 break;
5990 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5991 case TARGET_NR_fchmodat:
5992 if (!(p = lock_user_string(arg2)))
5993 goto efault;
5994 ret = get_errno(sys_fchmodat(arg1, p, arg3));
5995 unlock_user(p, arg2, 0);
5996 break;
5997 #endif
5998 case TARGET_NR_getpriority:
5999 /* libc does special remapping of the return value of
6000 * sys_getpriority() so it's just easiest to call
6001 * sys_getpriority() directly rather than through libc. */
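        /* (The kernel biases the value by 20, e.g. nice -5 is returned as 25,
           so that negative nice values cannot be mistaken for errors; the
           guest's own libc undoes that bias.) */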
6002 ret = get_errno(sys_getpriority(arg1, arg2));
6003 break;
6004 case TARGET_NR_setpriority:
6005 ret = get_errno(setpriority(arg1, arg2, arg3));
6006 break;
6007 #ifdef TARGET_NR_profil
6008 case TARGET_NR_profil:
6009 goto unimplemented;
6010 #endif
6011 case TARGET_NR_statfs:
6012 if (!(p = lock_user_string(arg1)))
6013 goto efault;
6014 ret = get_errno(statfs(path(p), &stfs));
6015 unlock_user(p, arg1, 0);
6016 convert_statfs:
6017 if (!is_error(ret)) {
6018 struct target_statfs *target_stfs;
6020 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
6021 goto efault;
6022 __put_user(stfs.f_type, &target_stfs->f_type);
6023 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6024 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6025 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6026 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6027 __put_user(stfs.f_files, &target_stfs->f_files);
6028 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6029 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6030 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6031 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6032 unlock_user_struct(target_stfs, arg2, 1);
6034 break;
6035 case TARGET_NR_fstatfs:
6036 ret = get_errno(fstatfs(arg1, &stfs));
6037 goto convert_statfs;
6038 #ifdef TARGET_NR_statfs64
6039 case TARGET_NR_statfs64:
6040 if (!(p = lock_user_string(arg1)))
6041 goto efault;
6042 ret = get_errno(statfs(path(p), &stfs));
6043 unlock_user(p, arg1, 0);
6044 convert_statfs64:
6045 if (!is_error(ret)) {
6046 struct target_statfs64 *target_stfs;
6048 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
6049 goto efault;
6050 __put_user(stfs.f_type, &target_stfs->f_type);
6051 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
6052 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
6053 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
6054 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
6055 __put_user(stfs.f_files, &target_stfs->f_files);
6056 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
6057 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
6058 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
6059 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
6060 unlock_user_struct(target_stfs, arg3, 1);
6062 break;
6063 case TARGET_NR_fstatfs64:
6064 ret = get_errno(fstatfs(arg1, &stfs));
6065 goto convert_statfs64;
6066 #endif
6067 #ifdef TARGET_NR_ioperm
6068 case TARGET_NR_ioperm:
6069 goto unimplemented;
6070 #endif
6071 #ifdef TARGET_NR_socketcall
6072 case TARGET_NR_socketcall:
6073 ret = do_socketcall(arg1, arg2);
6074 break;
6075 #endif
6076 #ifdef TARGET_NR_accept
6077 case TARGET_NR_accept:
6078 ret = do_accept(arg1, arg2, arg3);
6079 break;
6080 #endif
6081 #ifdef TARGET_NR_bind
6082 case TARGET_NR_bind:
6083 ret = do_bind(arg1, arg2, arg3);
6084 break;
6085 #endif
6086 #ifdef TARGET_NR_connect
6087 case TARGET_NR_connect:
6088 ret = do_connect(arg1, arg2, arg3);
6089 break;
6090 #endif
6091 #ifdef TARGET_NR_getpeername
6092 case TARGET_NR_getpeername:
6093 ret = do_getpeername(arg1, arg2, arg3);
6094 break;
6095 #endif
6096 #ifdef TARGET_NR_getsockname
6097 case TARGET_NR_getsockname:
6098 ret = do_getsockname(arg1, arg2, arg3);
6099 break;
6100 #endif
6101 #ifdef TARGET_NR_getsockopt
6102 case TARGET_NR_getsockopt:
6103 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
6104 break;
6105 #endif
6106 #ifdef TARGET_NR_listen
6107 case TARGET_NR_listen:
6108 ret = get_errno(listen(arg1, arg2));
6109 break;
6110 #endif
6111 #ifdef TARGET_NR_recv
6112 case TARGET_NR_recv:
6113 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
6114 break;
6115 #endif
6116 #ifdef TARGET_NR_recvfrom
6117 case TARGET_NR_recvfrom:
6118 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
6119 break;
6120 #endif
6121 #ifdef TARGET_NR_recvmsg
6122 case TARGET_NR_recvmsg:
6123 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
6124 break;
6125 #endif
6126 #ifdef TARGET_NR_send
6127 case TARGET_NR_send:
6128 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
6129 break;
6130 #endif
6131 #ifdef TARGET_NR_sendmsg
6132 case TARGET_NR_sendmsg:
6133 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
6134 break;
6135 #endif
6136 #ifdef TARGET_NR_sendto
6137 case TARGET_NR_sendto:
6138 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
6139 break;
6140 #endif
6141 #ifdef TARGET_NR_shutdown
6142 case TARGET_NR_shutdown:
6143 ret = get_errno(shutdown(arg1, arg2));
6144 break;
6145 #endif
6146 #ifdef TARGET_NR_socket
6147 case TARGET_NR_socket:
6148 ret = do_socket(arg1, arg2, arg3);
6149 break;
6150 #endif
6151 #ifdef TARGET_NR_socketpair
6152 case TARGET_NR_socketpair:
6153 ret = do_socketpair(arg1, arg2, arg3, arg4);
6154 break;
6155 #endif
6156 #ifdef TARGET_NR_setsockopt
6157 case TARGET_NR_setsockopt:
6158 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
6159 break;
6160 #endif
6162 case TARGET_NR_syslog:
6163 if (!(p = lock_user_string(arg2)))
6164 goto efault;
6165 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
6166 unlock_user(p, arg2, 0);
6167 break;
6169 case TARGET_NR_setitimer:
6171 struct itimerval value, ovalue, *pvalue;
6173 if (arg2) {
6174 pvalue = &value;
6175 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
6176 || copy_from_user_timeval(&pvalue->it_value,
6177 arg2 + sizeof(struct target_timeval)))
6178 goto efault;
6179 } else {
6180 pvalue = NULL;
6182 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
6183 if (!is_error(ret) && arg3) {
6184 if (copy_to_user_timeval(arg3,
6185 &ovalue.it_interval)
6186 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
6187 &ovalue.it_value))
6188 goto efault;
6191 break;
6192 case TARGET_NR_getitimer:
6194 struct itimerval value;
6196 ret = get_errno(getitimer(arg1, &value));
6197 if (!is_error(ret) && arg2) {
6198 if (copy_to_user_timeval(arg2,
6199 &value.it_interval)
6200 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
6201 &value.it_value))
6202 goto efault;
6205 break;
6206 case TARGET_NR_stat:
6207 if (!(p = lock_user_string(arg1)))
6208 goto efault;
6209 ret = get_errno(stat(path(p), &st));
6210 unlock_user(p, arg1, 0);
6211 goto do_stat;
6212 case TARGET_NR_lstat:
6213 if (!(p = lock_user_string(arg1)))
6214 goto efault;
6215 ret = get_errno(lstat(path(p), &st));
6216 unlock_user(p, arg1, 0);
6217 goto do_stat;
6218 case TARGET_NR_fstat:
6220 ret = get_errno(fstat(arg1, &st));
6221 do_stat:
6222 if (!is_error(ret)) {
6223 struct target_stat *target_st;
6225 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
6226 goto efault;
6227 memset(target_st, 0, sizeof(*target_st));
6228 __put_user(st.st_dev, &target_st->st_dev);
6229 __put_user(st.st_ino, &target_st->st_ino);
6230 __put_user(st.st_mode, &target_st->st_mode);
6231 __put_user(st.st_uid, &target_st->st_uid);
6232 __put_user(st.st_gid, &target_st->st_gid);
6233 __put_user(st.st_nlink, &target_st->st_nlink);
6234 __put_user(st.st_rdev, &target_st->st_rdev);
6235 __put_user(st.st_size, &target_st->st_size);
6236 __put_user(st.st_blksize, &target_st->st_blksize);
6237 __put_user(st.st_blocks, &target_st->st_blocks);
6238 __put_user(st.st_atime, &target_st->target_st_atime);
6239 __put_user(st.st_mtime, &target_st->target_st_mtime);
6240 __put_user(st.st_ctime, &target_st->target_st_ctime);
6241 unlock_user_struct(target_st, arg2, 1);
6244 break;
6245 #ifdef TARGET_NR_olduname
6246 case TARGET_NR_olduname:
6247 goto unimplemented;
6248 #endif
6249 #ifdef TARGET_NR_iopl
6250 case TARGET_NR_iopl:
6251 goto unimplemented;
6252 #endif
6253 case TARGET_NR_vhangup:
6254 ret = get_errno(vhangup());
6255 break;
6256 #ifdef TARGET_NR_idle
6257 case TARGET_NR_idle:
6258 goto unimplemented;
6259 #endif
6260 #ifdef TARGET_NR_syscall
6261 case TARGET_NR_syscall:
6262 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
6263 arg6, arg7, arg8, 0);
6264 break;
6265 #endif
6266 case TARGET_NR_wait4:
6268 int status;
6269 abi_long status_ptr = arg2;
6270 struct rusage rusage, *rusage_ptr;
6271 abi_ulong target_rusage = arg4;
6272 if (target_rusage)
6273 rusage_ptr = &rusage;
6274 else
6275 rusage_ptr = NULL;
6276 ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
6277 if (!is_error(ret)) {
6278 if (status_ptr) {
6279 status = host_to_target_waitstatus(status);
6280 if (put_user_s32(status, status_ptr))
6281 goto efault;
6283 if (target_rusage)
6284 host_to_target_rusage(target_rusage, &rusage);
6287 break;
6288 #ifdef TARGET_NR_swapoff
6289 case TARGET_NR_swapoff:
6290 if (!(p = lock_user_string(arg1)))
6291 goto efault;
6292 ret = get_errno(swapoff(p));
6293 unlock_user(p, arg1, 0);
6294 break;
6295 #endif
6296 case TARGET_NR_sysinfo:
6298 struct target_sysinfo *target_value;
6299 struct sysinfo value;
6300 ret = get_errno(sysinfo(&value));
6301 if (!is_error(ret) && arg1)
6303 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
6304 goto efault;
6305 __put_user(value.uptime, &target_value->uptime);
6306 __put_user(value.loads[0], &target_value->loads[0]);
6307 __put_user(value.loads[1], &target_value->loads[1]);
6308 __put_user(value.loads[2], &target_value->loads[2]);
6309 __put_user(value.totalram, &target_value->totalram);
6310 __put_user(value.freeram, &target_value->freeram);
6311 __put_user(value.sharedram, &target_value->sharedram);
6312 __put_user(value.bufferram, &target_value->bufferram);
6313 __put_user(value.totalswap, &target_value->totalswap);
6314 __put_user(value.freeswap, &target_value->freeswap);
6315 __put_user(value.procs, &target_value->procs);
6316 __put_user(value.totalhigh, &target_value->totalhigh);
6317 __put_user(value.freehigh, &target_value->freehigh);
6318 __put_user(value.mem_unit, &target_value->mem_unit);
6319 unlock_user_struct(target_value, arg1, 1);
6322 break;
6323 #ifdef TARGET_NR_ipc
6324 case TARGET_NR_ipc:
6325 ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
6326 break;
6327 #endif
6328 #ifdef TARGET_NR_semget
6329 case TARGET_NR_semget:
6330 ret = get_errno(semget(arg1, arg2, arg3));
6331 break;
6332 #endif
6333 #ifdef TARGET_NR_semop
6334 case TARGET_NR_semop:
6335 ret = get_errno(do_semop(arg1, arg2, arg3));
6336 break;
6337 #endif
6338 #ifdef TARGET_NR_semctl
6339 case TARGET_NR_semctl:
6340 ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
6341 break;
6342 #endif
6343 #ifdef TARGET_NR_msgctl
6344 case TARGET_NR_msgctl:
6345 ret = do_msgctl(arg1, arg2, arg3);
6346 break;
6347 #endif
6348 #ifdef TARGET_NR_msgget
6349 case TARGET_NR_msgget:
6350 ret = get_errno(msgget(arg1, arg2));
6351 break;
6352 #endif
6353 #ifdef TARGET_NR_msgrcv
6354 case TARGET_NR_msgrcv:
6355 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
6356 break;
6357 #endif
6358 #ifdef TARGET_NR_msgsnd
6359 case TARGET_NR_msgsnd:
6360 ret = do_msgsnd(arg1, arg2, arg3, arg4);
6361 break;
6362 #endif
6363 #ifdef TARGET_NR_shmget
6364 case TARGET_NR_shmget:
6365 ret = get_errno(shmget(arg1, arg2, arg3));
6366 break;
6367 #endif
6368 #ifdef TARGET_NR_shmctl
6369 case TARGET_NR_shmctl:
6370 ret = do_shmctl(arg1, arg2, arg3);
6371 break;
6372 #endif
6373 #ifdef TARGET_NR_shmat
6374 case TARGET_NR_shmat:
6375 ret = do_shmat(arg1, arg2, arg3);
6376 break;
6377 #endif
6378 #ifdef TARGET_NR_shmdt
6379 case TARGET_NR_shmdt:
6380 ret = do_shmdt(arg1);
6381 break;
6382 #endif
6383 case TARGET_NR_fsync:
6384 ret = get_errno(fsync(arg1));
6385 break;
6386 case TARGET_NR_clone:
6387 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6388 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
6389 #elif defined(TARGET_CRIS)
6390 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
6391 #elif defined(TARGET_S390X)
6392 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
6393 #else
6394 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
6395 #endif
6396 break;
6397 #ifdef __NR_exit_group
6398 /* new thread calls */
6399 case TARGET_NR_exit_group:
6400 #ifdef TARGET_GPROF
6401 _mcleanup();
6402 #endif
6403 gdb_exit(cpu_env, arg1);
6404 ret = get_errno(exit_group(arg1));
6405 break;
6406 #endif
6407 case TARGET_NR_setdomainname:
6408 if (!(p = lock_user_string(arg1)))
6409 goto efault;
6410 ret = get_errno(setdomainname(p, arg2));
6411 unlock_user(p, arg1, 0);
6412 break;
6413 case TARGET_NR_uname:
6414 /* no need to transcode because we use the linux syscall */
6416 struct new_utsname * buf;
6418 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
6419 goto efault;
6420 ret = get_errno(sys_uname(buf));
6421 if (!is_error(ret)) {
6422             /* Overwrite the native machine name with whatever is being
6423 emulated. */
6424 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
6425 /* Allow the user to override the reported release. */
6426 if (qemu_uname_release && *qemu_uname_release)
6427 strcpy (buf->release, qemu_uname_release);
6429 unlock_user_struct(buf, arg1, 1);
6431 break;
6432 #ifdef TARGET_I386
6433 case TARGET_NR_modify_ldt:
6434 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
6435 break;
6436 #if !defined(TARGET_X86_64)
6437 case TARGET_NR_vm86old:
6438 goto unimplemented;
6439 case TARGET_NR_vm86:
6440 ret = do_vm86(cpu_env, arg1, arg2);
6441 break;
6442 #endif
6443 #endif
6444 case TARGET_NR_adjtimex:
6445 goto unimplemented;
6446 #ifdef TARGET_NR_create_module
6447 case TARGET_NR_create_module:
6448 #endif
6449 case TARGET_NR_init_module:
6450 case TARGET_NR_delete_module:
6451 #ifdef TARGET_NR_get_kernel_syms
6452 case TARGET_NR_get_kernel_syms:
6453 #endif
6454 goto unimplemented;
6455 case TARGET_NR_quotactl:
6456 goto unimplemented;
6457 case TARGET_NR_getpgid:
6458 ret = get_errno(getpgid(arg1));
6459 break;
6460 case TARGET_NR_fchdir:
6461 ret = get_errno(fchdir(arg1));
6462 break;
6463 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6464 case TARGET_NR_bdflush:
6465 goto unimplemented;
6466 #endif
6467 #ifdef TARGET_NR_sysfs
6468 case TARGET_NR_sysfs:
6469 goto unimplemented;
6470 #endif
6471 case TARGET_NR_personality:
6472 ret = get_errno(personality(arg1));
6473 break;
6474 #ifdef TARGET_NR_afs_syscall
6475 case TARGET_NR_afs_syscall:
6476 goto unimplemented;
6477 #endif
6478 #ifdef TARGET_NR__llseek /* Not on alpha */
6479 case TARGET_NR__llseek:
6481 int64_t res;
6482 #if !defined(__NR_llseek)
6483 res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
6484 if (res == -1) {
6485 ret = get_errno(res);
6486 } else {
6487 ret = 0;
6489 #else
6490 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
6491 #endif
6492 if ((ret == 0) && put_user_s64(res, arg4)) {
6493 goto efault;
6496 break;
6497 #endif
6498 case TARGET_NR_getdents:
6499 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6501 struct target_dirent *target_dirp;
6502 struct linux_dirent *dirp;
6503 abi_long count = arg3;
6505 dirp = malloc(count);
6506 if (!dirp) {
6507 ret = -TARGET_ENOMEM;
6508 goto fail;
6511 ret = get_errno(sys_getdents(arg1, dirp, count));
6512 if (!is_error(ret)) {
6513 struct linux_dirent *de;
6514 struct target_dirent *tde;
6515 int len = ret;
6516 int reclen, treclen;
6517 int count1, tnamelen;
6519 count1 = 0;
6520 de = dirp;
6521 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6522 goto efault;
6523 tde = target_dirp;
6524 while (len > 0) {
6525 reclen = de->d_reclen;
6526 treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
6527 tde->d_reclen = tswap16(treclen);
6528 tde->d_ino = tswapl(de->d_ino);
6529 tde->d_off = tswapl(de->d_off);
6530 tnamelen = treclen - (2 * sizeof(abi_long) + 2);
6531 if (tnamelen > 256)
6532 tnamelen = 256;
6533 /* XXX: may not be correct */
6534 pstrcpy(tde->d_name, tnamelen, de->d_name);
6535 de = (struct linux_dirent *)((char *)de + reclen);
6536 len -= reclen;
6537 tde = (struct target_dirent *)((char *)tde + treclen);
6538 count1 += treclen;
6540 ret = count1;
6541 unlock_user(target_dirp, arg2, ret);
6543 free(dirp);
6545 #else
6547 struct linux_dirent *dirp;
6548 abi_long count = arg3;
6550 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6551 goto efault;
6552 ret = get_errno(sys_getdents(arg1, dirp, count));
6553 if (!is_error(ret)) {
6554 struct linux_dirent *de;
6555 int len = ret;
6556 int reclen;
6557 de = dirp;
6558 while (len > 0) {
6559 reclen = de->d_reclen;
6560 if (reclen > len)
6561 break;
6562 de->d_reclen = tswap16(reclen);
6563 tswapls(&de->d_ino);
6564 tswapls(&de->d_off);
6565 de = (struct linux_dirent *)((char *)de + reclen);
6566 len -= reclen;
6569 unlock_user(dirp, arg2, ret);
6571 #endif
6572 break;
6573 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6574 case TARGET_NR_getdents64:
6576 struct linux_dirent64 *dirp;
6577 abi_long count = arg3;
6578 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
6579 goto efault;
6580 ret = get_errno(sys_getdents64(arg1, dirp, count));
6581 if (!is_error(ret)) {
6582 struct linux_dirent64 *de;
6583 int len = ret;
6584 int reclen;
6585 de = dirp;
6586 while (len > 0) {
6587 reclen = de->d_reclen;
6588 if (reclen > len)
6589 break;
6590 de->d_reclen = tswap16(reclen);
6591 tswap64s((uint64_t *)&de->d_ino);
6592 tswap64s((uint64_t *)&de->d_off);
6593 de = (struct linux_dirent64 *)((char *)de + reclen);
6594 len -= reclen;
6597 unlock_user(dirp, arg2, ret);
6599 break;
6600 #endif /* TARGET_NR_getdents64 */
6601 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6602 #ifdef TARGET_S390X
6603 case TARGET_NR_select:
6604 #else
6605 case TARGET_NR__newselect:
6606 #endif
6607 ret = do_select(arg1, arg2, arg3, arg4, arg5);
6608 break;
6609 #endif
6610 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6611 # ifdef TARGET_NR_poll
6612 case TARGET_NR_poll:
6613 # endif
6614 # ifdef TARGET_NR_ppoll
6615 case TARGET_NR_ppoll:
6616 # endif
6618 struct target_pollfd *target_pfd;
6619 unsigned int nfds = arg2;
6620 int timeout = arg3;
6621 struct pollfd *pfd;
6622 unsigned int i;
6624 target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
6625 if (!target_pfd)
6626 goto efault;
6628 pfd = alloca(sizeof(struct pollfd) * nfds);
6629 for(i = 0; i < nfds; i++) {
6630 pfd[i].fd = tswap32(target_pfd[i].fd);
6631 pfd[i].events = tswap16(target_pfd[i].events);
6634 # ifdef TARGET_NR_ppoll
6635 if (num == TARGET_NR_ppoll) {
6636 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
6637 target_sigset_t *target_set;
6638 sigset_t _set, *set = &_set;
6640 if (arg3) {
6641 if (target_to_host_timespec(timeout_ts, arg3)) {
6642 unlock_user(target_pfd, arg1, 0);
6643 goto efault;
6645 } else {
6646 timeout_ts = NULL;
6649 if (arg4) {
6650 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
6651 if (!target_set) {
6652 unlock_user(target_pfd, arg1, 0);
6653 goto efault;
6655 target_to_host_sigset(set, target_set);
6656 } else {
6657 set = NULL;
6660 ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
6662 if (!is_error(ret) && arg3) {
6663 host_to_target_timespec(arg3, timeout_ts);
6665 if (arg4) {
6666 unlock_user(target_set, arg4, 0);
6668 } else
6669 # endif
6670 ret = get_errno(poll(pfd, nfds, timeout));
6672 if (!is_error(ret)) {
6673 for(i = 0; i < nfds; i++) {
6674 target_pfd[i].revents = tswap16(pfd[i].revents);
6677 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
6679 break;
6680 #endif
6681 case TARGET_NR_flock:
6682 /* NOTE: the flock constant seems to be the same for every
6683 Linux platform */
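        /* e.g. LOCK_SH, LOCK_EX, LOCK_UN and LOCK_NB have the same values on
           all Linux architectures, so arg2 can be passed through untranslated. */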
6684 ret = get_errno(flock(arg1, arg2));
6685 break;
6686 case TARGET_NR_readv:
6688 int count = arg3;
6689 struct iovec *vec;
6691 vec = alloca(count * sizeof(struct iovec));
6692 if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
6693 goto efault;
6694 ret = get_errno(readv(arg1, vec, count));
6695 unlock_iovec(vec, arg2, count, 1);
6697 break;
6698 case TARGET_NR_writev:
6700 int count = arg3;
6701 struct iovec *vec;
6703 vec = alloca(count * sizeof(struct iovec));
6704 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
6705 goto efault;
6706 ret = get_errno(writev(arg1, vec, count));
6707 unlock_iovec(vec, arg2, count, 0);
6709 break;
6710 case TARGET_NR_getsid:
6711 ret = get_errno(getsid(arg1));
6712 break;
6713 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6714 case TARGET_NR_fdatasync:
6715 ret = get_errno(fdatasync(arg1));
6716 break;
6717 #endif
6718 case TARGET_NR__sysctl:
6719 /* We don't implement this, but ENOTDIR is always a safe
6720 return value. */
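        /* (ENOTDIR matches what the kernel reports for an unknown sysctl name,
           so callers should treat it as "not available" rather than as a hard
           failure.) */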
6721 ret = -TARGET_ENOTDIR;
6722 break;
6723 case TARGET_NR_sched_getaffinity:
6725 unsigned int mask_size;
6726 unsigned long *mask;
6729          * sched_getaffinity needs multiples of ulong, so we need to take
6730 * care of mismatches between target ulong and host ulong sizes.
6732 if (arg2 & (sizeof(abi_ulong) - 1)) {
6733 ret = -TARGET_EINVAL;
6734 break;
6736 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
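        /* mask_size rounds the request up to whole host longs, e.g. a 32-bit
           guest passing arg2 == 4 on a 64-bit host gives (4 + 7) & ~7 == 8. */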
6738 mask = alloca(mask_size);
6739 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
6741 if (!is_error(ret)) {
6742 if (copy_to_user(arg3, mask, ret)) {
6743 goto efault;
6747 break;
6748 case TARGET_NR_sched_setaffinity:
6750 unsigned int mask_size;
6751 unsigned long *mask;
6754          * sched_setaffinity needs multiples of ulong, so we need to take
6755 * care of mismatches between target ulong and host ulong sizes.
6757 if (arg2 & (sizeof(abi_ulong) - 1)) {
6758 ret = -TARGET_EINVAL;
6759 break;
6761 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
6763 mask = alloca(mask_size);
6764 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
6765 goto efault;
6767 memcpy(mask, p, arg2);
6768         unlock_user_struct(p, arg3, 0);
6770 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
6772 break;
6773 case TARGET_NR_sched_setparam:
6775 struct sched_param *target_schp;
6776 struct sched_param schp;
6778 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
6779 goto efault;
6780 schp.sched_priority = tswap32(target_schp->sched_priority);
6781 unlock_user_struct(target_schp, arg2, 0);
6782 ret = get_errno(sched_setparam(arg1, &schp));
6784 break;
6785 case TARGET_NR_sched_getparam:
6787 struct sched_param *target_schp;
6788 struct sched_param schp;
6789 ret = get_errno(sched_getparam(arg1, &schp));
6790 if (!is_error(ret)) {
6791 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
6792 goto efault;
6793 target_schp->sched_priority = tswap32(schp.sched_priority);
6794 unlock_user_struct(target_schp, arg2, 1);
6797 break;
6798 case TARGET_NR_sched_setscheduler:
6800 struct sched_param *target_schp;
6801 struct sched_param schp;
6802 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
6803 goto efault;
6804 schp.sched_priority = tswap32(target_schp->sched_priority);
6805 unlock_user_struct(target_schp, arg3, 0);
6806 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
6808 break;
6809 case TARGET_NR_sched_getscheduler:
6810 ret = get_errno(sched_getscheduler(arg1));
6811 break;
6812 case TARGET_NR_sched_yield:
6813 ret = get_errno(sched_yield());
6814 break;
6815 case TARGET_NR_sched_get_priority_max:
6816 ret = get_errno(sched_get_priority_max(arg1));
6817 break;
6818 case TARGET_NR_sched_get_priority_min:
6819 ret = get_errno(sched_get_priority_min(arg1));
6820 break;
6821 case TARGET_NR_sched_rr_get_interval:
6823 struct timespec ts;
6824 ret = get_errno(sched_rr_get_interval(arg1, &ts));
6825 if (!is_error(ret)) {
6826 host_to_target_timespec(arg2, &ts);
6829 break;
6830 case TARGET_NR_nanosleep:
6832 struct timespec req, rem;
6833 target_to_host_timespec(&req, arg1);
6834 ret = get_errno(nanosleep(&req, &rem));
6835 if (is_error(ret) && arg2) {
6836 host_to_target_timespec(arg2, &rem);
6839 break;
6840 #ifdef TARGET_NR_query_module
6841 case TARGET_NR_query_module:
6842 goto unimplemented;
6843 #endif
6844 #ifdef TARGET_NR_nfsservctl
6845 case TARGET_NR_nfsservctl:
6846 goto unimplemented;
6847 #endif
6848 case TARGET_NR_prctl:
6849 switch (arg1)
6851 case PR_GET_PDEATHSIG:
6853 int deathsig;
6854 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
6855 if (!is_error(ret) && arg2
6856 && put_user_ual(deathsig, arg2))
6857 goto efault;
6859 break;
6860 default:
6861 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
6862 break;
6864 break;
6865 #ifdef TARGET_NR_arch_prctl
6866 case TARGET_NR_arch_prctl:
6867 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6868 ret = do_arch_prctl(cpu_env, arg1, arg2);
6869 break;
6870 #else
6871 goto unimplemented;
6872 #endif
6873 #endif
6874 #ifdef TARGET_NR_pread
6875 case TARGET_NR_pread:
6876 if (regpairs_aligned(cpu_env))
6877 arg4 = arg5;
6878 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6879 goto efault;
6880 ret = get_errno(pread(arg1, p, arg3, arg4));
6881 unlock_user(p, arg2, ret);
6882 break;
6883 case TARGET_NR_pwrite:
6884 if (regpairs_aligned(cpu_env))
6885 arg4 = arg5;
6886 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6887 goto efault;
6888 ret = get_errno(pwrite(arg1, p, arg3, arg4));
6889 unlock_user(p, arg2, 0);
6890 break;
6891 #endif
6892 #ifdef TARGET_NR_pread64
6893 case TARGET_NR_pread64:
6894 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
6895 goto efault;
6896 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
6897 unlock_user(p, arg2, ret);
6898 break;
6899 case TARGET_NR_pwrite64:
6900 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
6901 goto efault;
6902 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
6903 unlock_user(p, arg2, 0);
6904 break;
6905 #endif
6906 case TARGET_NR_getcwd:
6907 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
6908 goto efault;
6909 ret = get_errno(sys_getcwd1(p, arg2));
6910 unlock_user(p, arg1, ret);
6911 break;
6912 case TARGET_NR_capget:
6913 goto unimplemented;
6914 case TARGET_NR_capset:
6915 goto unimplemented;
6916 case TARGET_NR_sigaltstack:
6917 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6918 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6919 defined(TARGET_M68K) || defined(TARGET_S390X)
6920 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
6921 break;
6922 #else
6923 goto unimplemented;
6924 #endif
6925 case TARGET_NR_sendfile:
6926 goto unimplemented;
6927 #ifdef TARGET_NR_getpmsg
6928 case TARGET_NR_getpmsg:
6929 goto unimplemented;
6930 #endif
6931 #ifdef TARGET_NR_putpmsg
6932 case TARGET_NR_putpmsg:
6933 goto unimplemented;
6934 #endif
6935 #ifdef TARGET_NR_vfork
6936 case TARGET_NR_vfork:
6937 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
6938 0, 0, 0, 0));
6939 break;
6940 #endif
6941 #ifdef TARGET_NR_ugetrlimit
6942 case TARGET_NR_ugetrlimit:
6944 struct rlimit rlim;
6945 int resource = target_to_host_resource(arg1);
6946 ret = get_errno(getrlimit(resource, &rlim));
6947 if (!is_error(ret)) {
6948 struct target_rlimit *target_rlim;
6949 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
6950 goto efault;
6951 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
6952 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
6953 unlock_user_struct(target_rlim, arg2, 1);
6955 break;
6957 #endif
6958 #ifdef TARGET_NR_truncate64
6959 case TARGET_NR_truncate64:
6960 if (!(p = lock_user_string(arg1)))
6961 goto efault;
6962 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
6963 unlock_user(p, arg1, 0);
6964 break;
6965 #endif
6966 #ifdef TARGET_NR_ftruncate64
6967 case TARGET_NR_ftruncate64:
6968 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
6969 break;
6970 #endif
6971 #ifdef TARGET_NR_stat64
6972 case TARGET_NR_stat64:
6973 if (!(p = lock_user_string(arg1)))
6974 goto efault;
6975 ret = get_errno(stat(path(p), &st));
6976 unlock_user(p, arg1, 0);
6977 if (!is_error(ret))
6978 ret = host_to_target_stat64(cpu_env, arg2, &st);
6979 break;
6980 #endif
6981 #ifdef TARGET_NR_lstat64
6982 case TARGET_NR_lstat64:
6983 if (!(p = lock_user_string(arg1)))
6984 goto efault;
6985 ret = get_errno(lstat(path(p), &st));
6986 unlock_user(p, arg1, 0);
6987 if (!is_error(ret))
6988 ret = host_to_target_stat64(cpu_env, arg2, &st);
6989 break;
6990 #endif
6991 #ifdef TARGET_NR_fstat64
6992 case TARGET_NR_fstat64:
6993 ret = get_errno(fstat(arg1, &st));
6994 if (!is_error(ret))
6995 ret = host_to_target_stat64(cpu_env, arg2, &st);
6996 break;
6997 #endif
6998 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6999 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
7000 #ifdef TARGET_NR_fstatat64
7001 case TARGET_NR_fstatat64:
7002 #endif
7003 #ifdef TARGET_NR_newfstatat
7004 case TARGET_NR_newfstatat:
7005 #endif
7006 if (!(p = lock_user_string(arg2)))
7007 goto efault;
7008 #ifdef __NR_fstatat64
7009 ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
7010 #else
7011 ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
7012 #endif
7013 if (!is_error(ret))
7014 ret = host_to_target_stat64(cpu_env, arg3, &st);
7015 break;
7016 #endif
7017 case TARGET_NR_lchown:
7018 if (!(p = lock_user_string(arg1)))
7019 goto efault;
7020 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
7021 unlock_user(p, arg1, 0);
7022 break;
7023 #ifdef TARGET_NR_getuid
7024 case TARGET_NR_getuid:
7025 ret = get_errno(high2lowuid(getuid()));
7026 break;
7027 #endif
7028 #ifdef TARGET_NR_getgid
7029 case TARGET_NR_getgid:
7030 ret = get_errno(high2lowgid(getgid()));
7031 break;
7032 #endif
7033 #ifdef TARGET_NR_geteuid
7034 case TARGET_NR_geteuid:
7035 ret = get_errno(high2lowuid(geteuid()));
7036 break;
7037 #endif
7038 #ifdef TARGET_NR_getegid
7039 case TARGET_NR_getegid:
7040 ret = get_errno(high2lowgid(getegid()));
7041 break;
7042 #endif
7043 case TARGET_NR_setreuid:
7044 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
7045 break;
7046 case TARGET_NR_setregid:
7047 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
7048 break;
7049 case TARGET_NR_getgroups:
7051 int gidsetsize = arg1;
7052 target_id *target_grouplist;
7053 gid_t *grouplist;
7054 int i;
7056 grouplist = alloca(gidsetsize * sizeof(gid_t));
7057 ret = get_errno(getgroups(gidsetsize, grouplist));
7058 if (gidsetsize == 0)
7059 break;
7060 if (!is_error(ret)) {
7061 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
7062 if (!target_grouplist)
7063 goto efault;
7064 for(i = 0;i < ret; i++)
7065 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
7066 unlock_user(target_grouplist, arg2, gidsetsize * 2);
7069 break;
7070 case TARGET_NR_setgroups:
7072 int gidsetsize = arg1;
7073 target_id *target_grouplist;
7074 gid_t *grouplist;
7075 int i;
7077 grouplist = alloca(gidsetsize * sizeof(gid_t));
7078 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
7079 if (!target_grouplist) {
7080 ret = -TARGET_EFAULT;
7081 goto fail;
7083 for(i = 0;i < gidsetsize; i++)
7084 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
7085 unlock_user(target_grouplist, arg2, 0);
7086 ret = get_errno(setgroups(gidsetsize, grouplist));
7088 break;
7089 case TARGET_NR_fchown:
7090 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
7091 break;
7092 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
7093 case TARGET_NR_fchownat:
7094 if (!(p = lock_user_string(arg2)))
7095 goto efault;
7096 ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
7097 unlock_user(p, arg2, 0);
7098 break;
7099 #endif
7100 #ifdef TARGET_NR_setresuid
7101 case TARGET_NR_setresuid:
7102 ret = get_errno(setresuid(low2highuid(arg1),
7103 low2highuid(arg2),
7104 low2highuid(arg3)));
7105 break;
7106 #endif
7107 #ifdef TARGET_NR_getresuid
7108 case TARGET_NR_getresuid:
7110 uid_t ruid, euid, suid;
7111 ret = get_errno(getresuid(&ruid, &euid, &suid));
7112 if (!is_error(ret)) {
7113 if (put_user_u16(high2lowuid(ruid), arg1)
7114 || put_user_u16(high2lowuid(euid), arg2)
7115 || put_user_u16(high2lowuid(suid), arg3))
7116 goto efault;
7119 break;
7120 #endif
7121 #ifdef TARGET_NR_getresgid
7122 case TARGET_NR_setresgid:
7123 ret = get_errno(setresgid(low2highgid(arg1),
7124 low2highgid(arg2),
7125 low2highgid(arg3)));
7126 break;
7127 #endif
7128 #ifdef TARGET_NR_getresgid
7129 case TARGET_NR_getresgid:
7131 gid_t rgid, egid, sgid;
7132 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7133 if (!is_error(ret)) {
7134 if (put_user_u16(high2lowgid(rgid), arg1)
7135 || put_user_u16(high2lowgid(egid), arg2)
7136 || put_user_u16(high2lowgid(sgid), arg3))
7137 goto efault;
7140 break;
7141 #endif
7142 case TARGET_NR_chown:
7143 if (!(p = lock_user_string(arg1)))
7144 goto efault;
7145 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
7146 unlock_user(p, arg1, 0);
7147 break;
7148 case TARGET_NR_setuid:
7149 ret = get_errno(setuid(low2highuid(arg1)));
7150 break;
7151 case TARGET_NR_setgid:
7152 ret = get_errno(setgid(low2highgid(arg1)));
7153 break;
7154 case TARGET_NR_setfsuid:
7155 ret = get_errno(setfsuid(arg1));
7156 break;
7157 case TARGET_NR_setfsgid:
7158 ret = get_errno(setfsgid(arg1));
7159 break;
7161 #ifdef TARGET_NR_lchown32
7162 case TARGET_NR_lchown32:
7163 if (!(p = lock_user_string(arg1)))
7164 goto efault;
7165 ret = get_errno(lchown(p, arg2, arg3));
7166 unlock_user(p, arg1, 0);
7167 break;
7168 #endif
7169 #ifdef TARGET_NR_getuid32
7170 case TARGET_NR_getuid32:
7171 ret = get_errno(getuid());
7172 break;
7173 #endif
7175 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
7176 /* Alpha specific */
7177 case TARGET_NR_getxuid:
7179 uid_t euid;
7180            euid = geteuid();
7181            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
7183 ret = get_errno(getuid());
7184 break;
7185 #endif
7186 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
7187 /* Alpha specific */
7188 case TARGET_NR_getxgid:
7190 uid_t egid;
7191            egid = getegid();
7192            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
7194 ret = get_errno(getgid());
7195 break;
7196 #endif
7197 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
7198 /* Alpha specific */
7199 case TARGET_NR_osf_getsysinfo:
7200 ret = -TARGET_EOPNOTSUPP;
7201 switch (arg1) {
7202 case TARGET_GSI_IEEE_FP_CONTROL:
7204 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
7206 /* Copied from linux ieee_fpcr_to_swcr. */
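                /* (The complemented ~fpcr terms reflect that the FPCR holds
                   trap-disable bits where the SWCR holds trap-enable bits.) */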
7207 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
7208 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
7209 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
7210 | SWCR_TRAP_ENABLE_DZE
7211 | SWCR_TRAP_ENABLE_OVF);
7212 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
7213 | SWCR_TRAP_ENABLE_INE);
7214 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
7215 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
7217 if (put_user_u64 (swcr, arg2))
7218 goto efault;
7219 ret = 0;
7221 break;
7223 /* case GSI_IEEE_STATE_AT_SIGNAL:
7224 -- Not implemented in linux kernel.
7225 case GSI_UACPROC:
7226 -- Retrieves current unaligned access state; not much used.
7227 case GSI_PROC_TYPE:
7228 -- Retrieves implver information; surely not used.
7229 case GSI_GET_HWRPB:
7230 -- Grabs a copy of the HWRPB; surely not used.
7233 break;
7234 #endif
7235 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
7236 /* Alpha specific */
7237 case TARGET_NR_osf_setsysinfo:
7238 ret = -TARGET_EOPNOTSUPP;
7239 switch (arg1) {
7240 case TARGET_SSI_IEEE_FP_CONTROL:
7241 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
7243 uint64_t swcr, fpcr, orig_fpcr;
7245 if (get_user_u64 (swcr, arg2))
7246 goto efault;
7247 orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
7248 fpcr = orig_fpcr & FPCR_DYN_MASK;
7250 /* Copied from linux ieee_swcr_to_fpcr. */
7251 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
7252 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
7253 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
7254 | SWCR_TRAP_ENABLE_DZE
7255 | SWCR_TRAP_ENABLE_OVF)) << 48;
7256 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
7257 | SWCR_TRAP_ENABLE_INE)) << 57;
7258 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
7259 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
7261 cpu_alpha_store_fpcr (cpu_env, fpcr);
7262 ret = 0;
7264 if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
7265 /* Old exceptions are not signaled. */
7266 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7268                 /* If any exceptions set by this call are unmasked,
7269 send a signal. */
7270 /* ??? FIXME */
7273 break;
7275 /* case SSI_NVPAIRS:
7276 -- Used with SSIN_UACPROC to enable unaligned accesses.
7277 case SSI_IEEE_STATE_AT_SIGNAL:
7278 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
7279 -- Not implemented in linux kernel
7282 break;
7283 #endif
7284 #ifdef TARGET_NR_osf_sigprocmask
7285 /* Alpha specific. */
7286 case TARGET_NR_osf_sigprocmask:
7288 abi_ulong mask;
7289 int how;
7290 sigset_t set, oldset;
7292 switch(arg1) {
7293 case TARGET_SIG_BLOCK:
7294 how = SIG_BLOCK;
7295 break;
7296 case TARGET_SIG_UNBLOCK:
7297 how = SIG_UNBLOCK;
7298 break;
7299 case TARGET_SIG_SETMASK:
7300 how = SIG_SETMASK;
7301 break;
7302 default:
7303 ret = -TARGET_EINVAL;
7304 goto fail;
7306 mask = arg2;
7307 target_to_host_old_sigset(&set, &mask);
7308 sigprocmask(how, &set, &oldset);
7309 host_to_target_old_sigset(&mask, &oldset);
7310 ret = mask;
7312 break;
7313 #endif
7315 #ifdef TARGET_NR_getgid32
7316 case TARGET_NR_getgid32:
7317 ret = get_errno(getgid());
7318 break;
7319 #endif
7320 #ifdef TARGET_NR_geteuid32
7321 case TARGET_NR_geteuid32:
7322 ret = get_errno(geteuid());
7323 break;
7324 #endif
7325 #ifdef TARGET_NR_getegid32
7326 case TARGET_NR_getegid32:
7327 ret = get_errno(getegid());
7328 break;
7329 #endif
7330 #ifdef TARGET_NR_setreuid32
7331 case TARGET_NR_setreuid32:
7332 ret = get_errno(setreuid(arg1, arg2));
7333 break;
7334 #endif
7335 #ifdef TARGET_NR_setregid32
7336 case TARGET_NR_setregid32:
7337 ret = get_errno(setregid(arg1, arg2));
7338 break;
7339 #endif
7340 #ifdef TARGET_NR_getgroups32
7341 case TARGET_NR_getgroups32:
7343 int gidsetsize = arg1;
7344 uint32_t *target_grouplist;
7345 gid_t *grouplist;
7346 int i;
7348 grouplist = alloca(gidsetsize * sizeof(gid_t));
7349 ret = get_errno(getgroups(gidsetsize, grouplist));
7350 if (gidsetsize == 0)
7351 break;
7352 if (!is_error(ret)) {
7353 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
7354 if (!target_grouplist) {
7355 ret = -TARGET_EFAULT;
7356 goto fail;
7358 for(i = 0;i < ret; i++)
7359 target_grouplist[i] = tswap32(grouplist[i]);
7360 unlock_user(target_grouplist, arg2, gidsetsize * 4);
7363 break;
7364 #endif
7365 #ifdef TARGET_NR_setgroups32
7366 case TARGET_NR_setgroups32:
7368 int gidsetsize = arg1;
7369 uint32_t *target_grouplist;
7370 gid_t *grouplist;
7371 int i;
7373 grouplist = alloca(gidsetsize * sizeof(gid_t));
7374 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
7375 if (!target_grouplist) {
7376 ret = -TARGET_EFAULT;
7377 goto fail;
7379 for(i = 0;i < gidsetsize; i++)
7380 grouplist[i] = tswap32(target_grouplist[i]);
7381 unlock_user(target_grouplist, arg2, 0);
7382 ret = get_errno(setgroups(gidsetsize, grouplist));
7384 break;
7385 #endif
7386 #ifdef TARGET_NR_fchown32
7387 case TARGET_NR_fchown32:
7388 ret = get_errno(fchown(arg1, arg2, arg3));
7389 break;
7390 #endif
7391 #ifdef TARGET_NR_setresuid32
7392 case TARGET_NR_setresuid32:
7393 ret = get_errno(setresuid(arg1, arg2, arg3));
7394 break;
7395 #endif
7396 #ifdef TARGET_NR_getresuid32
7397 case TARGET_NR_getresuid32:
7399 uid_t ruid, euid, suid;
7400 ret = get_errno(getresuid(&ruid, &euid, &suid));
7401 if (!is_error(ret)) {
7402 if (put_user_u32(ruid, arg1)
7403 || put_user_u32(euid, arg2)
7404 || put_user_u32(suid, arg3))
7405 goto efault;
7408 break;
7409 #endif
7410 #ifdef TARGET_NR_setresgid32
7411 case TARGET_NR_setresgid32:
7412 ret = get_errno(setresgid(arg1, arg2, arg3));
7413 break;
7414 #endif
7415 #ifdef TARGET_NR_getresgid32
7416 case TARGET_NR_getresgid32:
7418 gid_t rgid, egid, sgid;
7419 ret = get_errno(getresgid(&rgid, &egid, &sgid));
7420 if (!is_error(ret)) {
7421 if (put_user_u32(rgid, arg1)
7422 || put_user_u32(egid, arg2)
7423 || put_user_u32(sgid, arg3))
7424 goto efault;
7427 break;
7428 #endif
7429 #ifdef TARGET_NR_chown32
7430 case TARGET_NR_chown32:
7431 if (!(p = lock_user_string(arg1)))
7432 goto efault;
7433 ret = get_errno(chown(p, arg2, arg3));
7434 unlock_user(p, arg1, 0);
7435 break;
7436 #endif
7437 #ifdef TARGET_NR_setuid32
7438 case TARGET_NR_setuid32:
7439 ret = get_errno(setuid(arg1));
7440 break;
7441 #endif
7442 #ifdef TARGET_NR_setgid32
7443 case TARGET_NR_setgid32:
7444 ret = get_errno(setgid(arg1));
7445 break;
7446 #endif
7447 #ifdef TARGET_NR_setfsuid32
7448 case TARGET_NR_setfsuid32:
7449 ret = get_errno(setfsuid(arg1));
7450 break;
7451 #endif
7452 #ifdef TARGET_NR_setfsgid32
7453 case TARGET_NR_setfsgid32:
7454 ret = get_errno(setfsgid(arg1));
7455 break;
7456 #endif
7458 case TARGET_NR_pivot_root:
7459 goto unimplemented;
7460 #ifdef TARGET_NR_mincore
7461 case TARGET_NR_mincore:
7463 void *a;
7464 ret = -TARGET_EFAULT;
7465 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
7466 goto efault;
7467 if (!(p = lock_user_string(arg3)))
7468 goto mincore_fail;
7469 ret = get_errno(mincore(a, arg2, p));
7470 unlock_user(p, arg3, ret);
7471 mincore_fail:
7472 unlock_user(a, arg1, 0);
7474 break;
7475 #endif
7476 #ifdef TARGET_NR_arm_fadvise64_64
7477 case TARGET_NR_arm_fadvise64_64:
7480 * arm_fadvise64_64 looks like fadvise64_64 but
7481      * with a different argument order
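     * (On ARM the advice value is passed before the 64-bit offset/length
     * pair rather than last, so the registers are re-shuffled here before
     * falling through to the common fadvise handling below.)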
7483 abi_long temp;
7484 temp = arg3;
7485 arg3 = arg4;
7486 arg4 = temp;
7488 #endif
7489 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
7490 #ifdef TARGET_NR_fadvise64_64
7491 case TARGET_NR_fadvise64_64:
7492 #endif
7493 #ifdef TARGET_NR_fadvise64
7494 case TARGET_NR_fadvise64:
7495 #endif
7496 #ifdef TARGET_S390X
7497 switch (arg4) {
7498 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
7499 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
7500 case 6: arg4 = POSIX_FADV_DONTNEED; break;
7501 case 7: arg4 = POSIX_FADV_NOREUSE; break;
7502 default: break;
7504 #endif
7505 ret = -posix_fadvise(arg1, arg2, arg3, arg4);
7506 break;
7507 #endif
7508 #ifdef TARGET_NR_madvise
7509 case TARGET_NR_madvise:
7510 /* A straight passthrough may not be safe because qemu sometimes
7511            turns private file-backed mappings into anonymous mappings.
7512 This will break MADV_DONTNEED.
7513 This is a hint, so ignoring and returning success is ok. */
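        /* (For MADV_DONTNEED the difference matters: anonymous pages are
           simply discarded and read back as zeroes, while file-backed pages
           would be re-read from the file, so honouring the hint on a silently
           converted mapping could throw away guest data.) */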
7514 ret = get_errno(0);
7515 break;
7516 #endif
7517 #if TARGET_ABI_BITS == 32
7518 case TARGET_NR_fcntl64:
7520 int cmd;
7521 struct flock64 fl;
7522 struct target_flock64 *target_fl;
7523 #ifdef TARGET_ARM
7524 struct target_eabi_flock64 *target_efl;
7525 #endif
7527 cmd = target_to_host_fcntl_cmd(arg2);
7528 if (cmd == -TARGET_EINVAL)
7529 return cmd;
7531 switch(arg2) {
7532 case TARGET_F_GETLK64:
7533 #ifdef TARGET_ARM
7534 if (((CPUARMState *)cpu_env)->eabi) {
7535 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7536 goto efault;
7537 fl.l_type = tswap16(target_efl->l_type);
7538 fl.l_whence = tswap16(target_efl->l_whence);
7539 fl.l_start = tswap64(target_efl->l_start);
7540 fl.l_len = tswap64(target_efl->l_len);
7541 fl.l_pid = tswap32(target_efl->l_pid);
7542 unlock_user_struct(target_efl, arg3, 0);
7543 } else
7544 #endif
7546 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7547 goto efault;
7548 fl.l_type = tswap16(target_fl->l_type);
7549 fl.l_whence = tswap16(target_fl->l_whence);
7550 fl.l_start = tswap64(target_fl->l_start);
7551 fl.l_len = tswap64(target_fl->l_len);
7552 fl.l_pid = tswap32(target_fl->l_pid);
7553 unlock_user_struct(target_fl, arg3, 0);
7555 ret = get_errno(fcntl(arg1, cmd, &fl));
7556 if (ret == 0) {
7557 #ifdef TARGET_ARM
7558 if (((CPUARMState *)cpu_env)->eabi) {
7559 if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
7560 goto efault;
7561 target_efl->l_type = tswap16(fl.l_type);
7562 target_efl->l_whence = tswap16(fl.l_whence);
7563 target_efl->l_start = tswap64(fl.l_start);
7564 target_efl->l_len = tswap64(fl.l_len);
7565 target_efl->l_pid = tswap32(fl.l_pid);
7566 unlock_user_struct(target_efl, arg3, 1);
7567 } else
7568 #endif
7570 if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
7571 goto efault;
7572 target_fl->l_type = tswap16(fl.l_type);
7573 target_fl->l_whence = tswap16(fl.l_whence);
7574 target_fl->l_start = tswap64(fl.l_start);
7575 target_fl->l_len = tswap64(fl.l_len);
7576 target_fl->l_pid = tswap32(fl.l_pid);
7577 unlock_user_struct(target_fl, arg3, 1);
7580 break;
7582 case TARGET_F_SETLK64:
7583 case TARGET_F_SETLKW64:
7584 #ifdef TARGET_ARM
7585 if (((CPUARMState *)cpu_env)->eabi) {
7586 if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
7587 goto efault;
7588 fl.l_type = tswap16(target_efl->l_type);
7589 fl.l_whence = tswap16(target_efl->l_whence);
7590 fl.l_start = tswap64(target_efl->l_start);
7591 fl.l_len = tswap64(target_efl->l_len);
7592 fl.l_pid = tswap32(target_efl->l_pid);
7593 unlock_user_struct(target_efl, arg3, 0);
7594 } else
7595 #endif
7597 if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
7598 goto efault;
7599 fl.l_type = tswap16(target_fl->l_type);
7600 fl.l_whence = tswap16(target_fl->l_whence);
7601 fl.l_start = tswap64(target_fl->l_start);
7602 fl.l_len = tswap64(target_fl->l_len);
7603 fl.l_pid = tswap32(target_fl->l_pid);
7604 unlock_user_struct(target_fl, arg3, 0);
7606 ret = get_errno(fcntl(arg1, cmd, &fl));
7607 break;
7608 default:
7609 ret = do_fcntl(arg1, arg2, arg3);
7610 break;
7612 break;
7614 #endif
7615 #ifdef TARGET_NR_cacheflush
7616 case TARGET_NR_cacheflush:
7617 /* self-modifying code is handled automatically, so nothing needed */
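        /* (qemu write-protects pages containing translated code and
           invalidates the affected translation blocks when the guest writes
           to them, so an explicit flush would be redundant.) */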
7618 ret = 0;
7619 break;
7620 #endif
7621 #ifdef TARGET_NR_security
7622 case TARGET_NR_security:
7623 goto unimplemented;
7624 #endif
7625 #ifdef TARGET_NR_getpagesize
7626 case TARGET_NR_getpagesize:
7627 ret = TARGET_PAGE_SIZE;
7628 break;
7629 #endif
7630 case TARGET_NR_gettid:
7631 ret = get_errno(gettid());
7632 break;
7633 #ifdef TARGET_NR_readahead
7634 case TARGET_NR_readahead:
7635 #if TARGET_ABI_BITS == 32
7636 if (regpairs_aligned(cpu_env)) {
7637 arg2 = arg3;
7638 arg3 = arg4;
7639 arg4 = arg5;
7641 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
7642 #else
7643 ret = get_errno(readahead(arg1, arg2, arg3));
7644 #endif
7645 break;
7646 #endif
7647 #ifdef CONFIG_ATTR
7648 #ifdef TARGET_NR_setxattr
7649 case TARGET_NR_lsetxattr:
7650 case TARGET_NR_fsetxattr:
7651 case TARGET_NR_lgetxattr:
7652 case TARGET_NR_fgetxattr:
7653 case TARGET_NR_listxattr:
7654 case TARGET_NR_llistxattr:
7655 case TARGET_NR_flistxattr:
7656 case TARGET_NR_lremovexattr:
7657 case TARGET_NR_fremovexattr:
7658 ret = -TARGET_EOPNOTSUPP;
7659 break;
7660 case TARGET_NR_setxattr:
7662 void *p, *n, *v;
7663 p = lock_user_string(arg1);
7664 n = lock_user_string(arg2);
7665 v = lock_user(VERIFY_READ, arg3, arg4, 1);
7666 if (p && n && v) {
7667 ret = get_errno(setxattr(p, n, v, arg4, arg5));
7668 } else {
7669 ret = -TARGET_EFAULT;
7671 unlock_user(p, arg1, 0);
7672 unlock_user(n, arg2, 0);
7673 unlock_user(v, arg3, 0);
7675 break;
7676 case TARGET_NR_getxattr:
7678 void *p, *n, *v;
7679 p = lock_user_string(arg1);
7680 n = lock_user_string(arg2);
7681 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
7682 if (p && n && v) {
7683 ret = get_errno(getxattr(p, n, v, arg4));
7684 } else {
7685 ret = -TARGET_EFAULT;
7687 unlock_user(p, arg1, 0);
7688 unlock_user(n, arg2, 0);
7689 unlock_user(v, arg3, arg4);
7691 break;
7692 case TARGET_NR_removexattr:
7694 void *p, *n;
7695 p = lock_user_string(arg1);
7696 n = lock_user_string(arg2);
7697 if (p && n) {
7698 ret = get_errno(removexattr(p, n));
7699 } else {
7700 ret = -TARGET_EFAULT;
7702 unlock_user(p, arg1, 0);
7703 unlock_user(n, arg2, 0);
7705 break;
7706 #endif
7707 #endif /* CONFIG_ATTR */
7708 #ifdef TARGET_NR_set_thread_area
7709 case TARGET_NR_set_thread_area:
7710 #if defined(TARGET_MIPS)
7711 ((CPUMIPSState *) cpu_env)->tls_value = arg1;
7712 ret = 0;
7713 break;
7714 #elif defined(TARGET_CRIS)
7715 if (arg1 & 0xff)
7716 ret = -TARGET_EINVAL;
7717 else {
7718 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
7719 ret = 0;
7721 break;
7722 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
7723 ret = do_set_thread_area(cpu_env, arg1);
7724 break;
7725 #else
7726 goto unimplemented_nowarn;
7727 #endif
7728 #endif
7729 #ifdef TARGET_NR_get_thread_area
7730 case TARGET_NR_get_thread_area:
7731 #if defined(TARGET_I386) && defined(TARGET_ABI32)
7732 ret = do_get_thread_area(cpu_env, arg1);
7733 #else
7734 goto unimplemented_nowarn;
7735 #endif
7736 #endif
7737 #ifdef TARGET_NR_getdomainname
7738 case TARGET_NR_getdomainname:
7739 goto unimplemented_nowarn;
7740 #endif
7742 #ifdef TARGET_NR_clock_gettime
7743 case TARGET_NR_clock_gettime:
7745 struct timespec ts;
7746 ret = get_errno(clock_gettime(arg1, &ts));
7747 if (!is_error(ret)) {
7748 host_to_target_timespec(arg2, &ts);
7750 break;
7752 #endif
7753 #ifdef TARGET_NR_clock_getres
7754 case TARGET_NR_clock_getres:
7756 struct timespec ts;
7757 ret = get_errno(clock_getres(arg1, &ts));
7758 if (!is_error(ret)) {
7759 host_to_target_timespec(arg2, &ts);
7761 break;
7763 #endif
7764 #ifdef TARGET_NR_clock_nanosleep
7765 case TARGET_NR_clock_nanosleep:
7767 struct timespec ts;
7768 target_to_host_timespec(&ts, arg3);
7769 ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
7770 if (arg4)
7771 host_to_target_timespec(arg4, &ts);
7772 break;
7774 #endif
7776 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
7777 case TARGET_NR_set_tid_address:
7778 ret = get_errno(set_tid_address((int *)g2h(arg1)));
7779 break;
7780 #endif
7782 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
7783 case TARGET_NR_tkill:
7784 ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
7785 break;
7786 #endif
7788 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
7789 case TARGET_NR_tgkill:
7790 ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
7791 target_to_host_signal(arg3)));
7792 break;
7793 #endif
7795 #ifdef TARGET_NR_set_robust_list
7796 case TARGET_NR_set_robust_list:
7797 goto unimplemented_nowarn;
7798 #endif
7800 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
7801 case TARGET_NR_utimensat:
7803 struct timespec *tsp, ts[2];
7804 if (!arg3) {
7805 tsp = NULL;
7806 } else {
7807 target_to_host_timespec(ts, arg3);
7808 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
7809 tsp = ts;
7811 if (!arg2)
7812 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
7813 else {
7814 if (!(p = lock_user_string(arg2))) {
7815 ret = -TARGET_EFAULT;
7816 goto fail;
7818 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
7819 unlock_user(p, arg2, 0);
7822 break;
7823 #endif
7824 #if defined(CONFIG_USE_NPTL)
7825 case TARGET_NR_futex:
7826 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
7827 break;
7828 #endif
7829 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
7830 case TARGET_NR_inotify_init:
7831 ret = get_errno(sys_inotify_init());
7832 break;
7833 #endif
7834 #ifdef CONFIG_INOTIFY1
7835 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
7836 case TARGET_NR_inotify_init1:
7837 ret = get_errno(sys_inotify_init1(arg1));
7838 break;
7839 #endif
7840 #endif
7841 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
7842 case TARGET_NR_inotify_add_watch:
7843 p = lock_user_string(arg2);
7844 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
7845 unlock_user(p, arg2, 0);
7846 break;
7847 #endif
7848 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
7849 case TARGET_NR_inotify_rm_watch:
7850 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
7851 break;
7852 #endif
7854 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
7855 case TARGET_NR_mq_open:
7857 struct mq_attr posix_mq_attr;
7859 p = lock_user_string(arg1 - 1);
7860 if (arg4 != 0)
7861 copy_from_user_mq_attr (&posix_mq_attr, arg4);
7862             ret = get_errno(mq_open(p, arg2, arg3, arg4 != 0 ? &posix_mq_attr : NULL));
7863 unlock_user (p, arg1, 0);
7865 break;
7867 case TARGET_NR_mq_unlink:
7868 p = lock_user_string(arg1 - 1);
7869 ret = get_errno(mq_unlink(p));
7870 unlock_user (p, arg1, 0);
7871 break;
7873 case TARGET_NR_mq_timedsend:
7875 struct timespec ts;
7877 p = lock_user (VERIFY_READ, arg2, arg3, 1);
7878 if (arg5 != 0) {
7879 target_to_host_timespec(&ts, arg5);
7880 ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
7881 host_to_target_timespec(arg5, &ts);
7883 else
7884 ret = get_errno(mq_send(arg1, p, arg3, arg4));
7885 unlock_user (p, arg2, arg3);
7887 break;
7889 case TARGET_NR_mq_timedreceive:
7891 struct timespec ts;
7892 unsigned int prio;
7894             p = lock_user (VERIFY_WRITE, arg2, arg3, 0);
7895 if (arg5 != 0) {
7896 target_to_host_timespec(&ts, arg5);
7897 ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
7898 host_to_target_timespec(arg5, &ts);
7900 else
7901 ret = get_errno(mq_receive(arg1, p, arg3, &prio));
7902 unlock_user (p, arg2, arg3);
7903 if (arg4 != 0)
7904 put_user_u32(prio, arg4);
7906 break;
7908 /* Not implemented for now... */
7909 /* case TARGET_NR_mq_notify: */
7910 /* break; */
7912 case TARGET_NR_mq_getsetattr:
7914 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
7915 ret = 0;
7916 if (arg3 != 0) {
7917 ret = mq_getattr(arg1, &posix_mq_attr_out);
7918 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
7920 if (arg2 != 0) {
7921 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
7922 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
7926 break;
7927 #endif
7929 #ifdef CONFIG_SPLICE
7930 #ifdef TARGET_NR_tee
7931 case TARGET_NR_tee:
7933         ret = get_errno(tee(arg1, arg2, arg3, arg4));
7935 break;
7936 #endif
7937 #ifdef TARGET_NR_splice
7938 case TARGET_NR_splice:
7940 loff_t loff_in, loff_out;
7941 loff_t *ploff_in = NULL, *ploff_out = NULL;
7942         if (arg2) {
7943 get_user_u64(loff_in, arg2);
7944 ploff_in = &loff_in;
7946         if (arg4) {
7947             get_user_u64(loff_out, arg4);
7948 ploff_out = &loff_out;
7950 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
7952 break;
7953 #endif
7954 #ifdef TARGET_NR_vmsplice
7955 case TARGET_NR_vmsplice:
7957 int count = arg3;
7958 struct iovec *vec;
7960 vec = alloca(count * sizeof(struct iovec));
7961 if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
7962 goto efault;
7963 ret = get_errno(vmsplice(arg1, vec, count, arg4));
7964 unlock_iovec(vec, arg2, count, 0);
7966 break;
7967 #endif
7968 #endif /* CONFIG_SPLICE */
7969 #ifdef CONFIG_EVENTFD
7970 #if defined(TARGET_NR_eventfd)
7971 case TARGET_NR_eventfd:
7972 ret = get_errno(eventfd(arg1, 0));
7973 break;
7974 #endif
7975 #if defined(TARGET_NR_eventfd2)
7976 case TARGET_NR_eventfd2:
7977 ret = get_errno(eventfd(arg1, arg2));
7978 break;
7979 #endif
7980 #endif /* CONFIG_EVENTFD */
7981 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
7982 case TARGET_NR_fallocate:
7983 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
7984 break;
7985 #endif
7986 #if defined(CONFIG_SYNC_FILE_RANGE)
7987 #if defined(TARGET_NR_sync_file_range)
7988 case TARGET_NR_sync_file_range:
7989 #if TARGET_ABI_BITS == 32
7990 #if defined(TARGET_MIPS)
7991 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
7992 target_offset64(arg5, arg6), arg7));
7993 #else
7994 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
7995 target_offset64(arg4, arg5), arg6));
7996 #endif /* !TARGET_MIPS */
7997 #else
7998 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
7999 #endif
8000 break;
8001 #endif
8002 #if defined(TARGET_NR_sync_file_range2)
8003 case TARGET_NR_sync_file_range2:
8004 /* This is like sync_file_range but the arguments are reordered */
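        /* sync_file_range(fd, offset, nbytes, flags) vs.
           sync_file_range2(fd, flags, offset, nbytes): passing flags earlier
           keeps the two 64-bit values in aligned register pairs on 32-bit ABIs. */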
8005 #if TARGET_ABI_BITS == 32
8006 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
8007 target_offset64(arg5, arg6), arg2));
8008 #else
8009 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
8010 #endif
8011 break;
8012 #endif
8013 #endif
8014 #if defined(CONFIG_EPOLL)
8015 #if defined(TARGET_NR_epoll_create)
8016 case TARGET_NR_epoll_create:
8017 ret = get_errno(epoll_create(arg1));
8018 break;
8019 #endif
8020 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
8021 case TARGET_NR_epoll_create1:
8022 ret = get_errno(epoll_create1(arg1));
8023 break;
8024 #endif
8025 #if defined(TARGET_NR_epoll_ctl)
8026 case TARGET_NR_epoll_ctl:
8027 {
8028 struct epoll_event ep;
8029 struct epoll_event *epp = 0;
8030 if (arg4) {
8031 struct target_epoll_event *target_ep;
8032 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
8033 goto efault;
8034 }
8035 ep.events = tswap32(target_ep->events);
8036 /* The epoll_data_t union is just opaque data to the kernel,
8037 * so we transfer all 64 bits across and need not worry what
8038 * actual data type it is.
8039 */
8040 ep.data.u64 = tswap64(target_ep->data.u64);
8041 unlock_user_struct(target_ep, arg4, 0);
8042 epp = &ep;
8043 }
8044 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
8045 break;
8046 }
8047 #endif
8049 #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
8050 #define IMPLEMENT_EPOLL_PWAIT
8051 #endif
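/* epoll_wait and epoll_pwait share one implementation: events are collected
 * into a host-side buffer and converted back to target byte order before
 * being copied out to the guest. */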
8052 #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
8053 #if defined(TARGET_NR_epoll_wait)
8054 case TARGET_NR_epoll_wait:
8055 #endif
8056 #if defined(IMPLEMENT_EPOLL_PWAIT)
8057 case TARGET_NR_epoll_pwait:
8058 #endif
8059 {
8060 struct target_epoll_event *target_ep;
8061 struct epoll_event *ep;
8062 int epfd = arg1;
8063 int maxevents = arg3;
8064 int timeout = arg4;
8066 target_ep = lock_user(VERIFY_WRITE, arg2,
8067 maxevents * sizeof(struct target_epoll_event), 1);
8068 if (!target_ep) {
8069 goto efault;
8070 }
8072 ep = alloca(maxevents * sizeof(struct epoll_event));
8074 switch (num) {
8075 #if defined(IMPLEMENT_EPOLL_PWAIT)
8076 case TARGET_NR_epoll_pwait:
8077 {
8078 target_sigset_t *target_set;
8079 sigset_t _set, *set = &_set;
8081 if (arg5) {
8082 target_set = lock_user(VERIFY_READ, arg5,
8083 sizeof(target_sigset_t), 1);
8084 if (!target_set) {
8085 unlock_user(target_ep, arg2, 0);
8086 goto efault;
8087 }
8088 target_to_host_sigset(set, target_set);
8089 unlock_user(target_set, arg5, 0);
8090 } else {
8091 set = NULL;
8092 }
8094 ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
8095 break;
8096 }
8097 #endif
8098 #if defined(TARGET_NR_epoll_wait)
8099 case TARGET_NR_epoll_wait:
8100 ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
8101 break;
8102 #endif
8103 default:
8104 ret = -TARGET_ENOSYS;
8105 }
8106 if (!is_error(ret)) {
8107 int i;
8108 for (i = 0; i < ret; i++) {
8109 target_ep[i].events = tswap32(ep[i].events);
8110 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
8111 }
8112 }
8113 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
8114 break;
8115 }
8116 #endif
8117 #endif
8118 #ifdef TARGET_NR_prlimit64
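/* prlimit64 uses fixed-width 64-bit rlimit fields on both sides, so only the
 * byte order needs converting between target and host. */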
8119 case TARGET_NR_prlimit64:
8120 {
8121 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
8122 struct target_rlimit64 *target_rnew, *target_rold;
8123 struct host_rlimit64 rnew, rold, *rnewp = 0;
8124 if (arg3) {
8125 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
8126 goto efault;
8127 }
8128 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
8129 rnew.rlim_max = tswap64(target_rnew->rlim_max);
8130 unlock_user_struct(target_rnew, arg3, 0);
8131 rnewp = &rnew;
8132 }
8134 ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
8135 if (!is_error(ret) && arg4) {
8136 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
8137 goto efault;
8138 }
8139 target_rold->rlim_cur = tswap64(rold.rlim_cur);
8140 target_rold->rlim_max = tswap64(rold.rlim_max);
8141 unlock_user_struct(target_rold, arg4, 1);
8142 }
8143 break;
8144 }
8145 #endif
8146 default:
8147 unimplemented:
8148 gemu_log("qemu: Unsupported syscall: %d\n", num);
8149 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
8150 unimplemented_nowarn:
8151 #endif
8152 ret = -TARGET_ENOSYS;
8153 break;
8154 }
8155 fail:
8156 #ifdef DEBUG
8157 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
8158 #endif
8159 if(do_strace)
8160 print_syscall_ret(num, ret);
8161 return ret;
8162 efault:
8163 ret = -TARGET_EFAULT;
8164 goto fail;
8165 }