/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/path.h"
#include "qemu/memfd.h"
#include "qemu/queue.h"
#include "qemu/plugin.h"
#include "tcg/startup.h"
#include "target_mman.h"
#include "exec/page-protection.h"
#include <sys/mount.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/timex.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/signalfd.h>
//#include <sys/user.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/if_tun.h>
#include <linux/in6.h>
#include <linux/errqueue.h>
#include <linux/random.h>
#include <sys/timerfd.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu/xattr.h"
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#ifdef HAVE_SYS_KCOV_H
#include <sys/kcov.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#include <linux/fd.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#if defined(CONFIG_USBFS)
#include <linux/usbdevice_fs.h>
#include <linux/usb/ch9.h>
#endif
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include <netpacket/packet.h>
#include <linux/netlink.h>
#include <linux/if_alg.h>
#include <linux/rtc.h>
#include <sound/asound.h>
#ifdef HAVE_BTRFS_H
#include <linux/btrfs.h>
#endif
#ifdef HAVE_DRM_H
#include <libdrm/drm.h>
#include <libdrm/i915_drm.h>
#endif
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "user/safe-syscall.h"
#include "qemu/guest-random.h"
#include "qemu/selfmap.h"
#include "user/syscall-trace.h"
#include "special-errno.h"
#include "qapi/error.h"
#include "fd-trans.h"
#include "cpu_loop-common.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)
#ifndef CLONE_PIDFD
# define CLONE_PIDFD 0x00001000
#endif

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS                               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD |         \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                              \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
       CLONE_IGNORED_FLAGS))
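/*
 * Classification example (illustrative; these are glibc internals, not part
 * of this file): a glibc pthread_create() issues clone() with CLONE_VM |
 * CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM
 * plus CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID, i.e. all
 * of CLONE_THREAD_FLAGS and nothing in CLONE_INVALID_THREAD_FLAGS, while a
 * plain fork() passes only SIGCHLD in the CSIGNAL bits; both therefore pass
 * the mask checks above.
 */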
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH \
    _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
#define VFAT_IOCTL_READDIR_SHORT \
    _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
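/*
 * A sketch of how these numbers decode: _IOC(dir, type, nr, size) packs the
 * read direction, the 'r' ioctl class, command numbers 1 and 2, and a size
 * of two directory entries (a struct linux_dirent plus a 256-byte name),
 * mirroring the kernel's own VFAT readdir ioctl encoding.
 */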
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)           \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                 \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
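/*
 * Expansion example (illustration only): _syscall2(int, sys_getcwd1,
 * char *, buf, size_t, size) produces
 *
 *     static int sys_getcwd1(char *buf, size_t size)
 *     {
 *         return syscall(__NR_sys_getcwd1, buf, size);
 *     }
 *
 * which is why each wrapper needs a matching __NR_sys_* define below.
 */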
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#if defined(__NR_futex)
# define __NR_sys_futex __NR_futex
#endif
#if defined(__NR_futex_time64)
# define __NR_sys_futex_time64 __NR_futex_time64
#endif
#define __NR_sys_statx __NR_statx

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif

/* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
#ifndef TARGET_O_NONBLOCK_MASK
#define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
#endif

#define __NR_sys_gettid __NR_gettid
_syscall0(int, sys_gettid)
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
#if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
#define EMULATE_GETDENTS_WITH_GETDENTS
#endif
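/*
 * Illustration of the size problem: each host 32-bit linux_dirent grows
 * when converted to a 64-bit guest dirent (wider d_ino/d_off), so N host
 * records can exceed the guest's buffer after conversion; getdents64
 * records are at least as large as the converted guest records, which
 * avoids the overflow.
 */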
#if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
_syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp,
          unsigned int, count);
#endif
#if (defined(TARGET_NR_getdents) && \
      !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp,
          unsigned int, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
          loff_t *, res, unsigned int, wh);
#endif
_syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
_syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
          siginfo_t *, uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
#define __NR_sys_close_range __NR_close_range
_syscall3(int,sys_close_range,int,first,int,last,int,flags)
#ifndef CLOSE_RANGE_CLOEXEC
#define CLOSE_RANGE_CLOEXEC     (1U << 2)
#endif
#endif
#if defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
_syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
          unsigned int, flags);
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
#define __NR_sys_sched_getattr __NR_sched_getattr
_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, size, unsigned int, flags);
#define __NR_sys_sched_setattr __NR_sched_setattr
_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
          unsigned int, flags);
#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
_syscall1(int, sys_sched_getscheduler, pid_t, pid);
#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
          const struct sched_param *, param);
#define __NR_sys_sched_getparam __NR_sched_getparam
_syscall2(int, sys_sched_getparam, pid_t, pid,
          struct sched_param *, param);
#define __NR_sys_sched_setparam __NR_sched_setparam
_syscall2(int, sys_sched_setparam, pid_t, pid,
          const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg)
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
_syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
#endif

#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
_syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
          unsigned long, idx1, unsigned long, idx2)
#endif

/*
 * It is assumed that struct statx is architecture independent.
 */
#if defined(TARGET_NR_statx) && defined(__NR_statx)
_syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
          unsigned int, mask, struct target_statx *, statxbuf)
#endif
#if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
_syscall2(int, membarrier, int, cmd, int, flags)
#endif
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
};
_syscall2(int, sys_getcwd1, char *, buf, size_t, size)
#if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */

#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */

#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
#define GUEST_TIMER_MAX 32
static timer_t g_posix_timers[GUEST_TIMER_MAX];
static int g_posix_timer_allocated[GUEST_TIMER_MAX];

static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}

static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
#endif
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}

static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}

abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

const char *target_strerror(int err)
{
    if (err == QEMU_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
{
    int i;
    uint8_t b;

    if (usize <= ksize) {
        return 1;
    }

    for (i = ksize; i < usize; i++) {
        if (get_user_u8(b, addr + i)) {
            return -TARGET_EFAULT;
        }
        if (b) {
            return 0;
        }
    }
    return 1;
}
/*
 * Copies a target struct to a host struct, in a way that guarantees
 * backwards-compatibility for struct syscall arguments.
 *
 * Similar to the kernel's uaccess.h:copy_struct_from_user()
 */
int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
{
    size_t size = MIN(ksize, usize);
    size_t rest = MAX(ksize, usize) - size;

    /* Deal with trailing bytes. */
    if (usize < ksize) {
        memset(dst + size, 0, rest);
    } else if (usize > ksize) {
        int ret = check_zeroed_user(src, ksize, usize);
        if (ret <= 0) {
            return ret ?: -TARGET_E2BIG;
        }
    }
    /* Copy the interoperable parts of the struct. */
    if (copy_from_user(dst, src, size)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
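/*
 * Usage sketch (example sizes, not a real caller): for an extensible struct
 * such as sched_attr with ksize == 56, a guest passing usize == 48 has
 * bytes 48..55 of dst zero-filled, while a guest passing usize == 64
 * succeeds only if bytes 56..63 of src are zero and fails with
 * -TARGET_E2BIG otherwise, matching the kernel's extensible-struct rules.
 */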
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
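/*
 * Contract sketch (see user/safe-syscall.h for the real mechanism): unlike
 * the plain _syscallN wrappers above, a safe_syscall() only enters the host
 * syscall if no guest signal is pending; if one arrives first, the call
 * fails with errno set to QEMU_ERESTARTSYS so the caller can deliver the
 * signal and then restart the syscall.
 */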
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
              int, flags, mode_t, mode)
safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
              const struct open_how_ver0 *, how, size_t, size)
#if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
              struct rusage *, rusage)
#endif
safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
              int, options, struct rusage *, rusage)
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall5(int, execveat, int, dirfd, const char *, filename,
              char **, argv, char **, envp, int, flags)
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
#endif
#if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
              struct timespec *, tsp, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
              int, maxevents, int, timeout, const sigset_t *, sigmask,
              size_t, sigsetsize)
#endif
#if defined(__NR_futex)
safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#if defined(__NR_futex_time64)
safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
              const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
              socklen_t, addrlen)
safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
              int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
              int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
#if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
#endif
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
#if defined(TARGET_NR_nanosleep)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#endif
#if defined(TARGET_NR_clock_nanosleep) || \
    defined(TARGET_NR_clock_nanosleep_time64)
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_ipc
#if defined(__s390x__)
/* the s390x ipc syscall takes no sixth argument */
safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr)
#else
safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)
#endif
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
#endif
#ifdef __NR_msgrcv
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
#endif
#ifdef __NR_semtimedop
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedsend_time64)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_mq_timedreceive) || \
    defined(TARGET_NR_mq_timedreceive_time64)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
              int, outfd, loff_t *, poutoff, size_t, length,
              unsigned int, flags)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Since we always build with LFS enabled,
 * we should be using the 64-bit structures automatically.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
static abi_ulong target_brk, initial_target_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    initial_target_brk = target_brk;
}
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow to shrink below initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
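/*
 * Worked example (hypothetical addresses): with target_brk == 0x10000 and a
 * guest brk(0x13000), old_brk/new_brk page-align to 0x10000/0x13000 and the
 * gap is mapped anonymously; if another mapping already occupies that
 * range, MAP_FIXED_NOREPLACE makes target_mmap return a different address
 * and the guest sees the unchanged break (or -TARGET_ENOMEM on Alpha).
 */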
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#endif
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
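/*
 * Example: an Alpha host (HOST_HZ 1024) reporting 2048 ticks to a guest
 * with TARGET_HZ 100 yields 2048 * 100 / 1024 = 200 guest ticks; the
 * int64_t cast keeps the intermediate product from overflowing a long.
 */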
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1016 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1018 abi_ulong target_rlim_swap
;
1021 target_rlim_swap
= tswapal(target_rlim
);
1022 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1023 return RLIM_INFINITY
;
1025 result
= target_rlim_swap
;
1026 if (target_rlim_swap
!= (rlim_t
)result
)
1027 return RLIM_INFINITY
;
1033 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1034 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1036 abi_ulong target_rlim_swap
;
1039 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1040 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1042 target_rlim_swap
= rlim
;
1043 result
= tswapal(target_rlim_swap
);
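/*
 * Example of the saturation above: a 32-bit guest's TARGET_RLIM_INFINITY
 * (0xffffffff) must become the host's RLIM_INFINITY rather than a finite
 * 4GB-1 limit, and a host value too large for abi_ulong must read back as
 * TARGET_RLIM_INFINITY, so both directions clamp to infinity instead of
 * truncating.
 */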
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
#endif

static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}

#if defined(TARGET_NR_gettimeofday)
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif

#if defined(TARGET_NR_settimeofday)
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll,
                         bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
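/*
 * Example of the timeout conversion in the plain-poll path above: a guest
 * poll() timeout of 1500ms becomes { .tv_sec = 1, .tv_nsec = 500000000 }
 * for ppoll(), and any negative timeout becomes a NULL timespec, i.e.
 * wait forever.
 */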
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
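/*
 * Example: a guest sendmsg() carrying one SCM_RIGHTS cmsg with two file
 * descriptors arrives as a target_cmsghdr with 8 payload bytes; the loop
 * above rewrites TARGET_SOL_SOCKET to SOL_SOCKET and byte-swaps each int,
 * so the host kernel sees a native control message.
 */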
1844 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1845 struct msghdr
*msgh
)
1847 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1848 abi_long msg_controllen
;
1849 abi_ulong target_cmsg_addr
;
1850 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1851 socklen_t space
= 0;
1853 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1854 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1856 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1857 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1858 target_cmsg_start
= target_cmsg
;
1860 return -TARGET_EFAULT
;
1862 while (cmsg
&& target_cmsg
) {
1863 void *data
= CMSG_DATA(cmsg
);
1864 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1866 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1867 int tgt_len
, tgt_space
;
1869 /* We never copy a half-header but may copy half-data;
1870 * this is Linux's behaviour in put_cmsg(). Note that
1871 * truncation here is a guest problem (which we report
1872 * to the guest via the CTRUNC bit), unlike truncation
1873 * in target_to_host_cmsg, which is a QEMU bug.
1875 if (msg_controllen
            < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                                        (void *) &errh->offender,
                                        sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
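/*
 * Illustrative example of the truncation rule above (values assumed, not
 * from the original source): if the host delivers an SCM_RIGHTS payload of
 * three file descriptors (len == 12) but the guest supplied msg_controllen
 * for only two, tgt_len is clamped, MSG_CTRUNC is reported to the guest,
 * and only the first tgt_len / sizeof(int) descriptors are copied out.
 */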
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;

    switch (level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch (optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
        {
            struct ip_mreqn ip_mreq;
            struct target_ip_mreqn *target_smreqn;

            QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
                              sizeof(struct target_ip_mreq));

            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn)) {
                return -TARGET_EINVAL;
            }

            target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!target_smreqn) {
                return -TARGET_EFAULT;
            }
            ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
            ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
            if (optlen == sizeof(struct target_ip_mreqn)) {
                ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
                optlen = sizeof(struct ip_mreqn);
            }
            unlock_user(target_smreqn, optval_addr, 0);

            ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
            break;
        }
        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
        {
            struct ip_mreq_source *ip_mreq_source;

            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!alg_key) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            unlock_user(alg_key, optval_addr, optlen);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        {
            struct timeval tv;

            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                       optname == TARGET_SO_RCVTIMEO ?
                                           SO_RCVTIMEO : SO_SNDTIMEO,
                                       &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_ATTACH_FILTER:
        {
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                       SO_ATTACH_FILTER, &fprog,
                                       sizeof(fprog)));
            g_free(filter);

            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                       &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
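/*
 * Illustrative call flow (argument values assumed, not from the original
 * source): a guest setsockopt(fd, SOL_SOCKET, TARGET_SO_RCVTIMEO, &tv, len)
 * lands in the TARGET_SOL_SOCKET arm above, which insists that len matches
 * the guest's struct target_timeval, converts the value field by field via
 * copy_from_user_timeval(), and only then issues the host setsockopt()
 * with the host's SO_RCVTIMEO constant.
 */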
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch (level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (optname == SO_ERROR) {
            val = host_to_target_errno(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch (optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
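/*
 * Illustrative example (values assumed, not from the original source): on a
 * 32-bit target, TARGET_LONG_BITS / 2 == 16, so tlow = 0x89abcdef and
 * thigh = 0x01234567 recombine into off = 0x0123456789abcdef. Shifting
 * twice by half the width instead of once by the full width sidesteps
 * undefined behaviour when TARGET_LONG_BITS == 64, where a single 64-bit
 * shift of the 64-bit intermediate would be out of range.
 */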
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support. */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault. But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
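/*
 * Why the mapping above exists (hedged summary, not stated in the original
 * source): some targets number the basic socket types differently from the
 * host -- MIPS is the usual example -- so TARGET_SOCK_DGRAM and
 * TARGET_SOCK_STREAM cannot simply be passed through. The CLOEXEC and
 * NONBLOCK bits are translated separately because they are flag bits
 * OR-ed into the type argument rather than part of the type value itself.
 */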
/* Try to emulate socket type flags after socket creation. */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen + 1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen + 1);

    ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(safe_connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen + 1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len,
                                                           len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name,
                                                  msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
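/*
 * Note on the doubled msg_controllen above (an assumption about intent, not
 * stated in the original source): host cmsg payloads can be larger than
 * their target counterparts (for example a 64-bit host struct timeval vs. a
 * 32-bit target's), so the host-side control buffer is sized with headroom
 * before target_to_host_cmsg()/host_to_target_cmsg() repack it.
 */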
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret;
    struct target_msghdr *msgp;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0)) {
        return -TARGET_EFAULT;
    }
    ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
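/*
 * Illustrative outcome (values assumed): with vlen == 3, if the first
 * datagram is transferred and the second fails with EAGAIN, the loop
 * breaks with i == 1 and the function reports 1 datagram; the error is
 * only surfaced when nothing was transferred at all, mirroring kernel
 * sendmmsg()/recvmmsg() semantics.
 */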
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
        return -TARGET_EINVAL;
    }

    host_flags = 0;
    if (flags & TARGET_SOCK_NONBLOCK) {
        host_flags |= SOCK_NONBLOCK;
    }
    if (flags & TARGET_SOCK_CLOEXEC) {
        host_flags |= SOCK_CLOEXEC;
    }

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getpeername(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(getsockname(fd, addr, &ret_addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    target_to_host_sock_type(&type);

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        addr = alloca(addrlen + 1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        addrlen = 0; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
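/*
 * Illustrative dispatch (argument values assumed): a guest
 * socketcall(TARGET_SYS_SENDTO, vptr) reads nargs[TARGET_SYS_SENDTO] == 6
 * abi_longs starting at vptr and forwards them as
 * do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]), so the multiplexed ABI
 * and the direct sendto(2) syscall share one implementation.
 */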
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm), target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
target_to_host_semarray(int semid
, unsigned short **host_array
,
3906 abi_ulong target_addr
)
3909 unsigned short *array
;
3911 struct semid_ds semid_ds
;
3914 semun
.buf
= &semid_ds
;
3916 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
3918 return get_errno(ret
);
3920 nsems
= semid_ds
.sem_nsems
;
3922 *host_array
= g_try_new(unsigned short, nsems
);
3924 return -TARGET_ENOMEM
;
3926 array
= lock_user(VERIFY_READ
, target_addr
,
3927 nsems
*sizeof(unsigned short), 1);
3929 g_free(*host_array
);
3930 return -TARGET_EFAULT
;
3933 for(i
=0; i
<nsems
; i
++) {
3934 __get_user((*host_array
)[i
], &array
[i
]);
3936 unlock_user(array
, target_addr
, 0);
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for (i = 0; i < nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    g_free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch (cmd) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element. To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
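/*
 * Worked example of the SETVAL byteswap above (values assumed, not from
 * the original source): a 64-bit big-endian guest stores val = 1 in the
 * first four bytes of the 8-byte union; fetched as a pointer-sized integer
 * and swapped to host order it reads as 0x0000000100000000, so naive use
 * of .val would yield 0. tswapal() restores the guest byte layout and the
 * following tswap32() of the 4-byte val field recovers val == 1.
 */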
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for (i = 0; i < nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
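/*
 * Illustrative expansion of SEMTIMEDOP_IPC_ARGS (generic, non-s390x case):
 * the safe_ipc() call above becomes
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, timeout)
 * whereas on s390x the five-argument variant is used and the timeout
 * travels in the slot the dummy 0 occupies here, with sops moved last.
 */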
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                                 msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
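/*
 * Illustrative expansion of MSGRCV_ARGS above (generic case): the kludge
 * packs the message pointer and the type into an anonymous long array,
 * so the safe_ipc() call becomes
 *     safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *              (long int[]){(long int)host_mb, msgtyp}, 0);
 * SPARC passes the two values directly and s390x drops the trailing 0.
 */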
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
        /*
         * The s390 sys_ipc variant has only five parameters instead of six
         * (as for the default variant) and the only difference is the
         * handling of SEMTIMEDOP, where on s390 the third parameter is used
         * as a pointer to a struct timespec where the generic variant uses
         * the fifth parameter.
         */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = target_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = target_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))
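/*
 * Worked example of the bound above (struct sizes are host dependent;
 * these are the common values): with a 32-byte struct fiemap and a
 * 56-byte struct fiemap_extent,
 *
 *     FIEMAP_MAX_EXTENTS == (UINT_MAX - 32) / 56 == 76695843
 *
 * so sizeof(*fm) + fm_extent_count * sizeof(struct fiemap_extent), as
 * computed for outbufsz below, cannot wrap a 32-bit size calculation.
 */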
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    void *argptr;
    int ret;
    struct ifconf *host_ifconf;
    uint32_t outbufsz;
    const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
    const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
    int target_ifreq_size;
    int nb_ifreq;
    int free_buf = 0;
    int i;
    int target_ifc_len;
    abi_long target_ifc_buf;
    int host_ifc_len;
    char *host_ifc_buf;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr)
        return -TARGET_EFAULT;
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
    target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
    target_ifreq_size = thunk_type_size(ifreq_max_type, 0);

    if (target_ifc_buf != 0) {
        target_ifc_len = host_ifconf->ifc_len;
        nb_ifreq = target_ifc_len / target_ifreq_size;
        host_ifc_len = nb_ifreq * sizeof(struct ifreq);

        outbufsz = sizeof(*host_ifconf) + host_ifc_len;
        if (outbufsz > MAX_STRUCT_SIZE) {
            /*
             * We can't fit all the extents into the fixed size buffer.
             * Allocate one that is large enough and use it instead.
             */
            host_ifconf = g_try_malloc(outbufsz);
            if (!host_ifconf) {
                return -TARGET_ENOMEM;
            }
            memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
            free_buf = 1;
        }
        host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);

        host_ifconf->ifc_len = host_ifc_len;
    } else {
        host_ifc_buf = NULL;
    }
    host_ifconf->ifc_buf = host_ifc_buf;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
    if (!is_error(ret)) {
        /* convert host ifc_len to target ifc_len */

        nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
        target_ifc_len = nb_ifreq * target_ifreq_size;
        host_ifconf->ifc_len = target_ifc_len;

        /* restore target ifc_buf */

        host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;

        /* copy struct ifconf to target user */

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr)
            return -TARGET_EFAULT;
        thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);

        if (target_ifc_buf != 0) {
            /* copy ifreq[] to target user */
            argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            for (i = 0; i < nb_ifreq ; i++) {
                thunk_convert(argptr + i * target_ifreq_size,
                              host_ifc_buf + i * sizeof(struct ifreq),
                              ifreq_arg_type, THUNK_TARGET);
            }
            unlock_user(argptr, target_ifc_buf, target_ifc_len);
        }
    }

    if (free_buf) {
        g_free(host_ifconf);
    }

    return ret;
}
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};

static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(dm_arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(dm_arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(dm_arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);

    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
                                    int fd, int cmd, abi_long arg)
{
    struct timeval tv;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
        if (copy_to_user_timeval(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (copy_to_user_timeval64(arg, &tv)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
                                      int fd, int cmd, abi_long arg)
{
    struct timespec ts;
    abi_long ret;

    ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
    if (is_error(ret)) {
        return ret;
    }

    if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
        if (host_to_target_timespec(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    } else {
        if (host_to_target_timespec64(arg, &ts)) {
            return -TARGET_EFAULT;
        }
    }

    return ret;
}
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                copy ? host_ver->desc_len : 0);
}

static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            goto err;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;

err:
    unlock_drm_version(host_ver, target_ver, false);
    return -TARGET_EFAULT;
}

static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
                                  int fd, int cmd, abi_long arg)
{
    switch (ie->host_cmd) {
    case DRM_IOCTL_I915_GETPARAM:
        return do_ioctl_drm_i915_getparam(ie,
                                          (struct drm_i915_getparam *)buf_temp,
                                          fd, arg);
    default:
        return -TARGET_ENOSYS;
    }
}
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
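/*
 * Illustrative expansion (hypothetical entry shape; see ioctls.h for
 * the real list): a line such as
 *
 *     IOCTL(BLKGETSIZE64, IOC_R, MK_PTR(TYPE_ULONGLONG))
 *
 * expands to
 *
 *     { TARGET_BLKGETSIZE64, BLKGETSIZE64, "BLKGETSIZE64", IOC_R, 0,
 *       { MK_PTR(TYPE_ULONGLONG) } },
 *
 * i.e. target command number, host command number, a name for logging,
 * the access mode, an optional do_ioctl hook, and the thunk argument
 * type used for the generic conversion in do_ioctl() below.
 */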
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8 },
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC },
};
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
    .print = print_termios,
};
/* If the host does not provide these bits, they may be safely discarded. */
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
};
/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef TARGET_MAP_32BIT
#define TARGET_MAP_32BIT 0
#endif
#ifndef TARGET_MAP_HUGE_2MB
#define TARGET_MAP_HUGE_2MB 0
#endif
#ifndef TARGET_MAP_HUGE_1GB
#define TARGET_MAP_HUGE_1GB 0
#endif
static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
                        int target_flags, int fd, off_t offset)
{
    /*
     * The historical set of flags that all mmap types implicitly support.
     */
    enum {
        TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
                               | TARGET_MAP_PRIVATE
                               | TARGET_MAP_FIXED
                               | TARGET_MAP_ANONYMOUS
                               | TARGET_MAP_DENYWRITE
                               | TARGET_MAP_EXECUTABLE
                               | TARGET_MAP_UNINITIALIZED
                               | TARGET_MAP_GROWSDOWN
                               | TARGET_MAP_LOCKED
                               | TARGET_MAP_NORESERVE
                               | TARGET_MAP_POPULATE
                               | TARGET_MAP_NONBLOCK
                               | TARGET_MAP_STACK
                               | TARGET_MAP_HUGETLB
                               | TARGET_MAP_32BIT
                               | TARGET_MAP_HUGE_2MB
                               | TARGET_MAP_HUGE_1GB
    };
    int host_flags;

    switch (target_flags & TARGET_MAP_TYPE) {
    case TARGET_MAP_PRIVATE:
        host_flags = MAP_PRIVATE;
        break;
    case TARGET_MAP_SHARED:
        host_flags = MAP_SHARED;
        break;
    case TARGET_MAP_SHARED_VALIDATE:
        /*
         * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
         * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
         */
        if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
            return -TARGET_EOPNOTSUPP;
        }
        host_flags = MAP_SHARED_VALIDATE;
        if (target_flags & TARGET_MAP_SYNC) {
            host_flags |= MAP_SYNC;
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);

    return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
}
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 *       TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
6109 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6111 uint64_t *gdt_table
= g2h_untagged(env
->gdt
.base
);
6112 struct target_modify_ldt_ldt_s ldt_info
;
6113 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6114 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6115 int seg_not_present
, useable
, lm
;
6116 uint32_t *lp
, entry_1
, entry_2
;
6119 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6120 if (!target_ldt_info
)
6121 return -TARGET_EFAULT
;
6122 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6123 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6124 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6125 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6126 if (ldt_info
.entry_number
== -1) {
6127 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6128 if (gdt_table
[i
] == 0) {
6129 ldt_info
.entry_number
= i
;
6130 target_ldt_info
->entry_number
= tswap32(i
);
6135 unlock_user_struct(target_ldt_info
, ptr
, 1);
6137 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6138 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6139 return -TARGET_EINVAL
;
6140 seg_32bit
= ldt_info
.flags
& 1;
6141 contents
= (ldt_info
.flags
>> 1) & 3;
6142 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6143 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6144 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6145 useable
= (ldt_info
.flags
>> 6) & 1;
6149 lm
= (ldt_info
.flags
>> 7) & 1;
6152 if (contents
== 3) {
6153 if (seg_not_present
== 0)
6154 return -TARGET_EINVAL
;
6157 /* NOTE: same code as Linux kernel */
6158 /* Allow LDTs to be cleared by the user. */
6159 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6160 if ((contents
== 0 &&
6161 read_exec_only
== 1 &&
6163 limit_in_pages
== 0 &&
6164 seg_not_present
== 1 &&
6172 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6173 (ldt_info
.limit
& 0x0ffff);
6174 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6175 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6176 (ldt_info
.limit
& 0xf0000) |
6177 ((read_exec_only
^ 1) << 9) |
6179 ((seg_not_present
^ 1) << 15) |
6181 (limit_in_pages
<< 23) |
6186 /* Install the new entry ... */
6188 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6189 lp
[0] = tswap32(entry_1
);
6190 lp
[1] = tswap32(entry_2
);
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
#else
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
#endif /* defined(TARGET_ABI32) */
#endif /* defined(TARGET_I386) */
/*
 * These constants are generic.  Supply any that are missing from the host.
 */
#ifndef PR_SET_NAME
# define PR_SET_NAME    15
# define PR_GET_NAME    16
#endif
#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR   (1 << 0)
# define PR_FP_MODE_FRE  (1 << 1)
#endif
#ifndef PR_SVE_SET_VL
# define PR_SVE_SET_VL  50
# define PR_SVE_GET_VL  51
# define PR_SVE_VL_LEN_MASK  0xffff
# define PR_SVE_VL_INHERIT   (1 << 17)
#endif
#ifndef PR_PAC_RESET_KEYS
# define PR_PAC_RESET_KEYS  54
# define PR_PAC_APIAKEY   (1 << 0)
# define PR_PAC_APIBKEY   (1 << 1)
# define PR_PAC_APDAKEY   (1 << 2)
# define PR_PAC_APDBKEY   (1 << 3)
# define PR_PAC_APGAKEY   (1 << 4)
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
# define PR_SET_TAGGED_ADDR_CTRL 55
# define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
#endif
#ifndef PR_SET_IO_FLUSHER
# define PR_SET_IO_FLUSHER 57
# define PR_GET_IO_FLUSHER 58
#endif
#ifndef PR_SET_SYSCALL_USER_DISPATCH
# define PR_SET_SYSCALL_USER_DISPATCH 59
#endif
#ifndef PR_SME_SET_VL
# define PR_SME_SET_VL  63
# define PR_SME_GET_VL  64
# define PR_SME_VL_LEN_MASK  0xffff
# define PR_SME_VL_INHERIT   (1 << 17)
#endif
6329 static abi_long
do_prctl_inval0(CPUArchState
*env
)
6331 return -TARGET_EINVAL
;
6334 static abi_long
do_prctl_inval1(CPUArchState
*env
, abi_long arg2
)
6336 return -TARGET_EINVAL
;
6339 #ifndef do_prctl_get_fp_mode
6340 #define do_prctl_get_fp_mode do_prctl_inval0
6342 #ifndef do_prctl_set_fp_mode
6343 #define do_prctl_set_fp_mode do_prctl_inval1
6345 #ifndef do_prctl_sve_get_vl
6346 #define do_prctl_sve_get_vl do_prctl_inval0
6348 #ifndef do_prctl_sve_set_vl
6349 #define do_prctl_sve_set_vl do_prctl_inval1
6351 #ifndef do_prctl_reset_keys
6352 #define do_prctl_reset_keys do_prctl_inval1
6354 #ifndef do_prctl_set_tagged_addr_ctrl
6355 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6357 #ifndef do_prctl_get_tagged_addr_ctrl
6358 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6360 #ifndef do_prctl_get_unalign
6361 #define do_prctl_get_unalign do_prctl_inval1
6363 #ifndef do_prctl_set_unalign
6364 #define do_prctl_set_unalign do_prctl_inval1
6366 #ifndef do_prctl_sme_get_vl
6367 #define do_prctl_sme_get_vl do_prctl_inval0
6369 #ifndef do_prctl_sme_set_vl
6370 #define do_prctl_sme_set_vl do_prctl_inval1
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
    {
        int deathsig;
        ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                              arg3, arg4, arg5));
        if (!is_error(ret) &&
            put_user_s32(host_to_target_signal(deathsig), arg2)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
    {
        void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
        if (!name) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                              arg3, arg4, arg5));
        unlock_user(name, arg2, 16);
        return ret;
    }
    case PR_SET_NAME:
    {
        void *name = lock_user(VERIFY_READ, arg2, 16, 1);
        if (!name) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                              arg3, arg4, arg5));
        unlock_user(name, arg2, 0);
        return ret;
    }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    {
        int val;
        ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
                              arg3, arg4, arg5));
        if (!is_error(ret) && put_user_s32(val, arg2)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }

    case PR_GET_TID_ADDRESS:
    {
        TaskState *ts = get_task_state(env_cpu(env));
        return put_user_ual(ts->child_tidptr, arg2);
    }

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
#define NEW_STACK_SIZE 0x40000

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;

static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = get_task_state(cpu);
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = get_task_state(cpu);
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
            tcg_cflags_set(cpu, CF_PARALLEL);
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }

        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

#if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
        if (flags & CLONE_PIDFD) {
            return -TARGET_EINVAL;
        }
#endif

        /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
        if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(ret);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = get_task_state(cpu);
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            if (flags & CLONE_PIDFD) {
                int pid_fd = 0;
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
                int pid_child = ret;
                pid_fd = pidfd_open(pid_child, 0);
                if (pid_fd >= 0) {
                    fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
                                           | FD_CLOEXEC);
                } else {
                    pid_fd = 0;
                }
#endif
                put_user_u32(pid_fd, parent_tidptr);
            }
            fork_end(ret);
        }
        g_assert(!cpu_in_exclusive_context(cpu));
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK && ret <= F_SETLKW) {
        ret -= F_GETLK - 5;
    }
#endif

    return ret;
}
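
/*
 * The lock-type constants below are translated with a small X-macro:
 * each TRANSTBL_CONVERT expansion supplies one case label, and the same
 * FLOCK_TRANSTBL body is reused for both directions by redefining the
 * macro before each instantiation.
 */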
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}

static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
static inline abi_long copy_from_user_flock(struct flock *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}

typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);

#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;

static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif

static inline abi_long copy_from_user_flock64(struct flock *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    int l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
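
/*
 * With USE_UID16 the guest ABI carries 16-bit uid_t/gid_t values, so
 * host IDs above 65535 must be clamped on the way out and the 16-bit -1
 * sentinel sign-extended on the way in; without USE_UID16 the helpers
 * below are identity functions.
 */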
#ifdef USE_UID16

static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */

/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif
#ifdef __NR_setgroups32
#define __NR_sys_setgroups __NR_setgroups32
#else
#define __NR_sys_setgroups __NR_setgroups
#endif

_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
_syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
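
/*
 * On targets where regpairs_aligned() is true, a 64-bit syscall argument
 * is passed in an aligned register pair, which inserts a padding register
 * before the low/high halves; the helpers below shift the argument words
 * down accordingly before reassembling the 64-bit offset.
 */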
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
}
#endif
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif

#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }

    return 0;
}
#endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    if (arg & TARGET_MCL_ONFAULT) {
        result |= MCL_ONFAULT;
    }
#endif

    return result;
}
#endif

static inline int target_to_host_msync_arg(abi_long arg)
{
    return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
           ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
           ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
           (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
}
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
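
/*
 * Futexes take a struct timespec, so on 32-bit hosts with 64-bit time_t
 * both the plain __NR_futex syscall (32-bit timespec) and
 * __NR_futex_time64 can exist; the wrappers below pick whichever matches
 * the width of the host's timespec.
 */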
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
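
/*
 * struct file_handle is a variable-length object: handle_bytes gives the
 * size of the opaque payload that follows the fixed header, so the
 * helpers below size their copies as sizeof(struct file_handle) plus
 * handle_bytes and byte-swap only the two header fields.
 */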
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = get_task_state(cpu)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
{
7997 IntervalTreeRoot
*host_maps
;
8003 * Subroutine to output one line of /proc/self/maps,
8004 * or one region of /proc/self/smaps.
8008 # define test_stack(S, E, L) (E == L)
8010 # define test_stack(S, E, L) (S == L)
8013 static void open_self_maps_4(const struct open_self_maps_data
*d
,
8014 const MapInfo
*mi
, abi_ptr start
,
8015 abi_ptr end
, unsigned flags
)
8017 const struct image_info
*info
= d
->ts
->info
;
8018 const char *path
= mi
->path
;
8023 if (test_stack(start
, end
, info
->stack_limit
)) {
8025 } else if (start
== info
->brk
) {
8027 } else if (start
== info
->vdso
) {
8029 #ifdef TARGET_X86_64
8030 } else if (start
== TARGET_VSYSCALL_PAGE
) {
8031 path
= "[vsyscall]";
8035 /* Except null device (MAP_ANON), adjust offset for this fragment. */
8036 offset
= mi
->offset
;
8038 uintptr_t hstart
= (uintptr_t)g2h_untagged(start
);
8039 offset
+= hstart
- mi
->itree
.start
;
8042 count
= dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
8043 " %c%c%c%c %08" PRIx64
" %02x:%02x %"PRId64
,
8045 (flags
& PAGE_READ
) ? 'r' : '-',
8046 (flags
& PAGE_WRITE_ORG
) ? 'w' : '-',
8047 (flags
& PAGE_EXEC
) ? 'x' : '-',
8048 mi
->is_priv
? 'p' : 's',
8049 offset
, major(mi
->dev
), minor(mi
->dev
),
8050 (uint64_t)mi
->inode
);
8052 dprintf(fd
, "%*s%s\n", 73 - count
, "", path
);
8058 unsigned long size
= end
- start
;
8059 unsigned long page_size_kb
= TARGET_PAGE_SIZE
>> 10;
8060 unsigned long size_kb
= size
>> 10;
8062 dprintf(fd
, "Size: %lu kB\n"
8063 "KernelPageSize: %lu kB\n"
8064 "MMUPageSize: %lu kB\n"
8068 "Shared_Clean: 0 kB\n"
8069 "Shared_Dirty: 0 kB\n"
8070 "Private_Clean: 0 kB\n"
8071 "Private_Dirty: 0 kB\n"
8072 "Referenced: 0 kB\n"
8073 "Anonymous: %lu kB\n"
8075 "AnonHugePages: 0 kB\n"
8076 "ShmemPmdMapped: 0 kB\n"
8077 "FilePmdMapped: 0 kB\n"
8078 "Shared_Hugetlb: 0 kB\n"
8079 "Private_Hugetlb: 0 kB\n"
8084 "VmFlags:%s%s%s%s%s%s%s%s\n",
8085 size_kb
, page_size_kb
, page_size_kb
,
8086 (flags
& PAGE_ANON
? size_kb
: 0),
8087 (flags
& PAGE_READ
) ? " rd" : "",
8088 (flags
& PAGE_WRITE_ORG
) ? " wr" : "",
8089 (flags
& PAGE_EXEC
) ? " ex" : "",
8090 mi
->is_priv
? "" : " sh",
8091 (flags
& PAGE_READ
) ? " mr" : "",
8092 (flags
& PAGE_WRITE_ORG
) ? " mw" : "",
8093 (flags
& PAGE_EXEC
) ? " me" : "",
8094 mi
->is_priv
? "" : " ms");
8099 * Callback for walk_memory_regions, when read_self_maps() fails.
8100 * Proceed without the benefit of host /proc/self/maps cross-check.
8102 static int open_self_maps_3(void *opaque
, target_ulong guest_start
,
8103 target_ulong guest_end
, unsigned long flags
)
8105 static const MapInfo mi
= { .is_priv
= true };
8107 open_self_maps_4(opaque
, &mi
, guest_start
, guest_end
, flags
);
8112 * Callback for walk_memory_regions, when read_self_maps() succeeds.
8114 static int open_self_maps_2(void *opaque
, target_ulong guest_start
,
8115 target_ulong guest_end
, unsigned long flags
)
8117 const struct open_self_maps_data
*d
= opaque
;
8118 uintptr_t host_start
= (uintptr_t)g2h_untagged(guest_start
);
8119 uintptr_t host_last
= (uintptr_t)g2h_untagged(guest_end
- 1);
8121 #ifdef TARGET_X86_64
8123 * Because of the extremely high position of the page within the guest
8124 * virtual address space, this is not backed by host memory at all.
8125 * Therefore the loop below would fail. This is the only instance
8126 * of not having host backing memory.
8128 if (guest_start
== TARGET_VSYSCALL_PAGE
) {
8129 return open_self_maps_3(opaque
, guest_start
, guest_end
, flags
);
8134 IntervalTreeNode
*n
=
8135 interval_tree_iter_first(d
->host_maps
, host_start
, host_start
);
8136 MapInfo
*mi
= container_of(n
, MapInfo
, itree
);
8137 uintptr_t this_hlast
= MIN(host_last
, n
->last
);
8138 target_ulong this_gend
= h2g(this_hlast
) + 1;
8140 open_self_maps_4(d
, mi
, guest_start
, this_gend
, flags
);
8142 if (this_hlast
== host_last
) {
8145 host_start
= this_hlast
+ 1;
8146 guest_start
= h2g(host_start
);
8150 static int open_self_maps_1(CPUArchState
*env
, int fd
, bool smaps
)
8152 struct open_self_maps_data d
= {
8153 .ts
= get_task_state(env_cpu(env
)),
8154 .host_maps
= read_self_maps(),
8160 walk_memory_regions(&d
, open_self_maps_2
);
8161 free_self_maps(d
.host_maps
);
8163 walk_memory_regions(&d
, open_self_maps_3
);
8168 static int open_self_maps(CPUArchState
*cpu_env
, int fd
)
8170 return open_self_maps_1(cpu_env
, fd
, false);
8173 static int open_self_smaps(CPUArchState
*cpu_env
, int fd
)
8175 return open_self_maps_1(cpu_env
, fd
, true);
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = get_task_state(cpu);
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 2) {
            /* task state */
            g_string_assign(buf, "R "); /* we are running right now */
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 19) {
            /* num_threads */
            int cpus = 0;
            WITH_RCU_READ_LOCK_GUARD() {
                CPUState *cpu_iter;
                CPU_FOREACH(cpu_iter) {
                    cpus++;
                }
            }
            g_string_printf(buf, "%d ", cpus);
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = get_task_state(cpu);
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
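
/*
 * Matches both "/proc/self/<entry>" and "/proc/<pid>/<entry>" where
 * <pid> is the emulated process's own pid, so a guest that spells out
 * its pid still gets the emulated view.
 */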
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                           const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}

void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8310 #include "target_proc.h"
8312 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8313 defined(HAVE_ARCH_PROC_CPUINFO) || \
8314 defined(HAVE_ARCH_PROC_HARDWARE)
8315 static int is_proc(const char *filename
, const char *entry
)
8317 return strcmp(filename
, entry
) == 0;
8321 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8322 static int open_net_route(CPUArchState
*cpu_env
, int fd
)
8329 fp
= fopen("/proc/net/route", "r");
8336 read
= getline(&line
, &len
, fp
);
8337 dprintf(fd
, "%s", line
);
8341 while ((read
= getline(&line
, &len
, fp
)) != -1) {
8343 uint32_t dest
, gw
, mask
;
8344 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
8347 fields
= sscanf(line
,
8348 "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8349 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
8350 &mask
, &mtu
, &window
, &irtt
);
8354 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8355 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
8356 metric
, tswap32(mask
), mtu
, window
, irtt
);
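
/*
 * The /proc entries handled below cannot simply be passed through to the
 * host: their contents depend on the emulated CPU and memory layout.
 * Each is faked by filling a memfd (or, failing that, an unlinked
 * temporary file) via the matching fill callback and returning that fd.
 */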
static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
                              const char *fname, int flags, mode_t mode,
                              int openat2_resolve, bool safe)
{
    g_autofree char *proc_name = NULL;
    const char *pathname;
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_CPUINFO)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_HARDWARE)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* if this is a file from /proc/ filesystem, expand full name */
    proc_name = realpath(fname, NULL);
    if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
        pathname = proc_name;
    } else {
        pathname = fname;
    }

    if (is_proc_myself(pathname, "exe")) {
        /* Honor openat2 resolve flags */
        if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
            (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
            errno = ELOOP;
            return -1;
        }
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);
        return fd;
    }

    return -2;
}
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
    if (fd > -2) {
        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
                      abi_ptr guest_pathname, abi_ptr guest_open_how,
                      abi_ulong guest_size)
{
    struct open_how_ver0 how = {0};
    char *pathname;
    int ret;

    if (guest_size < sizeof(struct target_open_how_ver0)) {
        return -TARGET_EINVAL;
    }
    ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
    if (ret) {
        if (ret == -TARGET_E2BIG) {
            qemu_log_mask(LOG_UNIMP,
                          "Unimplemented openat2 open_how size: "
                          TARGET_ABI_FMT_lu "\n", guest_size);
        }
        return ret;
    }
    pathname = lock_user_string(guest_pathname);
    if (!pathname) {
        return -TARGET_EFAULT;
    }

    how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
    how.mode = tswap64(how.mode);
    how.resolve = tswap64(how.resolve);
    int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
                                how.resolve, true);
    if (fd > -2) {
        ret = get_errno(fd);
    } else {
        ret = get_errno(safe_openat2(dirfd, pathname, &how,
                                     sizeof(struct open_how_ver0)));
    }

    fd_trans_unregister(ret);
    unlock_user(pathname, guest_pathname, 0);
    return ret;
}
do_guest_readlink(const char *pathname
, char *buf
, size_t bufsiz
)
8522 if (!pathname
|| !buf
) {
8528 /* Short circuit this for the magic exe check. */
8533 if (is_proc_myself((const char *)pathname
, "exe")) {
8535 * Don't worry about sign mismatch as earlier mapping
8536 * logic would have thrown a bad address error.
8538 ret
= MIN(strlen(exec_path
), bufsiz
);
8539 /* We cannot NUL terminate the string. */
8540 memcpy(buf
, exec_path
, ret
);
8542 ret
= readlink(path(pathname
), buf
, bufsiz
);
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
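
/*
 * CPU affinity masks are arrays of unsigned long on the host but arrays
 * of abi_ulong on the target, so the word size and byte order can both
 * differ; the two helpers below remap one bit at a time between the two
 * layouts rather than copying whole words.
 */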
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}

static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
#ifdef TARGET_NR_getdents
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
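/*
 * Record-size example for the conversion above: with a 5-byte d_name,
 * treclen = offsetof(struct target_dirent, d_name) + 5 + 2 (the NUL plus
 * the trailing type byte), rounded up to the alignment of target_dirent,
 * which is why target records can outgrow the host records they mirror.
 */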
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
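/*
 * Unlike the legacy layout, dirent64 carries d_type as a real field, so
 * namelen above already includes the trailing NUL and no padding-byte
 * trick is needed when copying the name.
 */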
#if defined(TARGET_NR_riscv_hwprobe)

#define RISCV_HWPROBE_KEY_MVENDORID 0
#define RISCV_HWPROBE_KEY_MARCHID 1
#define RISCV_HWPROBE_KEY_MIMPID 2

#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
#define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)

#define RISCV_HWPROBE_KEY_IMA_EXT_0 4
#define RISCV_HWPROBE_IMA_FD (1 << 0)
#define RISCV_HWPROBE_IMA_C (1 << 1)
#define RISCV_HWPROBE_IMA_V (1 << 2)
#define RISCV_HWPROBE_EXT_ZBA (1 << 3)
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
#define RISCV_HWPROBE_EXT_ZBC (1 << 7)
#define RISCV_HWPROBE_EXT_ZBKB (1 << 8)
#define RISCV_HWPROBE_EXT_ZBKC (1 << 9)
#define RISCV_HWPROBE_EXT_ZBKX (1 << 10)
#define RISCV_HWPROBE_EXT_ZKND (1 << 11)
#define RISCV_HWPROBE_EXT_ZKNE (1 << 12)
#define RISCV_HWPROBE_EXT_ZKNH (1 << 13)
#define RISCV_HWPROBE_EXT_ZKSED (1 << 14)
#define RISCV_HWPROBE_EXT_ZKSH (1 << 15)
#define RISCV_HWPROBE_EXT_ZKT (1 << 16)
#define RISCV_HWPROBE_EXT_ZVBB (1 << 17)
#define RISCV_HWPROBE_EXT_ZVBC (1 << 18)
#define RISCV_HWPROBE_EXT_ZVKB (1 << 19)
#define RISCV_HWPROBE_EXT_ZVKG (1 << 20)
#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21)
#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22)
#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23)
#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24)
#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25)
#define RISCV_HWPROBE_EXT_ZVKT (1 << 26)
#define RISCV_HWPROBE_EXT_ZFH (1 << 27)
#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
#define RISCV_HWPROBE_EXT_ZVFHMIN (1 << 31)
#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)

#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
#define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
#define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
#define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)

#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6

struct riscv_hwprobe {
    abi_llong key;
    abi_ullong value;
};
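/*
 * Each riscv_hwprobe pair is a key/value query: the guest fills in keys,
 * and the emulator answers by writing values (or -1 into the key for
 * keys it does not recognise), mirroring the kernel's hwprobe contract.
 */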
static void risc_hwprobe_fill_pairs(CPURISCVState *env,
                                    struct riscv_hwprobe *pair,
                                    size_t pair_count)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);

    for (; pair_count > 0; pair_count--, pair++) {
        abi_llong key;
        abi_ullong value;

        __put_user(0, &pair->value);
        __get_user(key, &pair->key);
        switch (key) {
        case RISCV_HWPROBE_KEY_MVENDORID:
            __put_user(cfg->mvendorid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MARCHID:
            __put_user(cfg->marchid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MIMPID:
            __put_user(cfg->mimpid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
            value = riscv_has_ext(env, RVI) &&
                    riscv_has_ext(env, RVM) &&
                    riscv_has_ext(env, RVA) ?
                    RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_IMA_EXT_0:
            value = riscv_has_ext(env, RVF) &&
                    riscv_has_ext(env, RVD) ? RISCV_HWPROBE_IMA_FD : 0;
            value |= riscv_has_ext(env, RVC) ? RISCV_HWPROBE_IMA_C : 0;
            value |= riscv_has_ext(env, RVV) ? RISCV_HWPROBE_IMA_V : 0;
            value |= cfg->ext_zba ? RISCV_HWPROBE_EXT_ZBA : 0;
            value |= cfg->ext_zbb ? RISCV_HWPROBE_EXT_ZBB : 0;
            value |= cfg->ext_zbs ? RISCV_HWPROBE_EXT_ZBS : 0;
            value |= cfg->ext_zicboz ? RISCV_HWPROBE_EXT_ZICBOZ : 0;
            value |= cfg->ext_zbc ? RISCV_HWPROBE_EXT_ZBC : 0;
            value |= cfg->ext_zbkb ? RISCV_HWPROBE_EXT_ZBKB : 0;
            value |= cfg->ext_zbkc ? RISCV_HWPROBE_EXT_ZBKC : 0;
            value |= cfg->ext_zbkx ? RISCV_HWPROBE_EXT_ZBKX : 0;
            value |= cfg->ext_zknd ? RISCV_HWPROBE_EXT_ZKND : 0;
            value |= cfg->ext_zkne ? RISCV_HWPROBE_EXT_ZKNE : 0;
            value |= cfg->ext_zknh ? RISCV_HWPROBE_EXT_ZKNH : 0;
            value |= cfg->ext_zksed ? RISCV_HWPROBE_EXT_ZKSED : 0;
            value |= cfg->ext_zksh ? RISCV_HWPROBE_EXT_ZKSH : 0;
            value |= cfg->ext_zkt ? RISCV_HWPROBE_EXT_ZKT : 0;
            value |= cfg->ext_zvbb ? RISCV_HWPROBE_EXT_ZVBB : 0;
            value |= cfg->ext_zvbc ? RISCV_HWPROBE_EXT_ZVBC : 0;
            value |= cfg->ext_zvkb ? RISCV_HWPROBE_EXT_ZVKB : 0;
            value |= cfg->ext_zvkg ? RISCV_HWPROBE_EXT_ZVKG : 0;
            value |= cfg->ext_zvkned ? RISCV_HWPROBE_EXT_ZVKNED : 0;
            value |= cfg->ext_zvknha ? RISCV_HWPROBE_EXT_ZVKNHA : 0;
            value |= cfg->ext_zvknhb ? RISCV_HWPROBE_EXT_ZVKNHB : 0;
            value |= cfg->ext_zvksed ? RISCV_HWPROBE_EXT_ZVKSED : 0;
            value |= cfg->ext_zvksh ? RISCV_HWPROBE_EXT_ZVKSH : 0;
            value |= cfg->ext_zvkt ? RISCV_HWPROBE_EXT_ZVKT : 0;
            value |= cfg->ext_zfh ? RISCV_HWPROBE_EXT_ZFH : 0;
            value |= cfg->ext_zfhmin ? RISCV_HWPROBE_EXT_ZFHMIN : 0;
            value |= cfg->ext_zihintntl ? RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
            value |= cfg->ext_zvfh ? RISCV_HWPROBE_EXT_ZVFH : 0;
            value |= cfg->ext_zvfhmin ? RISCV_HWPROBE_EXT_ZVFHMIN : 0;
            value |= cfg->ext_zfa ? RISCV_HWPROBE_EXT_ZFA : 0;
            value |= cfg->ext_ztso ? RISCV_HWPROBE_EXT_ZTSO : 0;
            value |= cfg->ext_zacas ? RISCV_HWPROBE_EXT_ZACAS : 0;
            value |= cfg->ext_zicond ? RISCV_HWPROBE_EXT_ZICOND : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_CPUPERF_0:
            __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
            value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
            __put_user(value, &pair->value);
            break;
        default:
            __put_user(-1, &pair->key);
            break;
        }
    }
}
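/*
 * A single unsupported key does not fail the whole call: the loop above
 * keeps filling the remaining pairs after flagging the bad key with -1.
 */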
static int cpu_set_valid(abi_long arg3, abi_long arg4)
{
    int ret, i, tmp;
    size_t host_mask_size, target_mask_size;
    unsigned long *host_mask;

    /*
     * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
     * arg3 contains the cpu count.
     */
    tmp = (8 * sizeof(abi_ulong));
    target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
    host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
                     ~(sizeof(*host_mask) - 1);

    host_mask = alloca(host_mask_size);

    ret = target_to_host_cpu_mask(host_mask, host_mask_size,
                                  arg4, target_mask_size);
    if (ret != 0) {
        return ret;
    }

    for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
        if (host_mask[i] != 0) {
            return 0;
        }
    }
    return -TARGET_EINVAL;
}
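/*
 * Sizing example for cpu_set_valid: with a 32-bit guest (abi_ulong = 4
 * bytes) and arg3 = 4 CPUs, tmp = 32, so target_mask_size = 4 bytes and
 * host_mask_size rounds that up to sizeof(unsigned long), e.g. 8 bytes
 * on a 64-bit host.
 */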
static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
                                 abi_long arg2, abi_long arg3,
                                 abi_long arg4, abi_long arg5)
{
    int ret;
    struct riscv_hwprobe *host_pairs;

    /* flags must be 0 */
    if (arg5 != 0) {
        return -TARGET_EINVAL;
    }

    /* check the cpu set, if one was supplied */
    if (arg3 != 0) {
        ret = cpu_set_valid(arg3, arg4);
        if (ret != 0) {
            return ret;
        }
    } else if (arg4 != 0) {
        return -TARGET_EINVAL;
    }

    /* nothing to do if no pairs were requested */
    if (arg2 == 0) {
        return 0;
    }

    host_pairs = lock_user(VERIFY_WRITE, arg1,
                           sizeof(*host_pairs) * (size_t)arg2, 0);
    if (host_pairs == NULL) {
        return -TARGET_EFAULT;
    }
    risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
    unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
    return 0;
}
#endif /* TARGET_NR_riscv_hwprobe */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif

#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
          int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
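/*
 * The _syscallN macros above expand to small wrappers that invoke the
 * raw host syscall number directly, bypassing libc, which may not expose
 * these newer mount-API entry points on every host.
 */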
/* This is an internal helper for do_syscall so that it is easier
 * to have a single return point, so that actions, such as logging
 * of syscall results, can be performed.
 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
 */
static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
                            abi_long arg2, abi_long arg3, abi_long arg4,
                            abi_long arg5, abi_long arg6, abi_long arg7,
                            abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;
#if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
    || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
    || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
    || defined(TARGET_NR_statx)
    struct stat st;
#endif
#if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
    || defined(TARGET_NR_fstatfs)
    struct statfs stfs;
#endif
    void *p;

    switch (num) {
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more then one thread. */

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        pthread_mutex_lock(&clone_lock);

        if (CPU_NEXT(first_cpu)) {
            TaskState *ts = get_task_state(cpu);

            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                do_sys_futex(g2h(cpu, ts->child_tidptr),
                             FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
            }

            object_unparent(OBJECT(cpu));
            object_unref(OBJECT(cpu));
            /*
             * At this point the CPU should be unrealized and removed
             * from cpu lists. We can clean-up the rest of the thread
             * data without the lock held.
             */

            pthread_mutex_unlock(&clone_lock);

            thread_cpu = NULL;
            g_free(ts);
            rcu_unregister_thread();
            pthread_exit(NULL);
        }

        pthread_mutex_unlock(&clone_lock);
        preexit_cleanup(cpu_env, arg1);
        _exit(arg1);
        return 0; /* avoid warning */
    case TARGET_NR_read:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_read(arg1, 0, 0));
        } else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(safe_read(arg1, p, arg3));
            if (ret >= 0 &&
                fd_trans_host_to_target_data(arg1)) {
                ret = fd_trans_host_to_target_data(arg1)(p, ret);
            }
            unlock_user(p, arg2, ret);
        }
        return ret;
    case TARGET_NR_write:
        if (arg2 == 0 && arg3 == 0) {
            return get_errno(safe_write(arg1, 0, 0));
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            return -TARGET_EFAULT;
        if (fd_trans_target_to_host_data(arg1)) {
            void *copy = g_malloc(arg3);
            memcpy(copy, p, arg3);
            ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
            if (ret >= 0) {
                ret = get_errno(safe_write(arg1, copy, ret));
            }
            g_free(copy);
        } else {
            ret = get_errno(safe_write(arg1, p, arg3));
        }
        unlock_user(p, arg2, 0);
        return ret;
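/*
 * The write path above copies the guest buffer before translation
 * because an fd_trans hook may rewrite the data in place; translating
 * the locked guest page directly would corrupt guest memory.
 */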
#ifdef TARGET_NR_open
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
                                  target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                  arg3, true));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(do_guest_openat(cpu_env, arg1, p,
                                  target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                  arg4, true));
        fd_trans_unregister(ret);
        unlock_user(p, arg2, 0);
        return ret;
    case TARGET_NR_openat2:
        ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
        return ret;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_name_to_handle_at:
        ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
        return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
    case TARGET_NR_open_by_handle_at:
        ret = do_open_by_handle_at(arg1, arg2, arg3);
        fd_trans_unregister(ret);
        return ret;
#endif
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
    case TARGET_NR_pidfd_open:
        return get_errno(pidfd_open(arg1, arg2));
#endif
#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
    case TARGET_NR_pidfd_send_signal:
        {
            siginfo_t uinfo, *puinfo;

            if (arg3) {
                p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_siginfo(&uinfo, p);
                unlock_user(p, arg3, 0);
                puinfo = &uinfo;
            } else {
                puinfo = NULL;
            }
            ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
                                              puinfo, arg4));
        }
        return ret;
#endif
#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
    case TARGET_NR_pidfd_getfd:
        return get_errno(pidfd_getfd(arg1, arg2, arg3));
#endif
    case TARGET_NR_close:
        fd_trans_unregister(arg1);
        return get_errno(close(arg1));
#if defined(__NR_close_range) && defined(TARGET_NR_close_range)
    case TARGET_NR_close_range:
        ret = get_errno(sys_close_range(arg1, arg2, arg3));
        if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
            abi_long fd, maxfd;
            maxfd = MIN(arg2, target_fd_max);
            for (fd = arg1; fd < maxfd; fd++) {
                fd_trans_unregister(fd);
            }
        }
        return ret;
#endif
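/*
 * After a successful close_range, any fd translators registered in the
 * closed range are stale, so they are unregistered above; when
 * CLOSE_RANGE_CLOEXEC is set the descriptors remain open, so the
 * translators are kept.
 */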
    case TARGET_NR_brk:
        return do_brk(arg1);
#ifdef TARGET_NR_fork
    case TARGET_NR_fork:
        return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
#endif
#ifdef TARGET_NR_waitpid
    case TARGET_NR_waitpid:
        {
            int status;
            ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
            if (!is_error(ret) && arg2 && ret
                && put_user_s32(host_to_target_waitstatus(status), arg2))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_waitid
    case TARGET_NR_waitid:
        {
            siginfo_t info;
            struct rusage ru;

            ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
                                        arg4, (arg5 ? &ru : NULL)));
            if (!is_error(ret)) {
                if (arg3) {
                    p = lock_user(VERIFY_WRITE, arg3,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &info);
                    unlock_user(p, arg3, sizeof(target_siginfo_t));
                }
                if (arg5 && host_to_target_rusage(arg5, &ru)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_creat /* not on alpha */
    case TARGET_NR_creat:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(creat(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_link
    case TARGET_NR_link:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(link(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_linkat)
    case TARGET_NR_linkat:
        {
            void *p2 = NULL;
            if (!arg2 || !arg4)
                return -TARGET_EFAULT;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
            unlock_user(p, arg2, 0);
            unlock_user(p2, arg4, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_unlink
    case TARGET_NR_unlink:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_unlinkat)
    case TARGET_NR_unlinkat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(unlinkat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_execveat:
        return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
    case TARGET_NR_execve:
        return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            ret = get_errno(time(&host_time));
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        cpu_env->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        {
            void *p2, *p4;

            if (!arg2 || !arg4) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            p4 = lock_user_string(arg4);
            if (!p4) {
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

            unlock_user(p2, arg2, 0);
            unlock_user(p4, arg4, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        if (!block_signals()) {
            sigsuspend(&get_task_state(cpu)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
:
9860 ret
= get_errno(acct(NULL
));
9862 if (!(p
= lock_user_string(arg1
))) {
9863 return -TARGET_EFAULT
;
9865 ret
= get_errno(acct(path(p
)));
9866 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        {
            int host_flags;

            if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
                return -TARGET_EINVAL;
            }
            host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
            ret = get_errno(dup3(arg1, arg2, host_flags));
            if (ret >= 0) {
                fd_trans_dup(arg1, arg2);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            struct target_sigaction act, oact, *pact, *old_act;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;

            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }

            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = 0;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            TaskState *ts = get_task_state(cpu);
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
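/*
 * Example of the pass-through filter above: limits such as RLIMIT_NOFILE
 * or RLIMIT_CPU reach the host setrlimit(), while RLIMIT_AS/DATA/STACK
 * are silently accepted so a guest "ulimit -v" cannot starve QEMU's own
 * allocator.
 */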
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }
            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }
            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            ret = get_errno(do_guest_readlink(p, p2, arg3));
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            return do_mmap(v1, v2, v3, v4, v5, v6);
        }
#else
        /* mmap pointers are always untagged */
        return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap pointers are always untagged */
        return do_mmap(arg1, arg2, arg3, arg4, arg5,
                       (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
#endif
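/*
 * mmap2 takes its file offset in 4096-byte units: arg6 = 3 becomes byte
 * offset 3 << 12 = 12288. The cast chain widens through abi_ulong first
 * so the shift cannot overflow a 32-bit guest register value.
 */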
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = get_task_state(cpu);
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
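/*
 * The PROT_GROWSDOWN hack above rebases the request: if libc asks to
 * change protection starting mid-stack, the range is stretched down to
 * stack_limit so the whole guest stack mapping is adjusted at once.
 */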
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error. */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage) {
                rusage_ptr = &rusage;
            } else {
                rusage_ptr = NULL;
            }
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1) {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return target_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return target_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
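    /*
     * For reference, a sketch of the three guest-side argument orders the
     * do_fork() calls above un-permute (paraphrasing the kernel's
     * CONFIG_CLONE_* documentation; the authoritative list lives in the
     * kernel source):
     *
     *     default:          flags, stack, parent_tid, child_tid, tls
     *     CLONE_BACKWARDS:  flags, stack, parent_tid, tls,       child_tid
     *     CLONE_BACKWARDS2: stack, flags, parent_tid, child_tid, tls
     */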
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx;

            if (target_to_host_timex(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
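    /*
     * Worked example for the offset packing above: _llseek splits the
     * 64-bit offset across two 32-bit arguments, so offset_high = 0x1 and
     * offset_low = 0x80000000 yield
     *     ((uint64_t)0x1 << 32) | 0x80000000 = 0x180000000 (6 GiB),
     * and the resulting file position is written back through the 64-bit
     * result pointer in arg4.
     */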
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
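    /*
     * Rounding example for the mask sizing above, assuming a 32-bit guest
     * on a 64-bit host: arg2 == 4 (one abi_ulong) passes the alignment
     * check, and
     *     mask_size = (4 + 7) & ~7 = 8
     * pads the host buffer up to one whole host unsigned long.
     */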
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpuid, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpuid, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
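    /*
     * Note on the size handshake above: sched_attr is an extensible
     * struct, so the guest announces its idea of sizeof(sched_attr) in
     * the leading 'size' field.  Any bytes beyond what this emulation
     * understands must be zero (hence check_zeroed_user()), and on a
     * mismatch the expected size is written back to the guest and
     * -E2BIG returned -- the same negotiation the kernel performs.
     */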
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                return -TARGET_EFAULT;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    return -TARGET_EFAULT;
                }

                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            return ret;
        }
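    /*
     * Background for data_items above: with _LINUX_CAPABILITY_VERSION
     * (v1) each capability set is a single 32-bit word, while the v2/v3
     * ABIs widened the sets to 64 bits carried as an array of two
     * __user_cap_data_struct entries, which is why any non-v1 header
     * switches data_items to 2.
     */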
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
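    /*
     * The two sendfile variants above differ only in the width of the
     * offset word: TARGET_NR_sendfile reads and writes a signed abi_long
     * (the guest's native long, via get_user_sal/put_user_sal), while
     * TARGET_NR_sendfile64 always uses a 64-bit offset via
     * get_user_s64/put_user_s64.
     */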
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
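    /*
     * Note: when the host kernel lacks statx() the code above falls back
     * to fstatat() and fills in only the fields an ordinary struct stat
     * can supply; statx-only extras such as the birth time simply stay
     * zero from the memset().
     */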
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
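    /*
     * The low2high/high2low helpers above bridge targets whose legacy
     * syscalls carry 16-bit IDs: low2highuid() widens a 16-bit guest uid
     * to the host uid_t (mapping the overflow value 0xffff to -1), and
     * high2lowuid() narrows a host uid back, clamping IDs that do not
     * fit to the overflow value, much like the kernel's own uid16
     * compatibility layer.
     */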
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(sys_setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            cpu_env->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64(swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64(swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;
          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(sys_setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
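    /*
     * About regpairs_aligned(), used above: on some 32-bit ABIs a 64-bit
     * syscall argument must start in an even-numbered register pair, so
     * the kernel effectively sees a padding word first and every later
     * argument shifts up one slot.  When it returns true the 64-bit
     * offset/len halves are therefore pulled from arg3 onwards rather
     * than arg2.
     */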
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
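    /*
     * The copyfrom/copyto indirection above exists because ARM OABI and
     * EABI disagree about the alignment of 64-bit structure members, so
     * struct flock64 is padded differently in the two ABIs and an OABI
     * guest needs its own marshalling helpers.
     */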
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        cpu_env->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = get_task_state(cpu);
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = get_task_state(cpu);
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
        {
            struct timespec ts;

            ret = target_to_host_timespec(&ts, arg2);
            if (!is_error(ret)) {
                ret = get_errno(clock_settime(arg1, &ts));
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
        {
            struct timespec ts;

            ret = target_to_host_timespec64(&ts, arg2);
            if (!is_error(ret)) {
                ret = get_errno(clock_settime(arg1, &ts));
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
        {
            struct timespec ts;
            ret = get_errno(clock_gettime(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
        {
            struct timespec ts;
            ret = get_errno(clock_gettime(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
        {
            struct timespec ts;
            ret = get_errno(clock_getres(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
        {
            struct timespec ts;
            ret = get_errno(clock_getres(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec64(arg2, &ts);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        {
            struct timespec ts;
            if (target_to_host_timespec(&ts, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                                 &ts, arg4 ? &ts : NULL));
            /*
             * if the call is interrupted by a signal handler, it fails
             * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
             * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
             */
            if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
                host_to_target_timespec(arg4, &ts)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
        {
            struct timespec ts;

            if (target_to_host_timespec64(&ts, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                                 &ts, arg4 ? &ts : NULL));
            if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
                host_to_target_timespec64(arg4, &ts)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#endif
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        {
            TaskState *ts = get_task_state(cpu);
            ts->child_tidptr = arg1;
            /* do not call host set_tid_address() syscall, instead return tid() */
            return get_errno(sys_gettid());
        }
#endif
    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                                     target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                              sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
13082 case TARGET_NR_mq_timedreceive
:
13084 struct timespec ts
;
13087 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
13089 if (target_to_host_timespec(&ts
, arg5
)) {
13090 return -TARGET_EFAULT
;
13092 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13094 if (!is_error(ret
) && host_to_target_timespec(arg5
, &ts
)) {
13095 return -TARGET_EFAULT
;
13098 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13101 unlock_user (p
, arg2
, arg3
);
13103 put_user_u32(prio
, arg4
);
13107 #ifdef TARGET_NR_mq_timedreceive_time64
13108 case TARGET_NR_mq_timedreceive_time64
:
13110 struct timespec ts
;
13113 p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1);
13115 if (target_to_host_timespec64(&ts
, arg5
)) {
13116 return -TARGET_EFAULT
;
13118 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13120 if (!is_error(ret
) && host_to_target_timespec64(arg5
, &ts
)) {
13121 return -TARGET_EFAULT
;
13124 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
13127 unlock_user(p
, arg2
, arg3
);
13129 put_user_u32(prio
, arg4
);
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /*     break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));

        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD */
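    /*
     * The manual flag translation above is needed because the guest's
     * O_NONBLOCK/O_CLOEXEC bit values need not equal the host's (the
     * numeric value of O_NONBLOCK, for instance, differs between Linux
     * architectures), so the target bits are decoded and re-encoded as
     * host bits instead of being passed through numerically.
     */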
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
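    /*
     * The wait path below bounds maxevents before allocating: the count
     * is guest-controlled, so capping it at TARGET_EP_MAX_EVENTS avoids
     * an oversized host-side allocation and matches the kernel's own
     * EINVAL behaviour for out-of-range values.
     */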
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
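    /*
     * For RLIMIT_AS/DATA/STACK the new limit is deliberately not read or
     * forwarded (rnewp stays NULL, turning the call into a pure "get");
     * presumably because QEMU shares the address space with the guest,
     * letting the guest lower these limits could starve the emulator.
     */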
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
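    /*
     * m68k-specific kernel helper: it returns the old memory value so
     * the guest can tell whether the compare-and-exchange succeeded.
     * Note this emulation is not atomic across guest threads, hence the
     * start_exclusive remark below.
     */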
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
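    /*
     * Guest timer ids are handed back as TIMER_MAGIC | slot-index rather
     * than as a raw host timer_t: get_timer_id() can then validate the
     * magic bits and reject ids the guest has forged or corrupted.
     */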
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
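    /*
     * The *_time64 variants below are identical except that they use the
     * 64-bit itimerspec converters, giving 32-bit guests y2038-safe time
     * arguments.
     */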
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif
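    /*
     * Reads from a timerfd return a native-endian u64 expiration count;
     * registering target_timerfd_trans lets the fd-translation layer
     * byteswap that payload for cross-endian guests.
     */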
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
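    /*
     * fd_trans_unregister() drops any translator still registered for a
     * previously-closed fd with the same number, so the fresh memfd is
     * not accidentally run through a stale translation.
     */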
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
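    /*
     * As with splice above, NULL offset pointers select "use the file
     * offset"; the offsets are copied back to the guest only when some
     * bytes were actually moved.
     */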
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_riscv_hwprobe)
    case TARGET_NR_riscv_hwprobe:
        return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
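/*
 * do_syscall() is the thin tracing wrapper around the dispatcher above:
 * it records the syscall for plugins, emits -strace logging around the
 * call, and delegates the actual emulation to do_syscall1().
 */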
abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long ret;

#ifdef DEBUG_ERESTARTSYS
    /* Debug-only code for exercising the syscall-restart code paths
     * in the per-architecture cpu main loops: restart every syscall
     * the guest makes once before letting it through.
     */
    {
        static bool flag;
        flag = !flag;
        if (flag) {
            return -QEMU_ERESTARTSYS;
        }
    }
#endif

    record_syscall_start(cpu, num, arg1,
                         arg2, arg3, arg4, arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
    }

    ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
                      arg5, arg6, arg7, arg8);

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_syscall_ret(cpu_env, num, ret, arg1, arg2,
                          arg3, arg4, arg5, arg6);
    }

    record_syscall_return(cpu, num, ret);