/*--------------------------------------------------------------------*/
/*--- Handle system calls.                          syswrap-main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "libvex_guest_offsets.h"
#include "libvex_trc_values.h"
#include "pub_core_basics.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"      // For VG_(getpid)()
#include "pub_core_libcsignal.h"
#include "pub_core_scheduler.h"     // For VG_({acquire,release}_BigLock),
                                    //   and VG_(vg_yield)
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_tooliface.h"
#include "pub_core_options.h"
#include "pub_core_signals.h"       // For VG_SIGVGKILL, VG_(poll_signals)
#include "pub_core_syscall.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_syswrap.h"
#include "pub_core_gdbserver.h"     // VG_(gdbserver_report_syscall)

#include "priv_types_n_macros.h"
#include "priv_syswrap-main.h"

#if defined(VGO_darwin)
#include "priv_syswrap-darwin.h"
#endif
/* Useful info which needs to be recorded somewhere:
   Use of registers in syscalls is:

          NUM   ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 ARG7 ARG8 RESULT
   LINUX:
   x86    eax   ebx  ecx  edx  esi  edi  ebp  n/a  n/a  eax       (== NUM)
   amd64  rax   rdi  rsi  rdx  r10  r8   r9   n/a  n/a  rax       (== NUM)
   ppc32  r0    r3   r4   r5   r6   r7   r8   n/a  n/a  r3+CR0.SO (== ARG1)
   ppc64  r0    r3   r4   r5   r6   r7   r8   n/a  n/a  r3+CR0.SO (== ARG1)
   arm    r7    r0   r1   r2   r3   r4   r5   n/a  n/a  r0        (== ARG1)
   mips32 v0    a0   a1   a2   a3 stack stack n/a  n/a  v0        (== NUM)
   mips64 v0    a0   a1   a2   a3   a4   a5   a6   a7   v0        (== NUM)
   arm64  x8    x0   x1   x2   x3   x4   x5   n/a  n/a  x0        (== ARG1)

   On s390x the svc instruction is used for system calls.  The system call
   number is encoded in the instruction (8 bit immediate field).  Since Linux
   2.6 it is also allowed to use svc 0 with the system call number in r1.
   This was introduced for system calls >255, but works for all.  It is
   also possible to see svc 0 together with an EXecute instruction that
   fills in the immediate field.
   s390x  r1/SVC r2   r3   r4   r5   r6   r7   n/a  n/a  r2       (== ARG1)

          NUM   ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 ARG7 ARG8 RESULT
   DARWIN:
   x86    eax   +4   +8   +12  +16  +20  +24  +28  +32  edx:eax, eflags.c
   amd64  rax   rdi  rsi  rdx  rcx  r8   r9   +8   +16  rdx:rax, rflags.c

   For x86-darwin, "+N" denotes "in memory at N(%esp)"; ditto
   amd64-darwin.  Apparently 0(%esp) is some kind of return address
   (perhaps for syscalls done with "sysenter"?)  I don't think it is
   relevant for syscalls done with "int $0x80/1/2".

   SOLARIS:
   x86    eax   +4   +8   +12  +16  +20  +24  +28  +32  edx:eax, eflags.c
   amd64  rax   rdi  rsi  rdx  r10  r8   r9   +8   +16  rdx:rax, rflags.c

   "+N" denotes "in memory at N(%esp)".  Solaris also supports fasttrap
   syscalls.  Fasttraps do not take any parameters (except for the sysno
   in eax) and never fail (if the sysno is valid).
*/
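/* As a concrete reading of the amd64 Linux row above, here is a
   user-space sketch of issuing a raw syscall (for illustration only;
   nothing in Valgrind uses this snippet, and glibc's syscall(2)
   wrapper does the same job):

      // write(2): NUM in rax, args in rdi/rsi/rdx; the result, or a
      // negated errno, comes back in rax.  rcx/r11 are clobbered.
      long ret;
      asm volatile ("syscall"
                    : "=a" (ret)
                    : "a" (1L),      // __NR_write on amd64
                      "D" (1L),      // fd 1 (stdout)
                      "S" ("hi\n"),  // buf
                      "d" (3L)       // count
                    : "rcx", "r11", "memory");
*/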
/* This is the top level of the system-call handler module.  All
   system calls are channelled through here, doing two things:

   * notify the tool of the events (mem/reg reads, writes) happening

   * perform the syscall, usually by passing it along to the kernel

   A magical piece of assembly code, do_syscall_for_client_WRK, in
   syscall-$PLATFORM.S does the tricky bit of passing a syscall to the
   kernel, whilst having the simulator retain control.
*/
/* The main function is VG_(client_syscall).  The simulation calls it
   whenever a client thread wants to do a syscall.  The following is a
   sketch of what it does.

   * Ensures the root thread's stack is suitably mapped.  Tedious and
     arcane.  See big big comment in VG_(client_syscall).

   * First, it rounds up the syscall number and args (which is a
     platform dependent activity) and puts them in a struct ("args")
     and also a copy in "orig_args".

     The pre/post wrappers refer to these structs and so no longer
     need magic macros to access any specific registers.  This struct
     is stored in thread-specific storage.

   * The pre-wrapper is called, passing it a pointer to struct
     "args".

   * The pre-wrapper examines the args and pokes the tool
     appropriately.  It may modify the args; this is why "orig_args"
     is also kept.

     The pre-wrapper may choose to 'do' the syscall itself, and
     concludes one of three outcomes:

       Success(N)    -- syscall is already complete, with success;
                        result is N

       Fail(N)       -- syscall is already complete, with failure;
                        error code is N

       HandToKernel  -- (the usual case): this needs to be given to
                        the kernel to be done, using the values in
                        the possibly-modified "args" struct.

     In addition, the pre-wrapper may set some flags:

       MayBlock   -- only applicable when outcome==HandToKernel

       PostOnFail -- only applicable when outcome==HandToKernel or Fail

   * If the pre-outcome is HandToKernel, the syscall is duly handed
     off to the kernel (perhaps involving some thread switchery, but
     that's not important).  This reduces the possible set of outcomes
     to either Success(N) or Fail(N).

   * The outcome (Success(N) or Fail(N)) is written back to the guest
     register(s).  This is platform specific:

     x86:    Success(N) ==>  eax = N
             Fail(N)    ==>  eax = -N

     ppc32:  Success(N) ==>  r3 = N, CR0.SO = 0
             Fail(N)    ==>  r3 = N, CR0.SO = 1

     Darwin:
     x86:    Success(N) ==>  edx:eax = N, cc = 0
             Fail(N)    ==>  edx:eax = N, cc = 1

     s390x:  Success(N) ==>  r2 = N
             Fail(N)    ==>  r2 = -N

     Solaris:
     x86:    Success(N) ==>  edx:eax = N, cc = 0
             Fail(N)    ==>  eax = N, cc = 1
     Same applies for fasttraps except they never fail.

   * The post wrapper is called if:

     - outcome==Success or (outcome==Fail and PostOnFail is set)

     The post wrapper is passed the adulterated syscall args (struct
     "args"), and the syscall outcome (viz, Success(N) or Fail(N)).

   There are several other complications, primarily to do with
   syscalls getting interrupted, explained in comments in the code.
*/
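/* A compressed sketch of that sequence, in roughly the order the
   driver below performs it.  The helper names are the static
   functions defined later in this file; "sci", "ent", "mask" and the
   SsHandToKernel test stand in for the real bookkeeping, and the
   blocking/interrupt handling is omitted entirely (the post wrapper,
   when present, runs after the status has been written back):

      getSyscallArgsFromGuestState ( &sci->args, &tst->arch.vex, trc );
      sci->orig_args = sci->args;
      ent->before ( tid, &layout, &sci->args, &sci->status, &sci->flags );
      if (sci->status.what == SsHandToKernel) {
         putSyscallArgsIntoGuestState ( &sci->args, &tst->arch.vex );
         do_syscall_for_client ( sysno, tst, &mask );
         getSyscallStatusFromGuestState ( &sci->status, &tst->arch.vex );
      }
      putSyscallStatusIntoGuestState ( tid, &sci->status, &tst->arch.vex );
*/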
/* CAVEATS for writing wrappers.  It is important to follow these!

   The macros defined in priv_types_n_macros.h are designed to help
   decouple the wrapper logic from the actual representation of
   syscall args/results, since these wrappers are designed to work on
   multiple platforms.

   Sometimes a PRE wrapper will complete the syscall itself, without
   handing it to the kernel.  It will use one of SET_STATUS_Success,
   SET_STATUS_Failure or SET_STATUS_from_SysRes to set the return
   value.  It is critical to appreciate that use of the macro does not
   immediately cause the underlying guest state to be updated -- that
   is done by the driver logic in this file, when the wrapper returns.

   As a result, PRE wrappers of the following form will malfunction:

   PRE(fooble)
   {
      ... do stuff ...
      SET_STATUS_Somehow(...)

      // do something that assumes guest state is up to date
   }

   In particular, direct or indirect calls to VG_(poll_signals) after
   setting STATUS can cause the guest state to be read (in order to
   build signal frames).  Do not do this.  If you want a signal poll
   after the syscall goes through, do "*flags |= SfPollAfter" and the
   driver logic will do it for you.
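   To make the contrast concrete, a sketch of the safe shape of such a
   wrapper ("fooble" is a made-up name, used purely for illustration;
   the macros are the ones named above):

   PRE(fooble)
   {
      ... do stuff ...
      SET_STATUS_Success( result );
      // Don't touch guest state or call VG_(poll_signals) here; the
      // status set above is only applied when this wrapper returns.
      *flags |= SfPollAfter;
   }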
   Another critical requirement following introduction of new address
   space manager (JRS, 20050923):

   In a situation where the mappedness of memory has changed, aspacem
   should be notified BEFORE the tool.  Hence the following is
   correct:

      Bool d = VG_(am_notify_munmap)(s->start, s->end+1 - s->start);
      VG_TRACK( die_mem_munmap, s->start, s->end+1 - s->start );
      if (d)
         VG_(discard_translations)(s->start, s->end+1 - s->start);

   whilst this is wrong:

      VG_TRACK( die_mem_munmap, s->start, s->end+1 - s->start );
      Bool d = VG_(am_notify_munmap)(s->start, s->end+1 - s->start);
      if (d)
         VG_(discard_translations)(s->start, s->end+1 - s->start);

   The reason is that the tool may itself ask aspacem for more shadow
   memory as a result of the VG_TRACK call.  In such a situation it is
   critical that aspacem's segment array is up to date -- hence the
   need to notify aspacem first.

   Also .. take care to call VG_(discard_translations) whenever
   memory with execute permissions is unmapped.
*/
/* ---------------------------------------------------------------------
   Do potentially blocking syscall for the client, and mess with
   signal masks at the same time.
   ------------------------------------------------------------------ */

/* Perform a syscall on behalf of a client thread, using a specific
   signal mask.  On completion, the signal mask is set to restore_mask
   (which presumably blocks almost everything).  If a signal happens
   during the syscall, the handler should call
   VG_(fixup_guest_state_after_syscall_interrupted) to adjust the
   thread's context to do the right thing.

   The _WRK function is handwritten assembly, implemented per-platform
   in coregrind/m_syswrap/syscall-$PLAT.S.  It has some very magic
   properties.  See comments at the top of
   VG_(fixup_guest_state_after_syscall_interrupted) below for details.

   These functions are required to return zero in case of success
   (even if the syscall itself failed), and nonzero if the
   sigprocmask-swizzling calls failed.  We don't actually care about
   the failure values from sigprocmask, although most of the assembly
   implementations do attempt to return that, using the convention
   0 for success, or 0x8000 | error-code for failure.
*/
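/* For orientation, a sketch of how a caller can decode that return
   convention (illustration only; masking with 0xFFF to pull the error
   code out of the 0x8000-flagged value is an assumption about the
   packing described above):

      UWord err = ML_(do_syscall_for_client_WRK)( ... );
      if (err != 0)
         VG_(dmsg)("sigprocmask failed, code %lu\n", err & 0xFFF);
*/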
#if defined(VGO_linux)
extern
UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
                                      void* guest_state,
                                      const vki_sigset_t *syscall_mask,
                                      const vki_sigset_t *restore_mask,
                                      Word sigsetSzB );
#elif defined(VGO_darwin)
extern
UWord ML_(do_syscall_for_client_unix_WRK)( Word syscallno,
                                           void* guest_state,
                                           const vki_sigset_t *syscall_mask,
                                           const vki_sigset_t *restore_mask,
                                           Word sigsetSzB ); /* unused */
extern
UWord ML_(do_syscall_for_client_mach_WRK)( Word syscallno,
                                           void* guest_state,
                                           const vki_sigset_t *syscall_mask,
                                           const vki_sigset_t *restore_mask,
                                           Word sigsetSzB ); /* unused */
extern
UWord ML_(do_syscall_for_client_mdep_WRK)( Word syscallno,
                                           void* guest_state,
                                           const vki_sigset_t *syscall_mask,
                                           const vki_sigset_t *restore_mask,
                                           Word sigsetSzB ); /* unused */
#elif defined(VGO_solaris)
extern
UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
                                      void* guest_state,
                                      const vki_sigset_t *syscall_mask,
                                      const vki_sigset_t *restore_mask,
                                      UChar *cflag );
extern
UWord ML_(do_syscall_for_client_dret_WRK)( Word syscallno,
                                           void* guest_state,
                                           const vki_sigset_t *syscall_mask,
                                           const vki_sigset_t *restore_mask,
                                           UChar *cflag );
#endif

static
void do_syscall_for_client ( Int syscallno,
                             ThreadState* tst,
                             const vki_sigset_t* syscall_mask )
{
   vki_sigset_t saved;
   UWord err;
#  if defined(VGO_linux)
   err = ML_(do_syscall_for_client_WRK)(
            syscallno, &tst->arch.vex,
            syscall_mask, &saved, sizeof(vki_sigset_t)
         );
#  elif defined(VGO_darwin)
   switch (VG_DARWIN_SYSNO_CLASS(syscallno)) {
      case VG_DARWIN_SYSCALL_CLASS_UNIX:
         err = ML_(do_syscall_for_client_unix_WRK)(
                  VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
                  syscall_mask, &saved, 0/*unused:sigsetSzB*/
               );
         break;
      case VG_DARWIN_SYSCALL_CLASS_MACH:
         err = ML_(do_syscall_for_client_mach_WRK)(
                  VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
                  syscall_mask, &saved, 0/*unused:sigsetSzB*/
               );
         break;
      case VG_DARWIN_SYSCALL_CLASS_MDEP:
         err = ML_(do_syscall_for_client_mdep_WRK)(
                  VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
                  syscall_mask, &saved, 0/*unused:sigsetSzB*/
               );
         break;
   }
#  elif defined(VGO_solaris)
   UChar cflag;

   /* Fasttraps or anything else cannot go through this path. */
   vg_assert(VG_SOLARIS_SYSNO_CLASS(syscallno)
             == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);

   /* If the syscall is a door_return call then it has to be handled very
      differently. */
   if (tst->os_state.in_door_return)
      err = ML_(do_syscall_for_client_dret_WRK)(
               syscallno, &tst->arch.vex,
               syscall_mask, &saved, &cflag
            );
   else
      err = ML_(do_syscall_for_client_WRK)(
               syscallno, &tst->arch.vex,
               syscall_mask, &saved, &cflag
            );

   /* Save the carry flag. */
#  if defined(VGP_x86_solaris)
   LibVEX_GuestX86_put_eflag_c(cflag, &tst->arch.vex);
#  elif defined(VGP_amd64_solaris)
   LibVEX_GuestAMD64_put_rflag_c(cflag, &tst->arch.vex);
#  else
#   error "Unknown platform"
#  endif
400 "ML_(do_syscall_for_client_WRK): sigprocmask error %lu",
/* ---------------------------------------------------------------------
   Impedance matchers and misc helpers
   ------------------------------------------------------------------ */

static
Bool eq_SyscallArgs ( SyscallArgs* a1, SyscallArgs* a2 )
{
   return a1->sysno == a2->sysno
          && a1->arg1 == a2->arg1
          && a1->arg2 == a2->arg2
          && a1->arg3 == a2->arg3
          && a1->arg4 == a2->arg4
          && a1->arg5 == a2->arg5
          && a1->arg6 == a2->arg6
          && a1->arg7 == a2->arg7
          && a1->arg8 == a2->arg8;
}
static
Bool eq_SyscallStatus ( UInt sysno, SyscallStatus* s1, SyscallStatus* s2 )
{
   /* was: return s1->what == s2->what && sr_EQ( s1->sres, s2->sres ); */
   if (s1->what == s2->what && sr_EQ( sysno, s1->sres, s2->sres ))
      return True;
#  if defined(VGO_darwin)
   /* Darwin-specific debugging guff */
   vg_assert(s1->what == s2->what);
   VG_(printf)("eq_SyscallStatus:\n");
   VG_(printf)("  {%lu %lu %u}\n", s1->sres._wLO, s1->sres._wHI, s1->sres._mode);
   VG_(printf)("  {%lu %lu %u}\n", s2->sres._wLO, s2->sres._wHI, s2->sres._mode);
#  endif
   return False;
}
/* Convert between SysRes and SyscallStatus, to the extent possible. */

static
SyscallStatus convert_SysRes_to_SyscallStatus ( SysRes res )
{
   SyscallStatus status;
   status.what = SsComplete;
   status.sres = res;
   return status;
}

/* Impedance matchers.  These convert syscall arg or result data from
   the platform-specific in-guest-state format to the canonical
   formats, and back. */
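/* The intended invariant (a sketch, not an assertion made anywhere in
   this file): reading the args out of a guest state and writing them
   straight back should be a no-op, since the two functions below are
   exact per-platform mirrors of each other:

      SyscallArgs a;
      getSyscallArgsFromGuestState ( &a, &tst->arch.vex, trc );
      putSyscallArgsIntoGuestState ( &a, &tst->arch.vex );
      // tst->arch.vex is unchanged: the same registers / stack slots
      // are written back with the values just read.
*/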
static
void getSyscallArgsFromGuestState ( /*OUT*/SyscallArgs*       canonical,
                                    /*IN*/ VexGuestArchState* gst_vanilla,
                                    /*IN*/ UInt trc )
{
#if defined(VGP_x86_linux)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   canonical->sysno = gst->guest_EAX;
   canonical->arg1  = gst->guest_EBX;
   canonical->arg2  = gst->guest_ECX;
   canonical->arg3  = gst->guest_EDX;
   canonical->arg4  = gst->guest_ESI;
   canonical->arg5  = gst->guest_EDI;
   canonical->arg6  = gst->guest_EBP;

#elif defined(VGP_amd64_linux)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   canonical->sysno = gst->guest_RAX;
   canonical->arg1  = gst->guest_RDI;
   canonical->arg2  = gst->guest_RSI;
   canonical->arg3  = gst->guest_RDX;
   canonical->arg4  = gst->guest_R10;
   canonical->arg5  = gst->guest_R8;
   canonical->arg6  = gst->guest_R9;

#elif defined(VGP_ppc32_linux)
   VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
   canonical->sysno = gst->guest_GPR0;
   canonical->arg1  = gst->guest_GPR3;
   canonical->arg2  = gst->guest_GPR4;
   canonical->arg3  = gst->guest_GPR5;
   canonical->arg4  = gst->guest_GPR6;
   canonical->arg5  = gst->guest_GPR7;
   canonical->arg6  = gst->guest_GPR8;

#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
   canonical->sysno = gst->guest_GPR0;
   canonical->arg1  = gst->guest_GPR3;
   canonical->arg2  = gst->guest_GPR4;
   canonical->arg3  = gst->guest_GPR5;
   canonical->arg4  = gst->guest_GPR6;
   canonical->arg5  = gst->guest_GPR7;
   canonical->arg6  = gst->guest_GPR8;

#elif defined(VGP_arm_linux)
   VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
   canonical->sysno = gst->guest_R7;
   canonical->arg1  = gst->guest_R0;
   canonical->arg2  = gst->guest_R1;
   canonical->arg3  = gst->guest_R2;
   canonical->arg4  = gst->guest_R3;
   canonical->arg5  = gst->guest_R4;
   canonical->arg6  = gst->guest_R5;

#elif defined(VGP_arm64_linux)
   VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
   canonical->sysno = gst->guest_X8;
   canonical->arg1  = gst->guest_X0;
   canonical->arg2  = gst->guest_X1;
   canonical->arg3  = gst->guest_X2;
   canonical->arg4  = gst->guest_X3;
   canonical->arg5  = gst->guest_X4;
   canonical->arg6  = gst->guest_X5;

#elif defined(VGP_mips32_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   canonical->sysno = gst->guest_r2;    // v0
   if (canonical->sysno == __NR_exit) {
      canonical->arg1 = gst->guest_r4;    // a0
   } else if (canonical->sysno != __NR_syscall) {
      canonical->arg1 = gst->guest_r4;    // a0
      canonical->arg2 = gst->guest_r5;    // a1
      canonical->arg3 = gst->guest_r6;    // a2
      canonical->arg4 = gst->guest_r7;    // a3
      canonical->arg5 = *((UInt*) (gst->guest_r29 + 16));    // 16(guest_SP)
      canonical->arg6 = *((UInt*) (gst->guest_r29 + 20));    // 20(guest_SP)
      canonical->arg7 = *((UInt*) (gst->guest_r29 + 24));    // 24(guest_SP)
   } else {
      // Fixme hack handle syscall()
      canonical->sysno = gst->guest_r4;    // a0
      canonical->arg1  = gst->guest_r5;    // a1
      canonical->arg2  = gst->guest_r6;    // a2
      canonical->arg3  = gst->guest_r7;    // a3
      canonical->arg4  = *((UInt*) (gst->guest_r29 + 16));    // 16(guest_SP/sp)
      canonical->arg5  = *((UInt*) (gst->guest_r29 + 20));    // 20(guest_SP/sp)
      canonical->arg6  = *((UInt*) (gst->guest_r29 + 24));    // 24(guest_SP/sp)
      canonical->arg7  = *((UInt*) (gst->guest_r29 + 28));    // 28(guest_SP/sp)
      canonical->arg8  = __NR_syscall;
   }

#elif defined(VGP_mips64_linux)
   VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
   canonical->sysno = gst->guest_r2;    // v0
   canonical->arg1  = gst->guest_r4;    // a0
   canonical->arg2  = gst->guest_r5;    // a1
   canonical->arg3  = gst->guest_r6;    // a2
   canonical->arg4  = gst->guest_r7;    // a3
   canonical->arg5  = gst->guest_r8;    // a4
   canonical->arg6  = gst->guest_r9;    // a5
   canonical->arg7  = gst->guest_r10;   // a6
   canonical->arg8  = gst->guest_r11;   // a7

#elif defined(VGP_nanomips_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   canonical->sysno = gst->guest_r2;    // t4
   canonical->arg1  = gst->guest_r4;    // a0
   canonical->arg2  = gst->guest_r5;    // a1
   canonical->arg3  = gst->guest_r6;    // a2
   canonical->arg4  = gst->guest_r7;    // a3
   canonical->arg5  = gst->guest_r8;    // a4
   canonical->arg6  = gst->guest_r9;    // a5
   canonical->arg7  = gst->guest_r10;   // a6
   canonical->arg8  = gst->guest_r11;   // a7
#elif defined(VGP_x86_darwin)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_ESP;
   // GrP fixme hope syscalls aren't called with really shallow stacks...
   canonical->sysno = gst->guest_EAX;
   if (canonical->sysno != 0) {
      // stack[0] is return address
      canonical->arg1  = stack[1];
      canonical->arg2  = stack[2];
      canonical->arg3  = stack[3];
      canonical->arg4  = stack[4];
      canonical->arg5  = stack[5];
      canonical->arg6  = stack[6];
      canonical->arg7  = stack[7];
      canonical->arg8  = stack[8];
   } else {
      // GrP fixme hack handle syscall()
      // GrP fixme what about __syscall() ?
      // stack[0] is return address
      // DDD: the tool can't see that the params have been shifted!  Can
      // lead to incorrect checking, I think, because the PRRAn/PSARn
      // macros will mention the pre-shifted args.
      canonical->sysno = stack[1];
      vg_assert(canonical->sysno != 0);
      canonical->arg1  = stack[2];
      canonical->arg2  = stack[3];
      canonical->arg3  = stack[4];
      canonical->arg4  = stack[5];
      canonical->arg5  = stack[6];
      canonical->arg6  = stack[7];
      canonical->arg7  = stack[8];
      canonical->arg8  = stack[9];

      PRINT("SYSCALL[%d,?](0)  syscall(%s, ...); please stand by...\n",
            VG_(getpid)(), /*tid,*/
            VG_SYSNUM_STRING(canonical->sysno));
   }

   // Here we determine what kind of syscall it was by looking at the
   // interrupt kind, and then encode the syscall number using the 64-bit
   // encoding for Valgrind's internal use.
   //
   // DDD: Would it be better to stash the JMP kind into the Darwin
   // thread state rather than passing in the trc?
   switch (trc) {
      case VEX_TRC_JMP_SYS_INT128:
         // int $0x80 = Unix, 64-bit result
         vg_assert(canonical->sysno >= 0);
         canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(canonical->sysno);
         break;
      case VEX_TRC_JMP_SYS_SYSENTER:
         // syscall = Unix, 32-bit result
         //  OR       Mach, 32-bit result
         if (canonical->sysno >= 0) {
            // GrP fixme hack: 0xffff == I386_SYSCALL_NUMBER_MASK
            canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(canonical->sysno
                                                                & 0xffff);
         } else {
            canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MACH(-canonical->sysno);
         }
         break;
      case VEX_TRC_JMP_SYS_INT129:
         // int $0x81 = Mach, 32-bit result
         vg_assert(canonical->sysno < 0);
         canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MACH(-canonical->sysno);
         break;
      case VEX_TRC_JMP_SYS_INT130:
         // int $0x82 = mdep, 32-bit result
         vg_assert(canonical->sysno >= 0);
         canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MDEP(canonical->sysno);
         break;
   }

#elif defined(VGP_amd64_darwin)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_RSP;

   vg_assert(trc == VEX_TRC_JMP_SYS_SYSCALL);

   // GrP fixme hope syscalls aren't called with really shallow stacks...
   canonical->sysno = gst->guest_RAX;
   if (canonical->sysno != __NR_syscall) {
      // stack[0] is return address
      canonical->arg1  = gst->guest_RDI;
      canonical->arg2  = gst->guest_RSI;
      canonical->arg3  = gst->guest_RDX;
      canonical->arg4  = gst->guest_R10;  // not rcx with syscall insn
      canonical->arg5  = gst->guest_R8;
      canonical->arg6  = gst->guest_R9;
      canonical->arg7  = stack[1];
      canonical->arg8  = stack[2];
   } else {
      // GrP fixme hack handle syscall()
      // GrP fixme what about __syscall() ?
      // stack[0] is return address
      // DDD: the tool can't see that the params have been shifted!  Can
      // lead to incorrect checking, I think, because the PRRAn/PSARn
      // macros will mention the pre-shifted args.
      canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(gst->guest_RDI);
      vg_assert(canonical->sysno != __NR_syscall);
      canonical->arg1  = gst->guest_RSI;
      canonical->arg2  = gst->guest_RDX;
      canonical->arg3  = gst->guest_R10;  // not rcx with syscall insn
      canonical->arg4  = gst->guest_R8;
      canonical->arg5  = gst->guest_R9;
      canonical->arg6  = stack[1];
      canonical->arg7  = stack[2];
      canonical->arg8  = stack[3];

      PRINT("SYSCALL[%d,?](0)  syscall(%s, ...); please stand by...\n",
            VG_(getpid)(), /*tid,*/
            VG_SYSNUM_STRING(canonical->sysno));
   }

   // no canonical->sysno adjustment needed

#elif defined(VGP_s390x_linux)
   VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
   canonical->sysno = gst->guest_SYSNO;
   canonical->arg1  = gst->guest_r2;
   canonical->arg2  = gst->guest_r3;
   canonical->arg3  = gst->guest_r4;
   canonical->arg4  = gst->guest_r5;
   canonical->arg5  = gst->guest_r6;
   canonical->arg6  = gst->guest_r7;

#elif defined(VGP_x86_solaris)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_ESP;
   canonical->sysno = gst->guest_EAX;
   /* stack[0] is a return address. */
   canonical->arg1  = stack[1];
   canonical->arg2  = stack[2];
   canonical->arg3  = stack[3];
   canonical->arg4  = stack[4];
   canonical->arg5  = stack[5];
   canonical->arg6  = stack[6];
   canonical->arg7  = stack[7];
   canonical->arg8  = stack[8];

   switch (trc) {
      case VEX_TRC_JMP_SYS_INT145:
      case VEX_TRC_JMP_SYS_SYSENTER:
      case VEX_TRC_JMP_SYS_SYSCALL:
      /* These three are not actually valid syscall instructions on Solaris.
         Pretend for now that we handle them as normal syscalls. */
      case VEX_TRC_JMP_SYS_INT128:
      case VEX_TRC_JMP_SYS_INT129:
      case VEX_TRC_JMP_SYS_INT130:
         /* int $0x91, sysenter, syscall = normal syscall */
         break;
      case VEX_TRC_JMP_SYS_INT210:
         /* int $0xD2 = fasttrap */
         canonical->sysno
            = VG_SOLARIS_SYSCALL_CONSTRUCT_FASTTRAP(canonical->sysno);
         break;
   }

#elif defined(VGP_amd64_solaris)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_RSP;
   canonical->sysno = gst->guest_RAX;
   /* stack[0] is a return address. */
   canonical->arg1  = gst->guest_RDI;
   canonical->arg2  = gst->guest_RSI;
   canonical->arg3  = gst->guest_RDX;
   canonical->arg4  = gst->guest_R10;  /* Not RCX with syscall. */
   canonical->arg5  = gst->guest_R8;
   canonical->arg6  = gst->guest_R9;
   canonical->arg7  = stack[1];
   canonical->arg8  = stack[2];

   switch (trc) {
      case VEX_TRC_JMP_SYS_SYSCALL:
         /* syscall = normal syscall */
         break;
      case VEX_TRC_JMP_SYS_INT210:
         /* int $0xD2 = fasttrap */
         canonical->sysno
            = VG_SOLARIS_SYSCALL_CONSTRUCT_FASTTRAP(canonical->sysno);
         break;
   }

#else
#  error "getSyscallArgsFromGuestState: unknown arch"
#endif
}
static
void putSyscallArgsIntoGuestState ( /*IN*/ SyscallArgs*       canonical,
                                    /*OUT*/VexGuestArchState* gst_vanilla )
{
#if defined(VGP_x86_linux)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   gst->guest_EAX = canonical->sysno;
   gst->guest_EBX = canonical->arg1;
   gst->guest_ECX = canonical->arg2;
   gst->guest_EDX = canonical->arg3;
   gst->guest_ESI = canonical->arg4;
   gst->guest_EDI = canonical->arg5;
   gst->guest_EBP = canonical->arg6;

#elif defined(VGP_amd64_linux)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   gst->guest_RAX = canonical->sysno;
   gst->guest_RDI = canonical->arg1;
   gst->guest_RSI = canonical->arg2;
   gst->guest_RDX = canonical->arg3;
   gst->guest_R10 = canonical->arg4;
   gst->guest_R8  = canonical->arg5;
   gst->guest_R9  = canonical->arg6;

#elif defined(VGP_ppc32_linux)
   VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
   gst->guest_GPR0 = canonical->sysno;
   gst->guest_GPR3 = canonical->arg1;
   gst->guest_GPR4 = canonical->arg2;
   gst->guest_GPR5 = canonical->arg3;
   gst->guest_GPR6 = canonical->arg4;
   gst->guest_GPR7 = canonical->arg5;
   gst->guest_GPR8 = canonical->arg6;

#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
   gst->guest_GPR0 = canonical->sysno;
   gst->guest_GPR3 = canonical->arg1;
   gst->guest_GPR4 = canonical->arg2;
   gst->guest_GPR5 = canonical->arg3;
   gst->guest_GPR6 = canonical->arg4;
   gst->guest_GPR7 = canonical->arg5;
   gst->guest_GPR8 = canonical->arg6;

#elif defined(VGP_arm_linux)
   VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
   gst->guest_R7 = canonical->sysno;
   gst->guest_R0 = canonical->arg1;
   gst->guest_R1 = canonical->arg2;
   gst->guest_R2 = canonical->arg3;
   gst->guest_R3 = canonical->arg4;
   gst->guest_R4 = canonical->arg5;
   gst->guest_R5 = canonical->arg6;

#elif defined(VGP_arm64_linux)
   VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
   gst->guest_X8 = canonical->sysno;
   gst->guest_X0 = canonical->arg1;
   gst->guest_X1 = canonical->arg2;
   gst->guest_X2 = canonical->arg3;
   gst->guest_X3 = canonical->arg4;
   gst->guest_X4 = canonical->arg5;
   gst->guest_X5 = canonical->arg6;

#elif defined(VGP_x86_darwin)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_ESP;

   gst->guest_EAX = VG_DARWIN_SYSNO_FOR_KERNEL(canonical->sysno);

   // GrP fixme? gst->guest_TEMP_EFLAG_C = 0;
   // stack[0] is return address
   stack[1] = canonical->arg1;
   stack[2] = canonical->arg2;
   stack[3] = canonical->arg3;
   stack[4] = canonical->arg4;
   stack[5] = canonical->arg5;
   stack[6] = canonical->arg6;
   stack[7] = canonical->arg7;
   stack[8] = canonical->arg8;

#elif defined(VGP_amd64_darwin)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_RSP;

   gst->guest_RAX = VG_DARWIN_SYSNO_FOR_KERNEL(canonical->sysno);
   // GrP fixme? gst->guest_TEMP_EFLAG_C = 0;

   // stack[0] is return address
   gst->guest_RDI = canonical->arg1;
   gst->guest_RSI = canonical->arg2;
   gst->guest_RDX = canonical->arg3;
   gst->guest_RCX = canonical->arg4;
   gst->guest_R8  = canonical->arg5;
   gst->guest_R9  = canonical->arg6;
   stack[1]       = canonical->arg7;
   stack[2]       = canonical->arg8;

#elif defined(VGP_s390x_linux)
   VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
   gst->guest_SYSNO = canonical->sysno;
   gst->guest_r2    = canonical->arg1;
   gst->guest_r3    = canonical->arg2;
   gst->guest_r4    = canonical->arg3;
   gst->guest_r5    = canonical->arg4;
   gst->guest_r6    = canonical->arg5;
   gst->guest_r7    = canonical->arg6;

#elif defined(VGP_mips32_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   if (canonical->arg8 != __NR_syscall) {
      gst->guest_r2 = canonical->sysno;
      gst->guest_r4 = canonical->arg1;
      gst->guest_r5 = canonical->arg2;
      gst->guest_r6 = canonical->arg3;
      gst->guest_r7 = canonical->arg4;
      *((UInt*) (gst->guest_r29 + 16)) = canonical->arg5;    // 16(guest_GPR29/sp)
      *((UInt*) (gst->guest_r29 + 20)) = canonical->arg6;    // 20(sp)
      *((UInt*) (gst->guest_r29 + 24)) = canonical->arg7;    // 24(sp)
   } else {
      gst->guest_r2 = __NR_syscall;
      gst->guest_r4 = canonical->sysno;
      gst->guest_r5 = canonical->arg1;
      gst->guest_r6 = canonical->arg2;
      gst->guest_r7 = canonical->arg3;
      *((UInt*) (gst->guest_r29 + 16)) = canonical->arg4;    // 16(guest_GPR29/sp)
      *((UInt*) (gst->guest_r29 + 20)) = canonical->arg5;    // 20(sp)
      *((UInt*) (gst->guest_r29 + 24)) = canonical->arg6;    // 24(sp)
      *((UInt*) (gst->guest_r29 + 28)) = canonical->arg7;    // 28(sp)
   }

#elif defined(VGP_nanomips_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   gst->guest_r2  = canonical->sysno;
   gst->guest_r4  = canonical->arg1;
   gst->guest_r5  = canonical->arg2;
   gst->guest_r6  = canonical->arg3;
   gst->guest_r7  = canonical->arg4;
   gst->guest_r8  = canonical->arg5;
   gst->guest_r9  = canonical->arg6;
   gst->guest_r10 = canonical->arg7;
   gst->guest_r11 = canonical->arg8;
#elif defined(VGP_mips64_linux)
   VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
   gst->guest_r2  = canonical->sysno;
   gst->guest_r4  = canonical->arg1;
   gst->guest_r5  = canonical->arg2;
   gst->guest_r6  = canonical->arg3;
   gst->guest_r7  = canonical->arg4;
   gst->guest_r8  = canonical->arg5;
   gst->guest_r9  = canonical->arg6;
   gst->guest_r10 = canonical->arg7;
   gst->guest_r11 = canonical->arg8;

#elif defined(VGP_x86_solaris)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_ESP;

   /* Fasttraps or anything else cannot go through this way. */
   vg_assert(VG_SOLARIS_SYSNO_CLASS(canonical->sysno)
             == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
   gst->guest_EAX = canonical->sysno;
   /* stack[0] is a return address. */
   stack[1] = canonical->arg1;
   stack[2] = canonical->arg2;
   stack[3] = canonical->arg3;
   stack[4] = canonical->arg4;
   stack[5] = canonical->arg5;
   stack[6] = canonical->arg6;
   stack[7] = canonical->arg7;
   stack[8] = canonical->arg8;

#elif defined(VGP_amd64_solaris)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   UWord *stack = (UWord *)gst->guest_RSP;

   /* Fasttraps or anything else cannot go through this way. */
   vg_assert(VG_SOLARIS_SYSNO_CLASS(canonical->sysno)
             == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
   gst->guest_RAX = canonical->sysno;
   /* stack[0] is a return address. */
   gst->guest_RDI = canonical->arg1;
   gst->guest_RSI = canonical->arg2;
   gst->guest_RDX = canonical->arg3;
   gst->guest_R10 = canonical->arg4;
   gst->guest_R8  = canonical->arg5;
   gst->guest_R9  = canonical->arg6;
   stack[1] = canonical->arg7;
   stack[2] = canonical->arg8;

#else
#  error "putSyscallArgsIntoGuestState: unknown arch"
#endif
}
static
void getSyscallStatusFromGuestState ( /*OUT*/SyscallStatus*     canonical,
                                      /*IN*/ VexGuestArchState* gst_vanilla )
{
#  if defined(VGP_x86_linux)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_x86_linux)( gst->guest_EAX );
   canonical->what = SsComplete;

#  elif defined(VGP_amd64_linux)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_amd64_linux)( gst->guest_RAX );
   canonical->what = SsComplete;

#  elif defined(VGP_ppc32_linux)
   VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
   UInt cr    = LibVEX_GuestPPC32_get_CR( gst );
   UInt cr0so = (cr >> 28) & 1;
   canonical->sres = VG_(mk_SysRes_ppc32_linux)( gst->guest_GPR3, cr0so );
   canonical->what = SsComplete;

#  elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
   UInt cr    = LibVEX_GuestPPC64_get_CR( gst );
   UInt cr0so = (cr >> 28) & 1;
   canonical->sres = VG_(mk_SysRes_ppc64_linux)( gst->guest_GPR3, cr0so );
   canonical->what = SsComplete;

#  elif defined(VGP_arm_linux)
   VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_arm_linux)( gst->guest_R0 );
   canonical->what = SsComplete;

#  elif defined(VGP_arm64_linux)
   VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_arm64_linux)( gst->guest_X0 );
   canonical->what = SsComplete;

#  elif defined(VGP_mips32_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   UInt v0 = gst->guest_r2;    // v0
   UInt v1 = gst->guest_r3;    // v1
   UInt a3 = gst->guest_r7;    // a3
   canonical->sres = VG_(mk_SysRes_mips32_linux)( v0, v1, a3 );
   canonical->what = SsComplete;

#  elif defined(VGP_mips64_linux)
   VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
   ULong v0 = gst->guest_r2;    // v0
   ULong v1 = gst->guest_r3;    // v1
   ULong a3 = gst->guest_r7;    // a3
   canonical->sres = VG_(mk_SysRes_mips64_linux)(v0, v1, a3);
   canonical->what = SsComplete;

#  elif defined(VGP_nanomips_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   RegWord a0 = gst->guest_r4;    // a0
   canonical->sres = VG_(mk_SysRes_nanomips_linux)(a0);
   canonical->what = SsComplete;

#  elif defined(VGP_x86_darwin)
   /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UInt carry = 1 & LibVEX_GuestX86_get_eflags(gst);
   UInt err = 0;
   UInt wLO = 0;
   UInt wHI = 0;
   switch (gst->guest_SC_CLASS) {
      case VG_DARWIN_SYSCALL_CLASS_UNIX:
         // int $0x80 = Unix, 64-bit result
         err = carry;
         wLO = gst->guest_EAX;
         wHI = gst->guest_EDX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MACH:
         // int $0x81 = Mach, 32-bit result
         wLO = gst->guest_EAX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MDEP:
         // int $0x82 = mdep, 32-bit result
         wLO = gst->guest_EAX;
         break;
   }
   canonical->sres = VG_(mk_SysRes_x86_darwin)(
                        gst->guest_SC_CLASS, err ? True : False,
                        wHI, wLO
                     );
   canonical->what = SsComplete;

#  elif defined(VGP_amd64_darwin)
   /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   ULong carry = 1 & LibVEX_GuestAMD64_get_rflags(gst);
   ULong err = 0;
   ULong wLO = 0;
   ULong wHI = 0;
   switch (gst->guest_SC_CLASS) {
      case VG_DARWIN_SYSCALL_CLASS_UNIX:
         // syscall = Unix, 128-bit result
         err = carry;
         wLO = gst->guest_RAX;
         wHI = gst->guest_RDX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MACH:
         // syscall = Mach, 64-bit result
         wLO = gst->guest_RAX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MDEP:
         // syscall = mdep, 64-bit result
         wLO = gst->guest_RAX;
         break;
   }
   canonical->sres = VG_(mk_SysRes_amd64_darwin)(
                        gst->guest_SC_CLASS, err ? True : False,
                        wHI, wLO
                     );
   canonical->what = SsComplete;

#  elif defined(VGP_s390x_linux)
   VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_s390x_linux)( gst->guest_r2 );
   canonical->what = SsComplete;

#  elif defined(VGP_x86_solaris)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UInt carry = 1 & LibVEX_GuestX86_get_eflags(gst);

   canonical->sres = VG_(mk_SysRes_x86_solaris)(carry ? True : False,
                                                gst->guest_EAX,
                                                carry ? 0 : gst->guest_EDX);
   canonical->what = SsComplete;

#  elif defined(VGP_amd64_solaris)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   UInt carry = 1 & LibVEX_GuestAMD64_get_rflags(gst);

   canonical->sres = VG_(mk_SysRes_amd64_solaris)(carry ? True : False,
                                                  gst->guest_RAX,
                                                  carry ? 0 : gst->guest_RDX);
   canonical->what = SsComplete;

#  else
#    error "getSyscallStatusFromGuestState: unknown arch"
#  endif
}
static
void putSyscallStatusIntoGuestState ( /*IN*/ ThreadId tid,
                                      /*IN*/ SyscallStatus*     canonical,
                                      /*OUT*/VexGuestArchState* gst_vanilla )
{
#  if defined(VGP_x86_linux)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-x86 scheme.  Oh well. */
      gst->guest_EAX = - (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_EAX = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_x86_EAX, sizeof(UWord) );

#  elif defined(VGP_amd64_linux)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-amd64 scheme.  Oh well. */
      gst->guest_RAX = - (Long)sr_Err(canonical->sres);
   } else {
      gst->guest_RAX = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_amd64_RAX, sizeof(UWord) );

#  elif defined(VGP_ppc32_linux)
   VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
   UInt old_cr = LibVEX_GuestPPC32_get_CR(gst);
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      LibVEX_GuestPPC32_put_CR( old_cr | (1<<28), gst );
      gst->guest_GPR3 = sr_Err(canonical->sres);
   } else {
      LibVEX_GuestPPC32_put_CR( old_cr & ~(1<<28), gst );
      gst->guest_GPR3 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc32_GPR3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc32_CR0_0, sizeof(UChar) );

#  elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
   UInt old_cr = LibVEX_GuestPPC64_get_CR(gst);
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      LibVEX_GuestPPC64_put_CR( old_cr | (1<<28), gst );
      gst->guest_GPR3 = sr_Err(canonical->sres);
   } else {
      LibVEX_GuestPPC64_put_CR( old_cr & ~(1<<28), gst );
      gst->guest_GPR3 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc64_GPR3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc64_CR0_0, sizeof(UChar) );

#  elif defined(VGP_arm_linux)
   VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-arm scheme.  Oh well. */
      gst->guest_R0 = - (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_R0 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_arm_R0, sizeof(UWord) );

#  elif defined(VGP_arm64_linux)
   VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-arm64 scheme.  Oh well. */
      gst->guest_X0 = - (Long)sr_Err(canonical->sres);
   } else {
      gst->guest_X0 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_arm64_X0, sizeof(UWord) );

#elif defined(VGP_x86_darwin)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);
   /* Unfortunately here we have to break abstraction and look
      directly inside 'res', in order to decide what to do. */
   switch (sres._mode) {
      case SysRes_MACH: // int $0x81 = Mach, 32-bit result
      case SysRes_MDEP: // int $0x82 = mdep, 32-bit result
         gst->guest_EAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_x86_EAX, sizeof(UInt) );
         break;
      case SysRes_UNIX_OK:  // int $0x80 = Unix, 64-bit result
      case SysRes_UNIX_ERR: // int $0x80 = Unix, 64-bit error
         gst->guest_EAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_x86_EAX, sizeof(UInt) );
         gst->guest_EDX = sres._wHI;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_x86_EDX, sizeof(UInt) );
         LibVEX_GuestX86_put_eflag_c( sres._mode==SysRes_UNIX_ERR ? 1 : 0,
                                      gst );
         // GrP fixme sets defined for entire eflags, not just bit c
         // DDD: this breaks exp-ptrcheck.
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UInt) );
         break;
   }

#elif defined(VGP_amd64_darwin)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);
   /* Unfortunately here we have to break abstraction and look
      directly inside 'res', in order to decide what to do. */
   switch (sres._mode) {
      case SysRes_MACH: // syscall = Mach, 64-bit result
      case SysRes_MDEP: // syscall = mdep, 64-bit result
         gst->guest_RAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_amd64_RAX, sizeof(ULong) );
         break;
      case SysRes_UNIX_OK:  // syscall = Unix, 128-bit result
      case SysRes_UNIX_ERR: // syscall = Unix, 128-bit error
         gst->guest_RAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_amd64_RAX, sizeof(ULong) );
         gst->guest_RDX = sres._wHI;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_amd64_RDX, sizeof(ULong) );
         LibVEX_GuestAMD64_put_rflag_c( sres._mode==SysRes_UNIX_ERR ? 1 : 0,
                                        gst );
         // GrP fixme sets defined for entire rflags, not just bit c
         // DDD: this breaks exp-ptrcheck.
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(ULong) );
         break;
   }

#  elif defined(VGP_s390x_linux)
   VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      gst->guest_r2 = - (Long)sr_Err(canonical->sres);
   } else {
      gst->guest_r2 = sr_Res(canonical->sres);
   }

#  elif defined(VGP_mips32_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      gst->guest_r2 = (Int)sr_Err(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_r2 = sr_Res(canonical->sres);
      gst->guest_r3 = sr_ResEx(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips32_r2, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips32_r3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips32_r7, sizeof(UWord) );

#  elif defined(VGP_mips64_linux)
   VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      gst->guest_r2 = (Int)sr_Err(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_r2 = sr_Res(canonical->sres);
      gst->guest_r3 = sr_ResEx(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips64_r2, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips64_r3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips64_r7, sizeof(UWord) );

#  elif defined(VGP_nanomips_linux)
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   gst->guest_r4 = canonical->sres._val;
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips32_r4, sizeof(UWord) );

#  elif defined(VGP_x86_solaris)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);

   if (sr_isError(sres)) {
      gst->guest_EAX = sr_Err(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EAX,
               sizeof(UWord));
      LibVEX_GuestX86_put_eflag_c(1, gst);
   } else {
      gst->guest_EAX = sr_Res(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EAX,
               sizeof(UWord));
      gst->guest_EDX = sr_ResHI(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EDX,
               sizeof(UWord));
      LibVEX_GuestX86_put_eflag_c(0, gst);
   }
   /* Make CC_DEP1 and CC_DEP2 defined.  This is inaccurate because it makes
      other eflags defined too (see README.solaris). */
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestX86State,
            guest_CC_DEP1), sizeof(UInt));
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestX86State,
            guest_CC_DEP2), sizeof(UInt));

#  elif defined(VGP_amd64_solaris)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);

   if (sr_isError(sres)) {
      gst->guest_RAX = sr_Err(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RAX,
               sizeof(UWord));
      LibVEX_GuestAMD64_put_rflag_c(1, gst);
   } else {
      gst->guest_RAX = sr_Res(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RAX,
               sizeof(UWord));
      gst->guest_RDX = sr_ResHI(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RDX,
               sizeof(UWord));
      LibVEX_GuestAMD64_put_rflag_c(0, gst);
   }
   /* Make CC_DEP1 and CC_DEP2 defined.  This is inaccurate because it makes
      other eflags defined too (see README.solaris). */
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestAMD64State,
            guest_CC_DEP1), sizeof(ULong));
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestAMD64State,
            guest_CC_DEP2), sizeof(ULong));

#  else
#    error "putSyscallStatusIntoGuestState: unknown arch"
#  endif
}
/* Tell me the offsets in the guest state of the syscall params, so
   that the scalar argument checkers don't have to have this info
   hardwired. */
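/* A sketch of the intended use (the real consumers are the
   PRE_REG_READn machinery in priv_types_n_macros.h; "pre_reg_read" is
   the tool event those macros fire for each register-borne argument,
   and the s_argN stack offsets are used instead on the platforms below
   that pass arguments in memory):

      SyscallArgLayout layout;
      getSyscallArgLayout( &layout );
      VG_TRACK( pre_reg_read, Vg_CoreSysCall, tid, "syscall-arg1",
                layout.o_arg1, sizeof(UWord) );
*/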
static
void getSyscallArgLayout ( /*OUT*/SyscallArgLayout* layout )
{
   VG_(bzero_inline)(layout, sizeof(*layout));

#if defined(VGP_x86_linux)
   layout->o_sysno  = OFFSET_x86_EAX;
   layout->o_arg1   = OFFSET_x86_EBX;
   layout->o_arg2   = OFFSET_x86_ECX;
   layout->o_arg3   = OFFSET_x86_EDX;
   layout->o_arg4   = OFFSET_x86_ESI;
   layout->o_arg5   = OFFSET_x86_EDI;
   layout->o_arg6   = OFFSET_x86_EBP;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_amd64_linux)
   layout->o_sysno  = OFFSET_amd64_RAX;
   layout->o_arg1   = OFFSET_amd64_RDI;
   layout->o_arg2   = OFFSET_amd64_RSI;
   layout->o_arg3   = OFFSET_amd64_RDX;
   layout->o_arg4   = OFFSET_amd64_R10;
   layout->o_arg5   = OFFSET_amd64_R8;
   layout->o_arg6   = OFFSET_amd64_R9;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_ppc32_linux)
   layout->o_sysno  = OFFSET_ppc32_GPR0;
   layout->o_arg1   = OFFSET_ppc32_GPR3;
   layout->o_arg2   = OFFSET_ppc32_GPR4;
   layout->o_arg3   = OFFSET_ppc32_GPR5;
   layout->o_arg4   = OFFSET_ppc32_GPR6;
   layout->o_arg5   = OFFSET_ppc32_GPR7;
   layout->o_arg6   = OFFSET_ppc32_GPR8;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   layout->o_sysno  = OFFSET_ppc64_GPR0;
   layout->o_arg1   = OFFSET_ppc64_GPR3;
   layout->o_arg2   = OFFSET_ppc64_GPR4;
   layout->o_arg3   = OFFSET_ppc64_GPR5;
   layout->o_arg4   = OFFSET_ppc64_GPR6;
   layout->o_arg5   = OFFSET_ppc64_GPR7;
   layout->o_arg6   = OFFSET_ppc64_GPR8;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_arm_linux)
   layout->o_sysno  = OFFSET_arm_R7;
   layout->o_arg1   = OFFSET_arm_R0;
   layout->o_arg2   = OFFSET_arm_R1;
   layout->o_arg3   = OFFSET_arm_R2;
   layout->o_arg4   = OFFSET_arm_R3;
   layout->o_arg5   = OFFSET_arm_R4;
   layout->o_arg6   = OFFSET_arm_R5;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_arm64_linux)
   layout->o_sysno  = OFFSET_arm64_X8;
   layout->o_arg1   = OFFSET_arm64_X0;
   layout->o_arg2   = OFFSET_arm64_X1;
   layout->o_arg3   = OFFSET_arm64_X2;
   layout->o_arg4   = OFFSET_arm64_X3;
   layout->o_arg5   = OFFSET_arm64_X4;
   layout->o_arg6   = OFFSET_arm64_X5;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_mips32_linux)
   layout->o_sysno  = OFFSET_mips32_r2;
   layout->o_arg1   = OFFSET_mips32_r4;
   layout->o_arg2   = OFFSET_mips32_r5;
   layout->o_arg3   = OFFSET_mips32_r6;
   layout->o_arg4   = OFFSET_mips32_r7;
   layout->s_arg5   = sizeof(UWord) * 4;
   layout->s_arg6   = sizeof(UWord) * 5;
   layout->s_arg7   = sizeof(UWord) * 6;
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_nanomips_linux)
   layout->o_sysno  = OFFSET_mips32_r2;
   layout->o_arg1   = OFFSET_mips32_r4;
   layout->o_arg2   = OFFSET_mips32_r5;
   layout->o_arg3   = OFFSET_mips32_r6;
   layout->o_arg4   = OFFSET_mips32_r7;
   layout->o_arg5   = OFFSET_mips32_r8;
   layout->o_arg6   = OFFSET_mips32_r9;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_mips64_linux)
   layout->o_sysno  = OFFSET_mips64_r2;
   layout->o_arg1   = OFFSET_mips64_r4;
   layout->o_arg2   = OFFSET_mips64_r5;
   layout->o_arg3   = OFFSET_mips64_r6;
   layout->o_arg4   = OFFSET_mips64_r7;
   layout->o_arg5   = OFFSET_mips64_r8;
   layout->o_arg6   = OFFSET_mips64_r9;
   layout->o_arg7   = OFFSET_mips64_r10;
   layout->o_arg8   = OFFSET_mips64_r11;

#elif defined(VGP_x86_darwin)
   layout->o_sysno  = OFFSET_x86_EAX;
   // syscall parameters are on stack in C convention
   layout->s_arg1   = sizeof(UWord) * 1;
   layout->s_arg2   = sizeof(UWord) * 2;
   layout->s_arg3   = sizeof(UWord) * 3;
   layout->s_arg4   = sizeof(UWord) * 4;
   layout->s_arg5   = sizeof(UWord) * 5;
   layout->s_arg6   = sizeof(UWord) * 6;
   layout->s_arg7   = sizeof(UWord) * 7;
   layout->s_arg8   = sizeof(UWord) * 8;

#elif defined(VGP_amd64_darwin)
   layout->o_sysno  = OFFSET_amd64_RAX;
   layout->o_arg1   = OFFSET_amd64_RDI;
   layout->o_arg2   = OFFSET_amd64_RSI;
   layout->o_arg3   = OFFSET_amd64_RDX;
   layout->o_arg4   = OFFSET_amd64_RCX;
   layout->o_arg5   = OFFSET_amd64_R8;
   layout->o_arg6   = OFFSET_amd64_R9;
   layout->s_arg7   = sizeof(UWord) * 1;
   layout->s_arg8   = sizeof(UWord) * 2;

#elif defined(VGP_s390x_linux)
   layout->o_sysno  = OFFSET_s390x_SYSNO;
   layout->o_arg1   = OFFSET_s390x_r2;
   layout->o_arg2   = OFFSET_s390x_r3;
   layout->o_arg3   = OFFSET_s390x_r4;
   layout->o_arg4   = OFFSET_s390x_r5;
   layout->o_arg5   = OFFSET_s390x_r6;
   layout->o_arg6   = OFFSET_s390x_r7;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_x86_solaris)
   layout->o_sysno  = OFFSET_x86_EAX;
   /* Syscall parameters are on the stack. */
   layout->s_arg1   = sizeof(UWord) * 1;
   layout->s_arg2   = sizeof(UWord) * 2;
   layout->s_arg3   = sizeof(UWord) * 3;
   layout->s_arg4   = sizeof(UWord) * 4;
   layout->s_arg5   = sizeof(UWord) * 5;
   layout->s_arg6   = sizeof(UWord) * 6;
   layout->s_arg7   = sizeof(UWord) * 7;
   layout->s_arg8   = sizeof(UWord) * 8;

#elif defined(VGP_amd64_solaris)
   layout->o_sysno  = OFFSET_amd64_RAX;
   layout->o_arg1   = OFFSET_amd64_RDI;
   layout->o_arg2   = OFFSET_amd64_RSI;
   layout->o_arg3   = OFFSET_amd64_RDX;
   layout->o_arg4   = OFFSET_amd64_R10;
   layout->o_arg5   = OFFSET_amd64_R8;
   layout->o_arg6   = OFFSET_amd64_R9;
   layout->s_arg7   = sizeof(UWord) * 1;
   layout->s_arg8   = sizeof(UWord) * 2;

#else
#  error "getSyscallLayout: unknown arch"
#endif
}
/* ---------------------------------------------------------------------
   The main driver logic
   ------------------------------------------------------------------ */

/* Finding the handlers for a given syscall, or faking up one
   when no handler is found. */

static
void bad_before ( ThreadId              tid,
                  SyscallArgLayout*     layout,
                  /*MOD*/SyscallArgs*   args,
                  /*OUT*/SyscallStatus* status,
                  /*OUT*/UWord*         flags )
{
   VG_(dmsg)("WARNING: unhandled %s syscall: %s\n",
             VG_PLATFORM, VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html.\n");

   SET_STATUS_Failure(VKI_ENOSYS);

#  if defined(VGO_solaris)
#  endif
}

static SyscallTableEntry bad_sys =
   { bad_before, NULL };
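/* For reference, the general shape of a handler pair that would go in
   one of the syswrap-*.c tables instead of falling back to bad_sys
   (a sketch only: "fooble" and its single argument are made up, and
   real wrappers use the full PRE_REG_READn / PRE_MEM_* / POST_MEM_*
   repertoire from priv_types_n_macros.h):

      PRE(sys_fooble)
      {
         PRINT("sys_fooble ( %lu )", ARG1);
         PRE_REG_READ1(long, "fooble", unsigned long, arg1);
      }

      POST(sys_fooble)
      {
         // mark any memory the kernel wrote as defined, using RES
      }
*/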
static const SyscallTableEntry* get_syscall_entry ( Int syscallno )
{
   const SyscallTableEntry* sys = NULL;

#  if defined(VGO_linux)
   sys = ML_(get_linux_syscall_entry)( syscallno );

#  elif defined(VGO_darwin)
   Int idx = VG_DARWIN_SYSNO_INDEX(syscallno);

   switch (VG_DARWIN_SYSNO_CLASS(syscallno)) {
   case VG_DARWIN_SYSCALL_CLASS_UNIX:
      if (idx >= 0 && idx < ML_(syscall_table_size) &&
          ML_(syscall_table)[idx].before != NULL)
         sys = &ML_(syscall_table)[idx];
      break;
   case VG_DARWIN_SYSCALL_CLASS_MACH:
      if (idx >= 0 && idx < ML_(mach_trap_table_size) &&
          ML_(mach_trap_table)[idx].before != NULL)
         sys = &ML_(mach_trap_table)[idx];
      break;
   case VG_DARWIN_SYSCALL_CLASS_MDEP:
      if (idx >= 0 && idx < ML_(mdep_trap_table_size) &&
          ML_(mdep_trap_table)[idx].before != NULL)
         sys = &ML_(mdep_trap_table)[idx];
      break;
   default:
      break;
   }

#  elif defined(VGO_solaris)
   sys = ML_(get_solaris_syscall_entry)(syscallno);
#  else
#    error "Unknown OS"
#  endif

   return sys == NULL  ? &bad_sys  : sys;
}
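/* Note that callers always receive a usable entry: if no wrapper is
   known for this syscall number, the shared bad_sys entry is returned,
   so ent->before can be invoked unconditionally. */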
/* Add and remove signals from mask so that we end up telling the
   kernel the state we actually want rather than what the client
   wants. */
void VG_(sanitize_client_sigmask)(vki_sigset_t *mask)
{
   VG_(sigdelset)(mask, VKI_SIGKILL);
   VG_(sigdelset)(mask, VKI_SIGSTOP);
   VG_(sigdelset)(mask, VG_SIGVGKILL); /* never block */
}
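/* SIGKILL and SIGSTOP cannot be blocked anyway, and VG_SIGVGKILL must
   remain deliverable so that Valgrind itself can still interrupt a
   thread that is parked in a blocking syscall (see m_signals). */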
typedef
   struct {
      SyscallArgs   orig_args;
      SyscallArgs   args;
      SyscallStatus status;
      UWord         flags;
   }
   SyscallInfo;

SyscallInfo *syscallInfo;

/* The scheduler needs to be able to zero out these records after a
   fork, hence this is exported from m_syswrap. */
void VG_(clear_syscallInfo) ( ThreadId tid )
{
   vg_assert(syscallInfo);
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(memset)( & syscallInfo[tid], 0, sizeof( syscallInfo[tid] ));
   syscallInfo[tid].status.what = SsIdle;
}

Bool VG_(is_in_syscall) ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return (syscallInfo && syscallInfo[tid].status.what != SsIdle);
}

Word VG_(is_in_syscall_no) ( ThreadId tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return syscallInfo[tid].orig_args.sysno;
}

static void ensure_initialised ( void )
{
   Int i;
   static Bool init_done = False;
   if (init_done)
      return;
   init_done = True;
   syscallInfo = VG_(malloc)("scinfo", VG_N_THREADS * sizeof syscallInfo[0]);
   for (i = 0; i < VG_N_THREADS; i++) {
      VG_(clear_syscallInfo)( i );
   }
}
/* --- This is the main function of this file. --- */

void VG_(client_syscall) ( ThreadId tid, UInt trc )
{
   Word                     sysno;
   ThreadState*             tst;
   const SyscallTableEntry* ent;
   SyscallArgLayout         layout;
   SyscallInfo*             sci;

   ensure_initialised();

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

#  if !defined(VGO_darwin)
   // Resync filtering is meaningless on non-Darwin targets.
   vg_assert(VG_(clo_resync_filter) == 0);
#  endif

   tst = VG_(get_ThreadState)(tid);
   /* BEGIN ensure root thread's stack is suitably mapped */
   /* In some rare circumstances, we may do the syscall without the
      bottom page of the stack being mapped, because the stack pointer
      was moved down just a few instructions before the syscall
      instruction, and there have been no memory references since
      then, that would cause a call to VG_(extend_stack) to have
      happened.

      In native execution that's OK: the kernel automagically extends
      the stack's mapped area down to cover the stack pointer (or sp -
      redzone, really).  In simulated normal execution that's OK too,
      since any signals we get from accessing below the mapped area of
      the (guest's) stack lead us to VG_(extend_stack), where we
      simulate the kernel's stack extension logic.  But that leaves
      the problem of entering a syscall with the SP unmapped.  Because
      the kernel doesn't know that the segment immediately above SP is
      supposed to be a grow-down segment, it causes the syscall to
      fail, and thereby causes a divergence between native behaviour
      (syscall succeeds) and simulated behaviour (syscall fails).

      This is quite a rare failure mode.  It has only been seen
      affecting calls to sys_readlink on amd64-linux, and even then it
      requires a certain code sequence around the syscall to trigger
      it, for example

         extern int my_readlink ( const char* path );
         asm(
         ".globl my_readlink\n"
         "my_readlink:\n"
         "\tsubq    $0x1008,%rsp\n"
         "\tmovq    %rdi,%rdi\n"              // path is in rdi
         "\tmovq    %rsp,%rsi\n"              // &buf[0] -> rsi
         "\tmovl    $0x1000,%edx\n"           // sizeof(buf) in rdx
         "\tmovl    $"__NR_READLINK",%eax\n"  // syscall number
         "\tsyscall\n"
         "\taddq    $0x1008,%rsp\n"
         "\tret\n"
         );

      For more details, see bug #156404
      (https://bugs.kde.org/show_bug.cgi?id=156404).

      The fix is actually very simple.  We simply need to call
      VG_(extend_stack) for this thread, handing it the lowest
      possible valid address for the stack (sp - redzone), to ensure
      that the pages all the way down to that address are mapped.
      Because this is a potentially expensive and frequent operation,
      we do the following:

      Only the main thread (tid=1) has a growdown stack.  So
      ignore all others.  It is conceivable, although highly unlikely,
      that the main thread exits, and later another thread is
      allocated tid=1, but that's harmless, I believe;
      VG_(extend_stack) will do nothing when applied to a non-root
      thread.

      All this guff is of course Linux-specific.  Hence the ifdef.
   */
#  if defined(VGO_linux)
   if (tid == 1/*ROOT THREAD*/) {
      Addr stackMin = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;

      /* The precise thing to do here would be to extend the stack only
         if the system call can be proven to access unmapped user stack
         memory.  That is an enormous amount of work even if a proper
         spec of system calls was available.

         In the case where the system call does not access user memory
         the stack pointer here can have any value.  A legitimate testcase
         that exercises this is none/tests/s390x/stmg.c:
         the stack pointer happens to be in the reservation segment near
         the end of the addressable memory and there is no SkAnonC segment
         above it.

         So the approximation we're taking here is to extend the stack only
         if the client stack pointer does not look bogus. */
      if (VG_(am_addr_is_in_extensible_client_stack)(stackMin))
         VG_(extend_stack)( tid, stackMin );
   }
#  endif
   /* END ensure root thread's stack is suitably mapped */
   /* First off, get the syscall args and number.  This is a
      platform-dependent action. */

   sci = & syscallInfo[tid];
   vg_assert(sci->status.what == SsIdle);

   getSyscallArgsFromGuestState( &sci->orig_args, &tst->arch.vex, trc );

   /* Copy .orig_args to .args.  The pre-handler may modify .args, but
      we want to keep the originals too, just in case. */
   sci->args = sci->orig_args;

   /* Save the syscall number in the thread state in case the syscall
      is interrupted by a signal. */
   sysno = sci->orig_args.sysno;

   /* It's sometimes useful, as a crude debugging hack, to get a
      stack trace at each (or selected) syscalls. */
   if (0 && sysno == __NR_ioctl) {
      VG_(umsg)("\nioctl:\n");
      VG_(get_and_pp_StackTrace)(tid, 10);
   }
#  if defined(VGO_darwin)
   /* Record syscall class.  But why?  Because the syscall might be
      interrupted by a signal, and in the signal handler (which will
      be m_signals.async_signalhandler) we will need to build a SysRes
      reflecting the syscall return result.  In order to do that we
      need to know the syscall class.  Hence stash it in the guest
      state of this thread.  This madness is not needed on Linux
      because it only has a single syscall return convention and so
      there is no ambiguity involved in converting the post-signal
      machine state into a SysRes. */
   tst->arch.vex.guest_SC_CLASS = VG_DARWIN_SYSNO_CLASS(sysno);
#  endif
   /* The default what-to-do-next thing is hand the syscall to the
      kernel, so we pre-set that here.  Set .sres to something
      harmless looking (it is irrelevant, because .what is not
      SsComplete). */
   sci->status.what = SsHandToKernel;
   sci->status.sres = VG_(mk_SysRes_Error)(0);
   sci->flags       = 0;

   /* Fetch the syscall's handlers.  If no handlers exist for this
      syscall, we are given dummy handlers which force an immediate
      return with ENOSYS. */
   ent = get_syscall_entry(sysno);

   /* Fetch the layout information, which tells us where in the guest
      state the syscall args reside.  This is a platform-dependent
      action.  This info is needed so that the scalar syscall argument
      checks (PRE_REG_READ calls) know which bits of the guest state
      they need to inspect. */
   getSyscallArgLayout( &layout );
   /* Make sure the tmp signal mask matches the real signal mask;
      sigsuspend may change this. */
   vg_assert(VG_(iseqsigset)(&tst->sig_mask, &tst->tmp_sig_mask));

   /* Right, we're finally ready to Party.  Call the pre-handler and
      see what we get back.  At this point:

        sci->status.what  is Unset (we don't know yet).
        sci->orig_args    contains the original args.
        sci->args         is the same as sci->orig_args.
        sci->flags        is zero.
   */

   PRINT("SYSCALL[%d,%u](%s) ",
      VG_(getpid)(), tid, VG_SYSNUM_STRING(sysno));
   /* Do any pre-syscall actions */
   if (VG_(needs).syscall_wrapper) {
      UWord tmpv[8];
      tmpv[0] = sci->orig_args.arg1;
      tmpv[1] = sci->orig_args.arg2;
      tmpv[2] = sci->orig_args.arg3;
      tmpv[3] = sci->orig_args.arg4;
      tmpv[4] = sci->orig_args.arg5;
      tmpv[5] = sci->orig_args.arg6;
      tmpv[6] = sci->orig_args.arg7;
      tmpv[7] = sci->orig_args.arg8;
      VG_TDICT_CALL(tool_pre_syscall, tid, sysno,
                    &tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]));
   }

   vg_assert(ent->before);
   (ent->before)( tid,
                  &layout,
                  &sci->args, &sci->status, &sci->flags );

   /* If needed, gdbserver will report syscall entry to GDB */
   VG_(gdbserver_report_syscall)(True, sysno, tid);
   /* The pre-handler may have modified:
         sci->args
         sci->status
         sci->flags
      All else remains unchanged.
      Although the args may be modified, pre handlers are not allowed
      to change the syscall number.
   */
   /* Now we proceed according to what the pre-handler decided. */
   vg_assert(sci->status.what == SsHandToKernel
             || sci->status.what == SsComplete);
   vg_assert(sci->args.sysno == sci->orig_args.sysno);

   if (sci->status.what == SsComplete && !sr_isError(sci->status.sres)) {
      /* The pre-handler completed the syscall itself, declaring
         success. */
      if (sci->flags & SfNoWriteResult) {
         PRINT(" --> [pre-success] NoWriteResult");
      } else {
         PRINT(" --> [pre-success] %s", VG_(sr_as_string)(sci->status.sres));
      }
      /* In this case the allowable flags are to ask for a signal-poll
         and/or a yield after the call.  Changing the args isn't
         allowed. */
      vg_assert(0 == (sci->flags
                      & ~(SfPollAfter | SfYieldAfter | SfNoWriteResult)));
      vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
   }

   else
   if (sci->status.what == SsComplete && sr_isError(sci->status.sres)) {
      /* The pre-handler decided to fail the syscall itself. */
      PRINT(" --> [pre-fail] %s", VG_(sr_as_string)(sci->status.sres));
      /* In this case, the pre-handler is also allowed to ask for the
         post-handler to be run anyway.  Changing the args is not
         allowed. */
      vg_assert(0 == (sci->flags & ~(SfMayBlock | SfPostOnFail | SfPollAfter)));
      vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
   }

   else
   if (sci->status.what != SsHandToKernel) {
      /* huh?! */
      vg_assert(0);
   }
   else /* (sci->status.what == HandToKernel) */ {
      /* Ok, this is the usual case -- and the complicated one.  There
         are two subcases: sync and async.  async is the general case
         and is to be used when there is any possibility that the
         syscall might block [a fact that the pre-handler must tell us
         via the sci->flags field.]  Because the tidying-away /
         context-switch overhead of the async case could be large, if
         we are sure that the syscall will not block, we fast-track it
         by doing it directly in this thread, which is a lot
         simpler. */

      /* Check that the given flags are allowable: MayBlock, PollAfter
         and PostOnFail are ok. */
      vg_assert(0 == (sci->flags & ~(SfMayBlock | SfPostOnFail | SfPollAfter)));

      if (sci->flags & SfMayBlock) {

         /* Syscall may block, so run it asynchronously */
         vki_sigset_t mask;

         PRINT(" --> [async] ... \n");

         mask = tst->sig_mask;
         VG_(sanitize_client_sigmask)(&mask);

         /* Gack.  More impedance matching.  Copy the possibly
            modified syscall args back into the guest state. */
         /* JRS 2009-Mar-16: if the syscall args are possibly modified,
            then this assertion is senseless:
               vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
            The case that exposed it was sys_posix_spawn on Darwin,
            which heavily modifies its arguments but then lets the call
            go through anyway, with SfMayBlock set, hence we end up here. */
         putSyscallArgsIntoGuestState( &sci->args, &tst->arch.vex );

         /* The SfNoWriteResult flag is invalid for blocking syscalls because
            do_syscall_for_client() directly modifies the guest state. */
         vg_assert(!(sci->flags & SfNoWriteResult));

         /* Drop the bigLock */
         VG_(release_BigLock)(tid, VgTs_WaitSys, "VG_(client_syscall)[async]");
         /* Urr.  We're now in a race against other threads trying to
            acquire the bigLock.  I guess that doesn't matter provided
            that do_syscall_for_client only touches thread-local
            state. */

         /* Do the call, which operates directly on the guest state,
            not on our abstracted copies of the args/result. */
         do_syscall_for_client(sysno, tst, &mask);

         /* do_syscall_for_client may not return if the syscall was
            interrupted by a signal.  In that case, flow of control is
            first to m_signals.async_sighandler, which calls
            VG_(fixup_guest_state_after_syscall_interrupted), which
            fixes up the guest state, and possibly calls
            VG_(post_syscall).  Once that's done, control drops back
            to the scheduler. */

         /* Darwin: do_syscall_for_client may not return if the
            syscall was workq_ops(WQOPS_THREAD_RETURN) and the kernel
            responded by starting the thread at wqthread_hijack(reuse=1)
            (to run another workqueue item).  In that case, wqthread_hijack
            calls ML_(wqthread_continue), which is similar to
            VG_(fixup_guest_state_after_syscall_interrupted). */

         /* Reacquire the lock */
         VG_(acquire_BigLock)(tid, "VG_(client_syscall)[async]");

         /* Even more impedance matching.  Extract the syscall status
            from the guest state. */
         getSyscallStatusFromGuestState( &sci->status, &tst->arch.vex );
         vg_assert(sci->status.what == SsComplete);

         /* Be decorative, if required. */
         if (VG_(clo_trace_syscalls)) {
            PRINT("SYSCALL[%d,%u](%s) ... [async] --> %s",
                  VG_(getpid)(), tid, VG_SYSNUM_STRING(sysno),
                  VG_(sr_as_string)(sci->status.sres));
         }

      } else {
         /* run the syscall directly */
         /* The pre-handler may have modified the syscall args, but
            since we're passing values in ->args directly to the
            kernel, there's no point in flushing them back to the
            guest state.  Indeed doing so could be construed as
            incorrect. */
         SysRes sres
            = VG_(do_syscall)(sysno, sci->args.arg1, sci->args.arg2,
                                     sci->args.arg3, sci->args.arg4,
                                     sci->args.arg5, sci->args.arg6,
                                     sci->args.arg7, sci->args.arg8 );
         sci->status = convert_SysRes_to_SyscallStatus(sres);

         /* Be decorative, if required. */
         if (VG_(clo_trace_syscalls)) {
            PRINT("[sync] --> %s", VG_(sr_as_string)(sci->status.sres));
         }
      }
   }

   vg_assert(sci->status.what == SsComplete);
   vg_assert(VG_(is_running_thread)(tid));

   /* Dump the syscall result back in the guest state.  This is
      a platform-specific action. */
   if (!(sci->flags & SfNoWriteResult))
      putSyscallStatusIntoGuestState( tid, &sci->status, &tst->arch.vex );

   /* If needed, gdbserver will report syscall return to GDB */
   VG_(gdbserver_report_syscall)(False, sysno, tid);

   /* Situation now:
      - the guest state is now correctly modified following the syscall
      - modified args, original args and syscall status are still
        available in the syscallInfo[] entry for this syscall.

      Now go on to do the post-syscall actions (read on down ..)
   */
   PRINT(" ");
   VG_(post_syscall)(tid);
   PRINT("\n");
}
/* Perform post syscall actions.  The expected state on entry is
   precisely as at the end of VG_(client_syscall), that is:

   - guest state up to date following the syscall
   - modified args, original args and syscall status are still
     available in the syscallInfo[] entry for this syscall.
   - syscall status matches what's in the guest state.

   There are two ways to get here: the normal way -- being called by
   VG_(client_syscall), and the unusual way, from
   VG_(fixup_guest_state_after_syscall_interrupted).
   Darwin: there's a third way, ML_(wqthread_continue).
*/
void VG_(post_syscall) (ThreadId tid)
{
   SyscallInfo*             sci;
   const SyscallTableEntry* ent;
   SyscallStatus            test_status;
   ThreadState*             tst;
   Word                     sysno;

   /* Preliminaries */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   sci = & syscallInfo[tid];

   /* m_signals.sigvgkill_handler might call here even when not in
      a syscall. */
   if (sci->status.what == SsIdle || sci->status.what == SsHandToKernel) {
      sci->status.what = SsIdle;
      return;
   }

   /* Validate the current syscallInfo entry.  In particular we require
      that the current .status matches what's actually in the guest
      state, at least in the normal case where we have actually
      previously written the result into the guest state. */
   vg_assert(sci->status.what == SsComplete);

   /* Get the system call number.  Because the pre-handler isn't
      allowed to mess with it, it should be the same for both the
      original and potentially-modified args. */
   vg_assert(sci->args.sysno == sci->orig_args.sysno);
   sysno = sci->args.sysno;

   getSyscallStatusFromGuestState( &test_status, &tst->arch.vex );
   if (!(sci->flags & SfNoWriteResult)) {
      vg_assert(eq_SyscallStatus( sysno, &sci->status, &test_status ));
   }
   /* Failure of the above assertion on Darwin can indicate a problem
      in the syscall wrappers that pre-fail or pre-succeed the
      syscall, by calling SET_STATUS_Success or SET_STATUS_Failure,
      when they really should call SET_STATUS_from_SysRes.  The former
      two create a UNIX-class syscall result on Darwin, which may not
      be correct for the syscall; if that's the case then this
      assertion fires.  See PRE(thread_fast_set_cthread_self) for an
      example.  On non-Darwin platforms this assertion should never
      fail, and this comment is completely irrelevant. */
   /* Ok, looks sane */

   /* pre: status == Complete (asserted above) */
   /* Consider either success or failure.  Now run the post handler if:
      - it exists, and
      - Success or (Failure and PostOnFail is set)
   */
   ent = get_syscall_entry(sysno);
   if (ent->after
       && ((!sr_isError(sci->status.sres))
           || (sr_isError(sci->status.sres)
               && (sci->flags & SfPostOnFail) ))) {

      (ent->after)( tid, &sci->args, &sci->status );
   }

   /* Because the post handler might have changed the status (eg, the
      post-handler for sys_open can change the result from success to
      failure if the kernel supplied a fd that it doesn't like), once
      again dump the syscall result back in the guest state. */
   if (!(sci->flags & SfNoWriteResult))
      putSyscallStatusIntoGuestState( tid, &sci->status, &tst->arch.vex );

   /* Do any post-syscall actions required by the tool. */
   if (VG_(needs).syscall_wrapper) {
      UWord tmpv[8];
      tmpv[0] = sci->orig_args.arg1;
      tmpv[1] = sci->orig_args.arg2;
      tmpv[2] = sci->orig_args.arg3;
      tmpv[3] = sci->orig_args.arg4;
      tmpv[4] = sci->orig_args.arg5;
      tmpv[5] = sci->orig_args.arg6;
      tmpv[6] = sci->orig_args.arg7;
      tmpv[7] = sci->orig_args.arg8;
      VG_TDICT_CALL(tool_post_syscall, tid,
                    sysno,
                    &tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]),
                    sci->status.sres);
   }

   /* The syscall is done. */
   vg_assert(sci->status.what == SsComplete);
   sci->status.what = SsIdle;

   /* The pre/post wrappers may have concluded that pending signals
      might have been created, and will have set SfPollAfter to
      request a poll for them once the syscall is done. */
   if (sci->flags & SfPollAfter)
      VG_(poll_signals)(tid);

   /* Similarly, the wrappers might have asked for a yield
      afterwards. */
   if (sci->flags & SfYieldAfter)
      VG_(vg_yield)();
}
/* ---------------------------------------------------------------------
   Dealing with syscalls which get interrupted by a signal:
      VG_(fixup_guest_state_after_syscall_interrupted)
   ------------------------------------------------------------------ */

/* Syscalls done on behalf of the client are finally handed off to the
   kernel in VG_(client_syscall) above, either by calling
   do_syscall_for_client (the async case), or by calling
   VG_(do_syscall6) (the sync case).

   If the syscall is not interrupted by a signal (it may block and
   later unblock, but that's irrelevant here) then those functions
   eventually return and so control is passed to VG_(post_syscall).
   NB: not sure if the sync case can actually get interrupted, as it
   operates with all signals masked.

   However, the syscall may get interrupted by an async-signal.  In
   that case do_syscall_for_client/VG_(do_syscall6) do not
   return.  Instead we wind up in m_signals.async_sighandler.  We need
   to fix up the guest state to make it look like the syscall was
   interrupted for the guest.  So async_sighandler calls here, and this
   does the fixup.  Note that from here we wind up calling
   VG_(post_syscall) too.
*/
/* These are addresses within ML_(do_syscall_for_client_WRK).  See
   syscall-$PLAT.S for details.
*/
#if defined(VGO_linux)
  extern const Addr ML_(blksys_setup);
  extern const Addr ML_(blksys_restart);
  extern const Addr ML_(blksys_complete);
  extern const Addr ML_(blksys_committed);
  extern const Addr ML_(blksys_finished);
#elif defined(VGO_darwin)
  /* Darwin requires extra ugliness */
  extern const Addr ML_(blksys_setup_MACH);
  extern const Addr ML_(blksys_restart_MACH);
  extern const Addr ML_(blksys_complete_MACH);
  extern const Addr ML_(blksys_committed_MACH);
  extern const Addr ML_(blksys_finished_MACH);
  extern const Addr ML_(blksys_setup_MDEP);
  extern const Addr ML_(blksys_restart_MDEP);
  extern const Addr ML_(blksys_complete_MDEP);
  extern const Addr ML_(blksys_committed_MDEP);
  extern const Addr ML_(blksys_finished_MDEP);
  extern const Addr ML_(blksys_setup_UNIX);
  extern const Addr ML_(blksys_restart_UNIX);
  extern const Addr ML_(blksys_complete_UNIX);
  extern const Addr ML_(blksys_committed_UNIX);
  extern const Addr ML_(blksys_finished_UNIX);
#elif defined(VGO_solaris)
  extern const Addr ML_(blksys_setup);
  extern const Addr ML_(blksys_complete);
  extern const Addr ML_(blksys_committed);
  extern const Addr ML_(blksys_finished);
  extern const Addr ML_(blksys_setup_DRET);
  extern const Addr ML_(blksys_complete_DRET);
  extern const Addr ML_(blksys_committed_DRET);
  extern const Addr ML_(blksys_finished_DRET);
#else
# error "Unknown OS"
#endif
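/* These labels bracket the successive phases of the hand-written
   syscall sequence in syscall-$PLAT.S: unblock signals, perform the
   syscall, save the result to the guest state, and re-block signals.
   The fixup code below compares the interrupted instruction pointer
   against them to decide how far the syscall had progressed. */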
/* Back up guest state to restart a system call. */

void ML_(fixup_guest_state_to_restart_syscall) ( ThreadArchState* arch )
{
#if defined(VGP_x86_linux)
   arch->vex.guest_EIP -= 2;             // sizeof(int $0x80)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      int $0x80 == CD 80
   */
   {
      UChar *p = (UChar *)arch->vex.guest_EIP;

      if (p[0] != 0xcd || p[1] != 0x80)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#x %02x %02x\n",
                      arch->vex.guest_EIP, p[0], p[1]);

      vg_assert(p[0] == 0xcd && p[1] == 0x80);
   }
#elif defined(VGP_amd64_linux)
   arch->vex.guest_RIP -= 2;             // sizeof(syscall)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      syscall == 0F 05
   */
   {
      UChar *p = (UChar *)arch->vex.guest_RIP;

      if (p[0] != 0x0F || p[1] != 0x05)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x\n",
                      arch->vex.guest_RIP, p[0], p[1]);

      vg_assert(p[0] == 0x0F && p[1] == 0x05);
   }
#elif defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux)
   arch->vex.guest_CIA -= 4;             // sizeof(ppc32 instr)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      sc == 44 00 00 02
   */
   {
      UChar *p = (UChar *)arch->vex.guest_CIA;

      if (p[0] != 0x44 || p[1] != 0x0 || p[2] != 0x0 || p[3] != 0x02)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
                      (ULong)arch->vex.guest_CIA, p[0], p[1], p[2], p[3]);

      vg_assert(p[0] == 0x44 && p[1] == 0x0 && p[2] == 0x0 && p[3] == 0x2);
   }
#elif defined(VGP_ppc64le_linux)
   arch->vex.guest_CIA -= 4;             // sizeof(ppc instr)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      sc == 44 00 00 02 (stored little-endian as 02 00 00 44)
   */
   {
      UChar *p = (UChar *)arch->vex.guest_CIA;

      if (p[3] != 0x44 || p[2] != 0x0 || p[1] != 0x0 || p[0] != 0x02)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
                      arch->vex.guest_CIA, p[3], p[2], p[1], p[0]);

      vg_assert(p[3] == 0x44 && p[2] == 0x0 && p[1] == 0x0 && p[0] == 0x2);
   }
#elif defined(VGP_arm_linux)
   if (arch->vex.guest_R15T & 1) {
      // Thumb mode.  SVC is encoded as
      //   1101 1111 imm8
      // where imm8 is the SVC number, and we only accept 0.
      arch->vex.guest_R15T -= 2;   // sizeof(thumb 16 bit insn)
      UChar* p     = (UChar*)(arch->vex.guest_R15T - 1);
      Bool   valid = p[0] == 0 && p[1] == 0xDF;
      if (!valid) {
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over (Thumb) syscall that is not syscall "
                      "at %#x %02x %02x\n",
                      arch->vex.guest_R15T - 1, p[0], p[1]);
      }
      vg_assert(valid);
      // FIXME: NOTE, this really isn't right.  We need to back up
      // ITSTATE to what it was before the SVC instruction, but we
      // don't know what it was.  At least assert that it is now
      // zero, because if it is nonzero then it must also have
      // been nonzero for the SVC itself, which means it was
      // conditional.  Urk.
      vg_assert(arch->vex.guest_ITSTATE == 0);
   } else {
      // ARM mode.  SVC is encoded as
      //   cond 1111 imm24
      // where imm24 is the SVC number, and we only accept 0.
      arch->vex.guest_R15T -= 4;   // sizeof(arm instr)
      UChar* p     = (UChar*)arch->vex.guest_R15T;
      Bool   valid = p[0] == 0 && p[1] == 0 && p[2] == 0
                     && (p[3] & 0xF) == 0xF;
      if (!valid) {
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over (ARM) syscall that is not syscall "
                      "at %#x %02x %02x %02x %02x\n",
                      arch->vex.guest_R15T, p[0], p[1], p[2], p[3]);
      }
      vg_assert(valid);
   }
#elif defined(VGP_arm64_linux)
   arch->vex.guest_PC -= 4;             // sizeof(arm64 instr)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      svc #0 == d4 00 00 01
   */
   {
      UChar *p = (UChar *)arch->vex.guest_PC;

      if (p[0] != 0x01 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0xD4)
         VG_(message)(
            Vg_DebugMsg,
            "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
            arch->vex.guest_PC, p[0], p[1], p[2], p[3]
         );

      vg_assert(p[0] == 0x01 && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0xD4);
   }
#elif defined(VGP_x86_darwin)
   arch->vex.guest_EIP = arch->vex.guest_IP_AT_SYSCALL;

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      int $0x80 == CD 80  // Used to communicate with BSD syscalls
      int $0x81 == CD 81  // Used to communicate with Mach traps
      int $0x82 == CD 82  // Used to communicate with "thread" ?
      sysenter  == 0F 34  // Used to communicate with Unix syscalls
   */
   {
      UChar *p = (UChar *)arch->vex.guest_EIP;
      Bool  ok = (p[0] == 0xCD && p[1] == 0x80)
                 || (p[0] == 0xCD && p[1] == 0x81)
                 || (p[0] == 0xCD && p[1] == 0x82)
                 || (p[0] == 0x0F && p[1] == 0x34);
      if (!ok)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#x %02x %02x\n",
                      arch->vex.guest_EIP, p[0], p[1]);
      vg_assert(ok);
   }
#elif defined(VGP_amd64_darwin)
   arch->vex.guest_RIP = arch->vex.guest_IP_AT_SYSCALL;

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      syscall == 0F 05
   */
   {
      UChar *p = (UChar *)arch->vex.guest_RIP;
      Bool  ok = (p[0] == 0x0F && p[1] == 0x05);
      if (!ok)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x\n",
                      arch->vex.guest_RIP, p[0], p[1]);
      vg_assert(ok);
   }
#elif defined(VGP_s390x_linux)
   arch->vex.guest_IA -= 2;             // sizeof(syscall)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      svc == 0A <num>
   */
   {
      UChar *p = (UChar *)arch->vex.guest_IA;
      if (p[0] != 0x0A)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x\n",
                      arch->vex.guest_IA, p[0], p[1]);

      vg_assert(p[0] == 0x0A);
   }
#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)

   arch->vex.guest_PC -= 4;             // sizeof(mips instr)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      syscall == 0x0000000C
      stored little-endian as  0C 00 00 00
      stored big-endian    as  00 00 00 0C
   */
   {
      UChar *p = (UChar *)(Addr)(arch->vex.guest_PC);
#     if defined (VG_LITTLEENDIAN)
      if (p[0] != 0x0c || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x00)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
                      (ULong)arch->vex.guest_PC, p[0], p[1], p[2], p[3]);

      vg_assert(p[0] == 0x0c && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0x00);
#     elif defined (VG_BIGENDIAN)
      if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x0c)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
                      (ULong)arch->vex.guest_PC, p[0], p[1], p[2], p[3]);

      vg_assert(p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0x0c);
#     else
#        error "Unknown endianness"
#     endif
   }
#elif defined(VGP_nanomips_linux)
   {
      /* Make sure our caller is actually sane, and we're really backing
         back over a syscall. */
      arch->vex.guest_PC -= 2;
      /* PC has to be 16-bit aligned. */
      vg_assert((arch->vex.guest_PC & 1) == 0);

      UShort *p = ASSUME_ALIGNED(UShort *, (Addr)(arch->vex.guest_PC));

      if (((*p) & 0xFFFD) != 0x1008) {
         if (((*(p - 1)) & 0xFFFD) != 0x0008) {
            VG_(message)(Vg_DebugMsg,
                         "?! restarting over syscall at %#x %08lx\n",
                         arch->vex.guest_PC, (UWord)(*p));
            vg_assert(0);
         }
         arch->vex.guest_PC -= 2;
      }
   }
#elif defined(VGP_x86_solaris)
   arch->vex.guest_EIP -= 2;   // sizeof(int $0x91) or sizeof(syscall)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      int $0x91 == CD 91
      syscall   == 0F 05
      sysenter  == 0F 34

      Handle also other syscall instructions because we also handle them in
      the scheduler.
      int $0x80 == CD 80
      int $0x81 == CD 81
      int $0x82 == CD 82
   */
   {
      UChar *p = (UChar *)arch->vex.guest_EIP;

      Bool  ok = (p[0] == 0xCD && p[1] == 0x91)
                 || (p[0] == 0x0F && p[1] == 0x05)
                 || (p[0] == 0x0F && p[1] == 0x34)
                 || (p[0] == 0xCD && p[1] == 0x80)
                 || (p[0] == 0xCD && p[1] == 0x81)
                 || (p[0] == 0xCD && p[1] == 0x82);
      if (!ok)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#x %02x %02x\n",
                      arch->vex.guest_EIP, p[0], p[1]);
      vg_assert(ok);
   }
#elif defined(VGP_amd64_solaris)
   arch->vex.guest_RIP -= 2;   // sizeof(syscall)

   /* Make sure our caller is actually sane, and we're really backing
      back over a syscall.

      syscall == 0F 05
   */
   {
      UChar *p = (UChar *)arch->vex.guest_RIP;

      Bool  ok = (p[0] == 0x0F && p[1] == 0x05);
      if (!ok)
         VG_(message)(Vg_DebugMsg,
                      "?! restarting over syscall at %#llx %02x %02x\n",
                      arch->vex.guest_RIP, p[0], p[1]);
      vg_assert(ok);
   }

#else
#  error "ML_(fixup_guest_state_to_restart_syscall): unknown plat"
#endif
}
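/* Summary: each case above either rewinds the guest IP by the size of
   that platform's syscall instruction (2 or 4 bytes) or, on Darwin,
   restores it from guest_IP_AT_SYSCALL, and then asserts that the
   bytes at the resulting address really are a syscall instruction, so
   that resuming the thread re-issues the same syscall. */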
/*
   Fix up the guest state when a syscall is interrupted by a signal
   and so has been forced to return 'sysret'.

   To do this, we determine the precise state of the syscall by
   looking at the (real) IP at the time the signal happened.  The
   syscall sequence looks like:

     1. unblock signals
     2. perform syscall
     3. save result to guest state (EAX, RAX, R3+CR0.SO, R0, V0)
     4. re-block signals

   If a signal
   happens at      Then     Why?
   [1-2)           restart  nothing has happened (restart syscall)
   [2]             restart  syscall hasn't started, or kernel wants to restart
   [2-3)           save     syscall complete, but results not saved
   [3-4)                    syscall complete, results saved

   Sometimes we never want to restart an interrupted syscall (because
   sigaction says not to), so we only restart if "restart" is True.

   This will also call VG_(post_syscall) if the syscall has actually
   completed (either because it was interrupted, or because it
   actually finished).  It will not call VG_(post_syscall) if the
   syscall is set up for restart, which means that the pre-wrapper may
   get called multiple times.
*/
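/* In terms of the labels exported from syscall-$PLAT.S, the numbered
   points above (with 5 marking the end of the sequence) correspond to
   blksys_setup (1), blksys_restart (2), blksys_complete (3),
   blksys_committed (4) and blksys_finished (5); the range tests below
   are written directly against those addresses. */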
void
VG_(fixup_guest_state_after_syscall_interrupted)( ThreadId tid,
                                                  Addr     ip,
                                                  SysRes   sres,
                                                  Bool     restart,
                                                  struct vki_ucontext *uc )
{
   /* Note that we don't know the syscall number here, since (1) in
      general there's no reliable way to get hold of it short of
      stashing it in the guest state before the syscall, and (2) in
      any case we don't need to know it for the actions done by this
      routine.

      Furthermore, 'sres' is only used in the case where the syscall
      is complete, but the result has not been committed to the guest
      state yet.  In any other situation it will be meaningless and
      therefore ignored. */

   ThreadState*     tst;
   SyscallStatus    canonical;
   ThreadArchState* th_regs;
   SyscallInfo*     sci;

   /* Compute some Booleans indicating which range we're in. */
   Bool outside_range,
        in_setup_to_restart,      // [1,2) in the .S files
        at_restart,               // [2]   in the .S files
        in_complete_to_committed, // [3,4) in the .S files
        in_committed_to_finished; // [4,5) in the .S files
   if (VG_(clo_trace_signals))
      VG_(message)( Vg_DebugMsg,
                    "interrupted_syscall: tid=%u, ip=%#lx, "
                    "restart=%s, sres.isErr=%s, sres.val=%" FMT_REGWORD "u\n",
                    tid,
                    ip,
                    restart ? "True" : "False",
                    sr_isError(sres) ? "True" : "False",
                    sr_isError(sres) ? (RegWord)sr_Err(sres) :
                                       (RegWord)sr_Res(sres));

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst     = VG_(get_ThreadState)(tid);
   th_regs = &tst->arch;
   sci     = & syscallInfo[tid];
#  if defined(VGO_linux)
   outside_range
      = ip < ML_(blksys_setup) || ip >= ML_(blksys_finished);
   in_setup_to_restart
      = ip >= ML_(blksys_setup) && ip < ML_(blksys_restart);
   at_restart
      = ip == ML_(blksys_restart);
   in_complete_to_committed
      = ip >= ML_(blksys_complete) && ip < ML_(blksys_committed);
   in_committed_to_finished
      = ip >= ML_(blksys_committed) && ip < ML_(blksys_finished);
#  elif defined(VGO_darwin)
   outside_range
      =  (ip < ML_(blksys_setup_MACH) || ip >= ML_(blksys_finished_MACH))
      && (ip < ML_(blksys_setup_MDEP) || ip >= ML_(blksys_finished_MDEP))
      && (ip < ML_(blksys_setup_UNIX) || ip >= ML_(blksys_finished_UNIX));
   in_setup_to_restart
      =  (ip >= ML_(blksys_setup_MACH) && ip < ML_(blksys_restart_MACH))
      || (ip >= ML_(blksys_setup_MDEP) && ip < ML_(blksys_restart_MDEP))
      || (ip >= ML_(blksys_setup_UNIX) && ip < ML_(blksys_restart_UNIX));
   at_restart
      =  (ip == ML_(blksys_restart_MACH))
      || (ip == ML_(blksys_restart_MDEP))
      || (ip == ML_(blksys_restart_UNIX));
   in_complete_to_committed
      =  (ip >= ML_(blksys_complete_MACH) && ip < ML_(blksys_committed_MACH))
      || (ip >= ML_(blksys_complete_MDEP) && ip < ML_(blksys_committed_MDEP))
      || (ip >= ML_(blksys_complete_UNIX) && ip < ML_(blksys_committed_UNIX));
   in_committed_to_finished
      =  (ip >= ML_(blksys_committed_MACH) && ip < ML_(blksys_finished_MACH))
      || (ip >= ML_(blksys_committed_MDEP) && ip < ML_(blksys_finished_MDEP))
      || (ip >= ML_(blksys_committed_UNIX) && ip < ML_(blksys_finished_UNIX));
   /* Wasn't that just So Much Fun?  Does your head hurt yet?  Mine does. */
#  elif defined(VGO_solaris)
   /* The Solaris port is never outside the range. */
   outside_range = False;
   /* The Solaris kernel never restarts syscalls directly! */
   at_restart = False;
   if (tst->os_state.in_door_return) {
      vg_assert(ip >= ML_(blksys_setup_DRET)
                && ip < ML_(blksys_finished_DRET));

      in_setup_to_restart
         = ip >= ML_(blksys_setup_DRET) && ip < ML_(blksys_complete_DRET);
      in_complete_to_committed
         = ip >= ML_(blksys_complete_DRET) && ip < ML_(blksys_committed_DRET);
      in_committed_to_finished
         = ip >= ML_(blksys_committed_DRET) && ip < ML_(blksys_finished_DRET);
   }
   else {
      vg_assert(ip >= ML_(blksys_setup) && ip < ML_(blksys_finished));

      in_setup_to_restart
         = ip >= ML_(blksys_setup) && ip < ML_(blksys_complete);
      in_complete_to_committed
         = ip >= ML_(blksys_complete) && ip < ML_(blksys_committed);
      in_committed_to_finished
         = ip >= ML_(blksys_committed) && ip < ML_(blksys_finished);
   }
#  else
#    error "Unknown OS"
#  endif
   /* Figure out what the state of the syscall was by examining the
      (real) IP at the time of the signal, and act accordingly. */
   if (outside_range) {
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg,
                       "  not in syscall at all: hmm, very suspicious\n" );
      /* Looks like we weren't in a syscall at all.  Hmm. */
      vg_assert(sci->status.what != SsIdle);
      return;
   }

   /* We should not be here unless this thread had first started up
      the machinery for a syscall by calling VG_(client_syscall).
      Hence: */
   vg_assert(sci->status.what != SsIdle);
   /* now, do one of four fixup actions, depending on where the IP has
      got to. */

   if (in_setup_to_restart) {
      /* syscall hasn't even started; go around again */
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg, "  not started: restarting\n");
      vg_assert(sci->status.what == SsHandToKernel);
      ML_(fixup_guest_state_to_restart_syscall)(th_regs);
   }

   else
   if (at_restart) {
#     if defined(VGO_solaris)
      /* We should never hit this branch on Solaris, see the comment above. */
      vg_assert(0);
#     endif

      /* We're either about to run the syscall, or it was interrupted
         and the kernel restarted it.  Restart if asked, otherwise
         EINTR it. */
      if (restart) {
         if (VG_(clo_trace_signals))
            VG_(message)( Vg_DebugMsg, "  at syscall instr: restarting\n");
         ML_(fixup_guest_state_to_restart_syscall)(th_regs);
      } else {
         if (VG_(clo_trace_signals))
            VG_(message)( Vg_DebugMsg, "  at syscall instr: returning EINTR\n");
         canonical = convert_SysRes_to_SyscallStatus(
                        VG_(mk_SysRes_Error)( VKI_EINTR )
                     );
         if (!(sci->flags & SfNoWriteResult))
            putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
         sci->status = canonical;
         VG_(post_syscall)(tid);
      }
   }

   else
   if (in_complete_to_committed) {
      /* Syscall complete, but result hasn't been written back yet.
         Write the SysRes we were supplied with back to the guest
         state. */
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg,
                       "  completed, but uncommitted: committing\n");
      canonical = convert_SysRes_to_SyscallStatus( sres );
      vg_assert(!(sci->flags & SfNoWriteResult));
      putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
#     if defined(VGO_solaris)
      if (tst->os_state.in_door_return) {
#        if defined(VGP_x86_solaris)
         /* Registers %esp and %ebp were also modified by the syscall. */
         tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
         tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
#        elif defined(VGP_amd64_solaris)
         tst->arch.vex.guest_RSP = uc->uc_mcontext.gregs[VKI_REG_RSP];
         tst->arch.vex.guest_RBP = uc->uc_mcontext.gregs[VKI_REG_RBP];
#        endif
      }
#     endif
      sci->status = canonical;
      VG_(post_syscall)(tid);
   }

   else
   if (in_committed_to_finished) {
      /* Result committed, but the signal mask has not been restored;
         we expect our caller (the signal handler) will have fixed
         this up. */
      if (VG_(clo_trace_signals))
         VG_(message)( Vg_DebugMsg,
                       "  completed and committed: nothing to do\n");
#     if defined(VGP_x86_solaris)
      /* The %eax and %edx values are committed but the carry flag is still
         uncommitted.  Save it now. */
      LibVEX_GuestX86_put_eflag_c(sr_isError(sres), &th_regs->vex);
#     elif defined(VGP_amd64_solaris)
      LibVEX_GuestAMD64_put_rflag_c(sr_isError(sres), &th_regs->vex);
#     endif
      getSyscallStatusFromGuestState( &sci->status, &th_regs->vex );
      vg_assert(sci->status.what == SsComplete);
      VG_(post_syscall)(tid);
   }

   else
      VG_(core_panic)("?? strange syscall interrupt state?");

   /* In all cases, the syscall is now finished (even if we called
      ML_(fixup_guest_state_to_restart_syscall), since that just
      re-positions the guest's IP for another go at it).  So we need
      to record that fact. */
   sci->status.what = SsIdle;
}
#if defined(VGO_solaris)
/* Returns True if ip is inside a fixable syscall code in syscall-*-*.S.  This
   function can be called by a 'non-running' thread! */
Bool VG_(is_ip_in_blocking_syscall)(ThreadId tid, Addr ip)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   if (tst->os_state.in_door_return)
      return ip >= ML_(blksys_setup_DRET) && ip < ML_(blksys_finished_DRET);
   else
      return ip >= ML_(blksys_setup) && ip < ML_(blksys_finished);
}
#endif
#if defined(VGO_darwin)
// Clean up after workq_ops(WQOPS_THREAD_RETURN) jumped to wqthread_hijack.
// This is similar to VG_(fixup_guest_state_after_syscall_interrupted).
// This longjmps back to the scheduler.
void ML_(wqthread_continue_NORETURN)(ThreadId tid)
{
   ThreadState* tst;
   SyscallInfo* sci;

   VG_(acquire_BigLock)(tid, "wqthread_continue_NORETURN");

   PRINT("SYSCALL[%d,%u](%s) workq_ops() starting new workqueue item\n",
         VG_(getpid)(), tid, VG_SYSNUM_STRING(__NR_workq_ops));

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   sci = & syscallInfo[tid];
   vg_assert(sci->status.what != SsIdle);
   vg_assert(tst->os_state.wq_jmpbuf_valid);   // check this BEFORE post_syscall

   // Pretend the syscall completed normally, but don't touch the thread state.
   sci->status = convert_SysRes_to_SyscallStatus( VG_(mk_SysRes_Success)(0) );
   sci->flags |= SfNoWriteResult;
   VG_(post_syscall)(tid);

   ML_(sync_mappings)("in", "ML_(wqthread_continue_NORETURN)", 0);

   sci->status.what = SsIdle;

   vg_assert(tst->sched_jmpbuf_valid);
   VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);

   /* NOTREACHED */
   vg_assert(0);
}
#endif
/* ---------------------------------------------------------------------
   A place to store the where-to-call-when-really-done pointer
   ------------------------------------------------------------------ */

// When the final thread is done, where shall I call to shut down the
// system cleanly?  Is set once at startup (in m_main) and never
// changes after that.  Is basically a pointer to the exit
// continuation.  This is all just a nasty hack to avoid calling
// directly from m_syswrap to m_main at exit, since that would cause
// m_main to become part of a module cycle, which is silly.
void (* VG_(address_of_m_main_shutdown_actions_NORETURN) )
       (ThreadId,VgSchedReturnCode)
   = NULL;

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/