/* Source: coregrind/m_syswrap/syswrap-main.c
   (valgrind.git, blob 0479400ffba54a5b7c24aaf9b09d1f641d5e1ab3;
   extracted from a gitweb page titled "tests/vg_regtest: Always
   evaluate prerequisite expressions with sh") */
/*--------------------------------------------------------------------*/
/*--- Handle system calls.                          syswrap-main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2013 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
31 #include "libvex_guest_offsets.h"
32 #include "libvex_trc_values.h"
33 #include "pub_core_basics.h"
34 #include "pub_core_aspacemgr.h"
35 #include "pub_core_vki.h"
36 #include "pub_core_vkiscnums.h"
37 #include "pub_core_threadstate.h"
38 #include "pub_core_libcbase.h"
39 #include "pub_core_libcassert.h"
40 #include "pub_core_libcprint.h"
41 #include "pub_core_libcproc.h" // For VG_(getpid)()
42 #include "pub_core_libcsignal.h"
43 #include "pub_core_scheduler.h" // For VG_({acquire,release}_BigLock),
44 // and VG_(vg_yield)
45 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
46 #include "pub_core_tooliface.h"
47 #include "pub_core_options.h"
48 #include "pub_core_signals.h" // For VG_SIGVGKILL, VG_(poll_signals)
49 #include "pub_core_syscall.h"
50 #include "pub_core_machine.h"
51 #include "pub_core_mallocfree.h"
52 #include "pub_core_syswrap.h"
54 #include "priv_types_n_macros.h"
55 #include "priv_syswrap-main.h"
57 #if defined(VGO_darwin)
58 #include "priv_syswrap-darwin.h"
59 #endif
/* Useful info which needs to be recorded somewhere:
   Use of registers in syscalls is:

          NUM   ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 ARG7 ARG8 RESULT
   LINUX:
   x86    eax   ebx  ecx  edx  esi  edi  ebp  n/a  n/a  eax       (== NUM)
   amd64  rax   rdi  rsi  rdx  r10  r8   r9   n/a  n/a  rax       (== NUM)
   ppc32  r0    r3   r4   r5   r6   r7   r8   n/a  n/a  r3+CR0.SO (== ARG1)
   ppc64  r0    r3   r4   r5   r6   r7   r8   n/a  n/a  r3+CR0.SO (== ARG1)
   arm    r7    r0   r1   r2   r3   r4   r5   n/a  n/a  r0        (== ARG1)
   mips32 v0    a0   a1   a2   a3 stack stack n/a  n/a  v0        (== NUM)
   mips64 v0    a0   a1   a2   a3   a4   a5   a6   a7   v0        (== NUM)
   arm64  x8    x0   x1   x2   x3   x4   x5   n/a  n/a  x0 ??     (== ARG1??)

   On s390x the svc instruction is used for system calls. The system call
   number is encoded in the instruction (8 bit immediate field). Since Linux
   2.6 it is also allowed to use svc 0 with the system call number in r1.
   This was introduced for system calls >255, but works for all. It is
   also possible to see the svc 0 together with an EXecute instruction, that
   fills in the immediate field.
   s390x  r1/SVC r2   r3   r4   r5   r6   r7   n/a  n/a  r2       (== ARG1)

          NUM   ARG1 ARG2 ARG3 ARG4 ARG5 ARG6 ARG7 ARG8 RESULT
   DARWIN:
   x86    eax   +4   +8   +12  +16  +20  +24  +28  +32  edx:eax, eflags.c
   amd64  rax   rdi  rsi  rdx  rcx  r8   r9   +8   +16  rdx:rax, rflags.c

   For x86-darwin, "+N" denotes "in memory at N(%esp)"; ditto
   amd64-darwin.  Apparently 0(%esp) is some kind of return address
   (perhaps for syscalls done with "sysenter"?)  I don't think it is
   relevant for syscalls done with "int $0x80/1/2".

   SOLARIS:
   x86    eax   +4   +8   +12  +16  +20  +24  +28  +32  edx:eax, eflags.c
   amd64  rax   rdi  rsi  rdx  r10  r8   r9   +8   +16  rdx:rax, rflags.c

   "+N" denotes "in memory at N(%esp)".  Solaris also supports fasttrap
   syscalls.  Fasttraps do not take any parameters (except of the sysno in
   eax) and never fail (if the sysno is valid).
*/
/* This is the top level of the system-call handler module.  All
   system calls are channelled through here, doing two things:

   * notify the tool of the events (mem/reg reads, writes) happening

   * perform the syscall, usually by passing it along to the kernel
     unmodified.

   A magical piece of assembly code, do_syscall_for_client_WRK, in
   syscall-$PLATFORM.S does the tricky bit of passing a syscall to the
   kernel, whilst having the simulator retain control.
*/
/* The main function is VG_(client_syscall).  The simulation calls it
   whenever a client thread wants to do a syscall.  The following is a
   sketch of what it does.

   * Ensures the root thread's stack is suitably mapped.  Tedious and
     arcane.  See big big comment in VG_(client_syscall).

   * First, it rounds up the syscall number and args (which is a
     platform dependent activity) and puts them in a struct ("args")
     and also a copy in "orig_args".

     The pre/post wrappers refer to these structs and so no longer
     need magic macros to access any specific registers.  This struct
     is stored in thread-specific storage.

   * The pre-wrapper is called, passing it a pointer to struct
     "args".

   * The pre-wrapper examines the args and pokes the tool
     appropriately.  It may modify the args; this is why "orig_args"
     is also stored.

     The pre-wrapper may choose to 'do' the syscall itself, and
     concludes one of three outcomes:

       Success(N)    -- syscall is already complete, with success;
                        result is N

       Fail(N)       -- syscall is already complete, with failure;
                        error code is N

       HandToKernel  -- (the usual case): this needs to be given to
                        the kernel to be done, using the values in
                        the possibly-modified "args" struct.

     In addition, the pre-wrapper may set some flags:

       MayBlock   -- only applicable when outcome==HandToKernel

       PostOnFail -- only applicable when outcome==HandToKernel or Fail

   * If the pre-outcome is HandToKernel, the syscall is duly handed
     off to the kernel (perhaps involving some thread switchery, but
     that's not important).  This reduces the possible set of outcomes
     to either Success(N) or Fail(N).

   * The outcome (Success(N) or Fail(N)) is written back to the guest
     register(s).  This is platform specific:

       x86:    Success(N) ==>  eax = N
               Fail(N)    ==>  eax = -N

       ditto amd64

       ppc32:  Success(N) ==>  r3 = N, CR0.SO = 0
               Fail(N)    ==>  r3 = N, CR0.SO = 1

       Darwin:
       x86:    Success(N) ==>  edx:eax = N, cc = 0
               Fail(N)    ==>  edx:eax = N, cc = 1

       s390x:  Success(N) ==>  r2 = N
               Fail(N)    ==>  r2 = -N

       Solaris:
       x86:    Success(N) ==>  edx:eax = N, cc = 0
               Fail(N)    ==>  eax = N, cc = 1
       Same applies for fasttraps except they never fail.

   * The post wrapper is called if:

     - it exists, and
     - outcome==Success or (outcome==Fail and PostOnFail is set)

     The post wrapper is passed the adulterated syscall args (struct
     "args"), and the syscall outcome (viz, Success(N) or Fail(N)).

   There are several other complications, primarily to do with
   syscalls getting interrupted, explained in comments in the code.
*/
/* CAVEATS for writing wrappers.  It is important to follow these!

   The macros defined in priv_types_n_macros.h are designed to help
   decouple the wrapper logic from the actual representation of
   syscall args/results, since these wrappers are designed to work on
   multiple platforms.

   Sometimes a PRE wrapper will complete the syscall itself, without
   handing it to the kernel.  It will use one of SET_STATUS_Success,
   SET_STATUS_Failure or SET_STATUS_from_SysRes to set the return
   value.  It is critical to appreciate that use of the macro does not
   immediately cause the underlying guest state to be updated -- that
   is done by the driver logic in this file, when the wrapper returns.

   As a result, PRE wrappers of the following form will malfunction:

   PRE(fooble)
   {
      ... do stuff ...
      SET_STATUS_Somehow(...)

      // do something that assumes guest state is up to date
   }

   In particular, direct or indirect calls to VG_(poll_signals) after
   setting STATUS can cause the guest state to be read (in order to
   build signal frames).  Do not do this.  If you want a signal poll
   after the syscall goes through, do "*flags |= SfPollAfter" and the
   driver logic will do it for you.

   -----------

   Another critical requirement following introduction of new address
   space manager (JRS, 20050923):

   In a situation where the mappedness of memory has changed, aspacem
   should be notified BEFORE the tool.  Hence the following is
   correct:

      Bool d = VG_(am_notify_munmap)(s->start, s->end+1 - s->start);
      VG_TRACK( die_mem_munmap, s->start, s->end+1 - s->start );
      if (d)
         VG_(discard_translations)(s->start, s->end+1 - s->start);

   whilst this is wrong:

      VG_TRACK( die_mem_munmap, s->start, s->end+1 - s->start );
      Bool d = VG_(am_notify_munmap)(s->start, s->end+1 - s->start);
      if (d)
         VG_(discard_translations)(s->start, s->end+1 - s->start);

   The reason is that the tool may itself ask aspacem for more shadow
   memory as a result of the VG_TRACK call.  In such a situation it is
   critical that aspacem's segment array is up to date -- hence the
   need to notify aspacem first.

   -----------

   Also .. take care to call VG_(discard_translations) whenever
   memory with execute permissions is unmapped.
*/
/* ---------------------------------------------------------------------
   Do potentially blocking syscall for the client, and mess with
   signal masks at the same time.
   ------------------------------------------------------------------ */
/* Perform a syscall on behalf of a client thread, using a specific
   signal mask.  On completion, the signal mask is set to restore_mask
   (which presumably blocks almost everything).  If a signal happens
   during the syscall, the handler should call
   VG_(fixup_guest_state_after_syscall_interrupted) to adjust the
   thread's context to do the right thing.

   The _WRK function is handwritten assembly, implemented per-platform
   in coregrind/m_syswrap/syscall-$PLAT.S.  It has some very magic
   properties.  See comments at the top of
   VG_(fixup_guest_state_after_syscall_interrupted) below for details.

   This function (these functions) are required to return zero in case
   of success (even if the syscall itself failed), and nonzero if the
   sigprocmask-swizzling calls failed.  We don't actually care about
   the failure values from sigprocmask, although most of the assembly
   implementations do attempt to return that, using the convention
   0 for success, or 0x8000 | error-code for failure.
*/
287 #if defined(VGO_linux)
288 extern
289 UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
290 void* guest_state,
291 const vki_sigset_t *syscall_mask,
292 const vki_sigset_t *restore_mask,
293 Word sigsetSzB );
294 #elif defined(VGO_darwin)
295 extern
296 UWord ML_(do_syscall_for_client_unix_WRK)( Word syscallno,
297 void* guest_state,
298 const vki_sigset_t *syscall_mask,
299 const vki_sigset_t *restore_mask,
300 Word sigsetSzB ); /* unused */
301 extern
302 UWord ML_(do_syscall_for_client_mach_WRK)( Word syscallno,
303 void* guest_state,
304 const vki_sigset_t *syscall_mask,
305 const vki_sigset_t *restore_mask,
306 Word sigsetSzB ); /* unused */
307 extern
308 UWord ML_(do_syscall_for_client_mdep_WRK)( Word syscallno,
309 void* guest_state,
310 const vki_sigset_t *syscall_mask,
311 const vki_sigset_t *restore_mask,
312 Word sigsetSzB ); /* unused */
313 #elif defined(VGO_solaris)
314 extern
315 UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
316 void* guest_state,
317 const vki_sigset_t *syscall_mask,
318 const vki_sigset_t *restore_mask,
319 UChar *cflag);
320 UWord ML_(do_syscall_for_client_dret_WRK)( Word syscallno,
321 void* guest_state,
322 const vki_sigset_t *syscall_mask,
323 const vki_sigset_t *restore_mask,
324 UChar *cflag);
325 #else
326 # error "Unknown OS"
327 #endif
330 static
331 void do_syscall_for_client ( Int syscallno,
332 ThreadState* tst,
333 const vki_sigset_t* syscall_mask )
335 vki_sigset_t saved;
336 UWord err;
337 # if defined(VGO_linux)
338 err = ML_(do_syscall_for_client_WRK)(
339 syscallno, &tst->arch.vex,
340 syscall_mask, &saved, sizeof(vki_sigset_t)
342 # elif defined(VGO_darwin)
343 switch (VG_DARWIN_SYSNO_CLASS(syscallno)) {
344 case VG_DARWIN_SYSCALL_CLASS_UNIX:
345 err = ML_(do_syscall_for_client_unix_WRK)(
346 VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
347 syscall_mask, &saved, 0/*unused:sigsetSzB*/
349 break;
350 case VG_DARWIN_SYSCALL_CLASS_MACH:
351 err = ML_(do_syscall_for_client_mach_WRK)(
352 VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
353 syscall_mask, &saved, 0/*unused:sigsetSzB*/
355 break;
356 case VG_DARWIN_SYSCALL_CLASS_MDEP:
357 err = ML_(do_syscall_for_client_mdep_WRK)(
358 VG_DARWIN_SYSNO_FOR_KERNEL(syscallno), &tst->arch.vex,
359 syscall_mask, &saved, 0/*unused:sigsetSzB*/
361 break;
362 default:
363 vg_assert(0);
364 /*NOTREACHED*/
365 break;
367 # elif defined(VGO_solaris)
368 UChar cflag;
370 /* Fasttraps or anything else cannot go through this path. */
371 vg_assert(VG_SOLARIS_SYSNO_CLASS(syscallno)
372 == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
374 /* If the syscall is a door_return call then it has to be handled very
375 differently. */
376 if (tst->os_state.in_door_return)
377 err = ML_(do_syscall_for_client_dret_WRK)(
378 syscallno, &tst->arch.vex,
379 syscall_mask, &saved, &cflag
381 else
382 err = ML_(do_syscall_for_client_WRK)(
383 syscallno, &tst->arch.vex,
384 syscall_mask, &saved, &cflag
387 /* Save the carry flag. */
388 # if defined(VGP_x86_solaris)
389 LibVEX_GuestX86_put_eflag_c(cflag, &tst->arch.vex);
390 # elif defined(VGP_amd64_solaris)
391 LibVEX_GuestAMD64_put_rflag_c(cflag, &tst->arch.vex);
392 # else
393 # error "Unknown platform"
394 # endif
396 # else
397 # error "Unknown OS"
398 # endif
399 vg_assert2(
400 err == 0,
401 "ML_(do_syscall_for_client_WRK): sigprocmask error %d",
402 (Int)(err & 0xFFF)
/* ---------------------------------------------------------------------
   Impedance matchers and misc helpers
   ------------------------------------------------------------------ */
411 static
412 Bool eq_SyscallArgs ( SyscallArgs* a1, SyscallArgs* a2 )
414 return a1->sysno == a2->sysno
415 && a1->arg1 == a2->arg1
416 && a1->arg2 == a2->arg2
417 && a1->arg3 == a2->arg3
418 && a1->arg4 == a2->arg4
419 && a1->arg5 == a2->arg5
420 && a1->arg6 == a2->arg6
421 && a1->arg7 == a2->arg7
422 && a1->arg8 == a2->arg8;
425 static
426 Bool eq_SyscallStatus ( UInt sysno, SyscallStatus* s1, SyscallStatus* s2 )
428 /* was: return s1->what == s2->what && sr_EQ( s1->sres, s2->sres ); */
429 if (s1->what == s2->what && sr_EQ( sysno, s1->sres, s2->sres ))
430 return True;
431 # if defined(VGO_darwin)
432 /* Darwin-specific debugging guff */
433 vg_assert(s1->what == s2->what);
434 VG_(printf)("eq_SyscallStatus:\n");
435 VG_(printf)(" {%lu %lu %u}\n", s1->sres._wLO, s1->sres._wHI, s1->sres._mode);
436 VG_(printf)(" {%lu %lu %u}\n", s2->sres._wLO, s2->sres._wHI, s2->sres._mode);
437 vg_assert(0);
438 # endif
439 return False;
/* Convert between SysRes and SyscallStatus, to the extent possible. */
444 static
445 SyscallStatus convert_SysRes_to_SyscallStatus ( SysRes res )
447 SyscallStatus status;
448 status.what = SsComplete;
449 status.sres = res;
450 return status;
/* Impedance matchers.  These convert syscall arg or result data from
   the platform-specific in-guest-state format to the canonical
   formats, and back. */
458 static
459 void getSyscallArgsFromGuestState ( /*OUT*/SyscallArgs* canonical,
460 /*IN*/ VexGuestArchState* gst_vanilla,
461 /*IN*/ UInt trc )
463 #if defined(VGP_x86_linux)
464 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
465 canonical->sysno = gst->guest_EAX;
466 canonical->arg1 = gst->guest_EBX;
467 canonical->arg2 = gst->guest_ECX;
468 canonical->arg3 = gst->guest_EDX;
469 canonical->arg4 = gst->guest_ESI;
470 canonical->arg5 = gst->guest_EDI;
471 canonical->arg6 = gst->guest_EBP;
472 canonical->arg7 = 0;
473 canonical->arg8 = 0;
475 #elif defined(VGP_amd64_linux)
476 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
477 canonical->sysno = gst->guest_RAX;
478 canonical->arg1 = gst->guest_RDI;
479 canonical->arg2 = gst->guest_RSI;
480 canonical->arg3 = gst->guest_RDX;
481 canonical->arg4 = gst->guest_R10;
482 canonical->arg5 = gst->guest_R8;
483 canonical->arg6 = gst->guest_R9;
484 canonical->arg7 = 0;
485 canonical->arg8 = 0;
487 #elif defined(VGP_ppc32_linux)
488 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
489 canonical->sysno = gst->guest_GPR0;
490 canonical->arg1 = gst->guest_GPR3;
491 canonical->arg2 = gst->guest_GPR4;
492 canonical->arg3 = gst->guest_GPR5;
493 canonical->arg4 = gst->guest_GPR6;
494 canonical->arg5 = gst->guest_GPR7;
495 canonical->arg6 = gst->guest_GPR8;
496 canonical->arg7 = 0;
497 canonical->arg8 = 0;
499 #elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
500 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
501 canonical->sysno = gst->guest_GPR0;
502 canonical->arg1 = gst->guest_GPR3;
503 canonical->arg2 = gst->guest_GPR4;
504 canonical->arg3 = gst->guest_GPR5;
505 canonical->arg4 = gst->guest_GPR6;
506 canonical->arg5 = gst->guest_GPR7;
507 canonical->arg6 = gst->guest_GPR8;
508 canonical->arg7 = 0;
509 canonical->arg8 = 0;
511 #elif defined(VGP_arm_linux)
512 VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
513 canonical->sysno = gst->guest_R7;
514 canonical->arg1 = gst->guest_R0;
515 canonical->arg2 = gst->guest_R1;
516 canonical->arg3 = gst->guest_R2;
517 canonical->arg4 = gst->guest_R3;
518 canonical->arg5 = gst->guest_R4;
519 canonical->arg6 = gst->guest_R5;
520 canonical->arg7 = 0;
521 canonical->arg8 = 0;
523 #elif defined(VGP_arm64_linux)
524 VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
525 canonical->sysno = gst->guest_X8;
526 canonical->arg1 = gst->guest_X0;
527 canonical->arg2 = gst->guest_X1;
528 canonical->arg3 = gst->guest_X2;
529 canonical->arg4 = gst->guest_X3;
530 canonical->arg5 = gst->guest_X4;
531 canonical->arg6 = gst->guest_X5;
532 canonical->arg7 = 0;
533 canonical->arg8 = 0;
535 #elif defined(VGP_mips32_linux)
536 VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
537 canonical->sysno = gst->guest_r2; // v0
538 if (canonical->sysno == __NR_exit) {
539 canonical->arg1 = gst->guest_r4; // a0
540 canonical->arg2 = 0;
541 canonical->arg3 = 0;
542 canonical->arg4 = 0;
543 canonical->arg5 = 0;
544 canonical->arg6 = 0;
545 canonical->arg8 = 0;
546 } else if (canonical->sysno != __NR_syscall) {
547 canonical->arg1 = gst->guest_r4; // a0
548 canonical->arg2 = gst->guest_r5; // a1
549 canonical->arg3 = gst->guest_r6; // a2
550 canonical->arg4 = gst->guest_r7; // a3
551 canonical->arg5 = *((UInt*) (gst->guest_r29 + 16)); // 16(guest_SP/sp)
552 canonical->arg6 = *((UInt*) (gst->guest_r29 + 20)); // 20(sp)
553 canonical->arg8 = 0;
554 } else {
555 // Fixme hack handle syscall()
556 canonical->sysno = gst->guest_r4; // a0
557 canonical->arg1 = gst->guest_r5; // a1
558 canonical->arg2 = gst->guest_r6; // a2
559 canonical->arg3 = gst->guest_r7; // a3
560 canonical->arg4 = *((UInt*) (gst->guest_r29 + 16)); // 16(guest_SP/sp)
561 canonical->arg5 = *((UInt*) (gst->guest_r29 + 20)); // 20(guest_SP/sp)
562 canonical->arg6 = *((UInt*) (gst->guest_r29 + 24)); // 24(guest_SP/sp)
563 canonical->arg8 = __NR_syscall;
566 #elif defined(VGP_mips64_linux)
567 VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
568 canonical->sysno = gst->guest_r2; // v0
569 canonical->arg1 = gst->guest_r4; // a0
570 canonical->arg2 = gst->guest_r5; // a1
571 canonical->arg3 = gst->guest_r6; // a2
572 canonical->arg4 = gst->guest_r7; // a3
573 canonical->arg5 = gst->guest_r8; // a4
574 canonical->arg6 = gst->guest_r9; // a5
576 #elif defined(VGP_x86_darwin)
577 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
578 UWord *stack = (UWord *)gst->guest_ESP;
579 // GrP fixme hope syscalls aren't called with really shallow stacks...
580 canonical->sysno = gst->guest_EAX;
581 if (canonical->sysno != 0) {
582 // stack[0] is return address
583 canonical->arg1 = stack[1];
584 canonical->arg2 = stack[2];
585 canonical->arg3 = stack[3];
586 canonical->arg4 = stack[4];
587 canonical->arg5 = stack[5];
588 canonical->arg6 = stack[6];
589 canonical->arg7 = stack[7];
590 canonical->arg8 = stack[8];
591 } else {
592 // GrP fixme hack handle syscall()
593 // GrP fixme what about __syscall() ?
594 // stack[0] is return address
595 // DDD: the tool can't see that the params have been shifted! Can
596 // lead to incorrect checking, I think, because the PRRAn/PSARn
597 // macros will mention the pre-shifted args.
598 canonical->sysno = stack[1];
599 vg_assert(canonical->sysno != 0);
600 canonical->arg1 = stack[2];
601 canonical->arg2 = stack[3];
602 canonical->arg3 = stack[4];
603 canonical->arg4 = stack[5];
604 canonical->arg5 = stack[6];
605 canonical->arg6 = stack[7];
606 canonical->arg7 = stack[8];
607 canonical->arg8 = stack[9];
609 PRINT("SYSCALL[%d,?](0) syscall(%s, ...); please stand by...\n",
610 VG_(getpid)(), /*tid,*/
611 VG_SYSNUM_STRING(canonical->sysno));
614 // Here we determine what kind of syscall it was by looking at the
615 // interrupt kind, and then encode the syscall number using the 64-bit
616 // encoding for Valgrind's internal use.
618 // DDD: Would it be better to stash the JMP kind into the Darwin
619 // thread state rather than passing in the trc?
620 switch (trc) {
621 case VEX_TRC_JMP_SYS_INT128:
622 // int $0x80 = Unix, 64-bit result
623 vg_assert(canonical->sysno >= 0);
624 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(canonical->sysno);
625 break;
626 case VEX_TRC_JMP_SYS_SYSENTER:
627 // syscall = Unix, 32-bit result
628 // OR Mach, 32-bit result
629 if (canonical->sysno >= 0) {
630 // GrP fixme hack: 0xffff == I386_SYSCALL_NUMBER_MASK
631 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(canonical->sysno
632 & 0xffff);
633 } else {
634 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MACH(-canonical->sysno);
636 break;
637 case VEX_TRC_JMP_SYS_INT129:
638 // int $0x81 = Mach, 32-bit result
639 vg_assert(canonical->sysno < 0);
640 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MACH(-canonical->sysno);
641 break;
642 case VEX_TRC_JMP_SYS_INT130:
643 // int $0x82 = mdep, 32-bit result
644 vg_assert(canonical->sysno >= 0);
645 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_MDEP(canonical->sysno);
646 break;
647 default:
648 vg_assert(0);
649 break;
652 #elif defined(VGP_amd64_darwin)
653 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
654 UWord *stack = (UWord *)gst->guest_RSP;
656 vg_assert(trc == VEX_TRC_JMP_SYS_SYSCALL);
658 // GrP fixme hope syscalls aren't called with really shallow stacks...
659 canonical->sysno = gst->guest_RAX;
660 if (canonical->sysno != __NR_syscall) {
661 // stack[0] is return address
662 canonical->arg1 = gst->guest_RDI;
663 canonical->arg2 = gst->guest_RSI;
664 canonical->arg3 = gst->guest_RDX;
665 canonical->arg4 = gst->guest_R10; // not rcx with syscall insn
666 canonical->arg5 = gst->guest_R8;
667 canonical->arg6 = gst->guest_R9;
668 canonical->arg7 = stack[1];
669 canonical->arg8 = stack[2];
670 } else {
671 // GrP fixme hack handle syscall()
672 // GrP fixme what about __syscall() ?
673 // stack[0] is return address
674 // DDD: the tool can't see that the params have been shifted! Can
675 // lead to incorrect checking, I think, because the PRRAn/PSARn
676 // macros will mention the pre-shifted args.
677 canonical->sysno = VG_DARWIN_SYSCALL_CONSTRUCT_UNIX(gst->guest_RDI);
678 vg_assert(canonical->sysno != __NR_syscall);
679 canonical->arg1 = gst->guest_RSI;
680 canonical->arg2 = gst->guest_RDX;
681 canonical->arg3 = gst->guest_R10; // not rcx with syscall insn
682 canonical->arg4 = gst->guest_R8;
683 canonical->arg5 = gst->guest_R9;
684 canonical->arg6 = stack[1];
685 canonical->arg7 = stack[2];
686 canonical->arg8 = stack[3];
688 PRINT("SYSCALL[%d,?](0) syscall(%s, ...); please stand by...\n",
689 VG_(getpid)(), /*tid,*/
690 VG_SYSNUM_STRING(canonical->sysno));
693 // no canonical->sysno adjustment needed
695 #elif defined(VGP_s390x_linux)
696 VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
697 canonical->sysno = gst->guest_SYSNO;
698 canonical->arg1 = gst->guest_r2;
699 canonical->arg2 = gst->guest_r3;
700 canonical->arg3 = gst->guest_r4;
701 canonical->arg4 = gst->guest_r5;
702 canonical->arg5 = gst->guest_r6;
703 canonical->arg6 = gst->guest_r7;
704 canonical->arg7 = 0;
705 canonical->arg8 = 0;
707 #elif defined(VGP_tilegx_linux)
708 VexGuestTILEGXState* gst = (VexGuestTILEGXState*)gst_vanilla;
709 canonical->sysno = gst->guest_r10;
710 canonical->arg1 = gst->guest_r0;
711 canonical->arg2 = gst->guest_r1;
712 canonical->arg3 = gst->guest_r2;
713 canonical->arg4 = gst->guest_r3;
714 canonical->arg5 = gst->guest_r4;
715 canonical->arg6 = gst->guest_r5;
716 canonical->arg7 = 0;
717 canonical->arg8 = 0;
719 #elif defined(VGP_x86_solaris)
720 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
721 UWord *stack = (UWord *)gst->guest_ESP;
722 canonical->sysno = gst->guest_EAX;
723 /* stack[0] is a return address. */
724 canonical->arg1 = stack[1];
725 canonical->arg2 = stack[2];
726 canonical->arg3 = stack[3];
727 canonical->arg4 = stack[4];
728 canonical->arg5 = stack[5];
729 canonical->arg6 = stack[6];
730 canonical->arg7 = stack[7];
731 canonical->arg8 = stack[8];
733 switch (trc) {
734 case VEX_TRC_JMP_SYS_INT145:
735 case VEX_TRC_JMP_SYS_SYSENTER:
736 case VEX_TRC_JMP_SYS_SYSCALL:
737 /* These three are not actually valid syscall instructions on Solaris.
738 Pretend for now that we handle them as normal syscalls. */
739 case VEX_TRC_JMP_SYS_INT128:
740 case VEX_TRC_JMP_SYS_INT129:
741 case VEX_TRC_JMP_SYS_INT130:
742 /* int $0x91, sysenter, syscall = normal syscall */
743 break;
744 case VEX_TRC_JMP_SYS_INT210:
745 /* int $0xD2 = fasttrap */
746 canonical->sysno
747 = VG_SOLARIS_SYSCALL_CONSTRUCT_FASTTRAP(canonical->sysno);
748 break;
749 default:
750 vg_assert(0);
751 break;
754 #elif defined(VGP_amd64_solaris)
755 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
756 UWord *stack = (UWord *)gst->guest_RSP;
757 canonical->sysno = gst->guest_RAX;
758 /* stack[0] is a return address. */
759 canonical->arg1 = gst->guest_RDI;
760 canonical->arg2 = gst->guest_RSI;
761 canonical->arg3 = gst->guest_RDX;
762 canonical->arg4 = gst->guest_R10; /* Not RCX with syscall. */
763 canonical->arg5 = gst->guest_R8;
764 canonical->arg6 = gst->guest_R9;
765 canonical->arg7 = stack[1];
766 canonical->arg8 = stack[2];
768 switch (trc) {
769 case VEX_TRC_JMP_SYS_SYSCALL:
770 /* syscall = normal syscall */
771 break;
772 case VEX_TRC_JMP_SYS_INT210:
773 /* int $0xD2 = fasttrap */
774 canonical->sysno
775 = VG_SOLARIS_SYSCALL_CONSTRUCT_FASTTRAP(canonical->sysno);
776 break;
777 default:
778 vg_assert(0);
779 break;
782 #else
783 # error "getSyscallArgsFromGuestState: unknown arch"
784 #endif
787 static
788 void putSyscallArgsIntoGuestState ( /*IN*/ SyscallArgs* canonical,
789 /*OUT*/VexGuestArchState* gst_vanilla )
791 #if defined(VGP_x86_linux)
792 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
793 gst->guest_EAX = canonical->sysno;
794 gst->guest_EBX = canonical->arg1;
795 gst->guest_ECX = canonical->arg2;
796 gst->guest_EDX = canonical->arg3;
797 gst->guest_ESI = canonical->arg4;
798 gst->guest_EDI = canonical->arg5;
799 gst->guest_EBP = canonical->arg6;
801 #elif defined(VGP_amd64_linux)
802 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
803 gst->guest_RAX = canonical->sysno;
804 gst->guest_RDI = canonical->arg1;
805 gst->guest_RSI = canonical->arg2;
806 gst->guest_RDX = canonical->arg3;
807 gst->guest_R10 = canonical->arg4;
808 gst->guest_R8 = canonical->arg5;
809 gst->guest_R9 = canonical->arg6;
811 #elif defined(VGP_ppc32_linux)
812 VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
813 gst->guest_GPR0 = canonical->sysno;
814 gst->guest_GPR3 = canonical->arg1;
815 gst->guest_GPR4 = canonical->arg2;
816 gst->guest_GPR5 = canonical->arg3;
817 gst->guest_GPR6 = canonical->arg4;
818 gst->guest_GPR7 = canonical->arg5;
819 gst->guest_GPR8 = canonical->arg6;
821 #elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
822 VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
823 gst->guest_GPR0 = canonical->sysno;
824 gst->guest_GPR3 = canonical->arg1;
825 gst->guest_GPR4 = canonical->arg2;
826 gst->guest_GPR5 = canonical->arg3;
827 gst->guest_GPR6 = canonical->arg4;
828 gst->guest_GPR7 = canonical->arg5;
829 gst->guest_GPR8 = canonical->arg6;
831 #elif defined(VGP_arm_linux)
832 VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
833 gst->guest_R7 = canonical->sysno;
834 gst->guest_R0 = canonical->arg1;
835 gst->guest_R1 = canonical->arg2;
836 gst->guest_R2 = canonical->arg3;
837 gst->guest_R3 = canonical->arg4;
838 gst->guest_R4 = canonical->arg5;
839 gst->guest_R5 = canonical->arg6;
841 #elif defined(VGP_arm64_linux)
842 VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
843 gst->guest_X8 = canonical->sysno;
844 gst->guest_X0 = canonical->arg1;
845 gst->guest_X1 = canonical->arg2;
846 gst->guest_X2 = canonical->arg3;
847 gst->guest_X3 = canonical->arg4;
848 gst->guest_X4 = canonical->arg5;
849 gst->guest_X5 = canonical->arg6;
851 #elif defined(VGP_x86_darwin)
852 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
853 UWord *stack = (UWord *)gst->guest_ESP;
855 gst->guest_EAX = VG_DARWIN_SYSNO_FOR_KERNEL(canonical->sysno);
857 // GrP fixme? gst->guest_TEMP_EFLAG_C = 0;
858 // stack[0] is return address
859 stack[1] = canonical->arg1;
860 stack[2] = canonical->arg2;
861 stack[3] = canonical->arg3;
862 stack[4] = canonical->arg4;
863 stack[5] = canonical->arg5;
864 stack[6] = canonical->arg6;
865 stack[7] = canonical->arg7;
866 stack[8] = canonical->arg8;
868 #elif defined(VGP_amd64_darwin)
869 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
870 UWord *stack = (UWord *)gst->guest_RSP;
872 gst->guest_RAX = VG_DARWIN_SYSNO_FOR_KERNEL(canonical->sysno);
873 // GrP fixme? gst->guest_TEMP_EFLAG_C = 0;
875 // stack[0] is return address
876 gst->guest_RDI = canonical->arg1;
877 gst->guest_RSI = canonical->arg2;
878 gst->guest_RDX = canonical->arg3;
879 gst->guest_RCX = canonical->arg4;
880 gst->guest_R8 = canonical->arg5;
881 gst->guest_R9 = canonical->arg6;
882 stack[1] = canonical->arg7;
883 stack[2] = canonical->arg8;
885 #elif defined(VGP_s390x_linux)
886 VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
887 gst->guest_SYSNO = canonical->sysno;
888 gst->guest_r2 = canonical->arg1;
889 gst->guest_r3 = canonical->arg2;
890 gst->guest_r4 = canonical->arg3;
891 gst->guest_r5 = canonical->arg4;
892 gst->guest_r6 = canonical->arg5;
893 gst->guest_r7 = canonical->arg6;
895 #elif defined(VGP_mips32_linux)
896 VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
897 if (canonical->arg8 != __NR_syscall) {
898 gst->guest_r2 = canonical->sysno;
899 gst->guest_r4 = canonical->arg1;
900 gst->guest_r5 = canonical->arg2;
901 gst->guest_r6 = canonical->arg3;
902 gst->guest_r7 = canonical->arg4;
903 *((UInt*) (gst->guest_r29 + 16)) = canonical->arg5; // 16(guest_GPR29/sp)
904 *((UInt*) (gst->guest_r29 + 20)) = canonical->arg6; // 20(sp)
905 } else {
906 canonical->arg8 = 0;
907 gst->guest_r2 = __NR_syscall;
908 gst->guest_r4 = canonical->sysno;
909 gst->guest_r5 = canonical->arg1;
910 gst->guest_r6 = canonical->arg2;
911 gst->guest_r7 = canonical->arg3;
912 *((UInt*) (gst->guest_r29 + 16)) = canonical->arg4; // 16(guest_GPR29/sp)
913 *((UInt*) (gst->guest_r29 + 20)) = canonical->arg5; // 20(sp)
914 *((UInt*) (gst->guest_r29 + 24)) = canonical->arg6; // 24(sp)
917 #elif defined(VGP_mips64_linux)
918 VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
919 gst->guest_r2 = canonical->sysno;
920 gst->guest_r4 = canonical->arg1;
921 gst->guest_r5 = canonical->arg2;
922 gst->guest_r6 = canonical->arg3;
923 gst->guest_r7 = canonical->arg4;
924 gst->guest_r8 = canonical->arg5;
925 gst->guest_r9 = canonical->arg6;
927 #elif defined(VGP_tilegx_linux)
928 VexGuestTILEGXState* gst = (VexGuestTILEGXState*)gst_vanilla;
929 gst->guest_r10 = canonical->sysno;
930 gst->guest_r0 = canonical->arg1;
931 gst->guest_r1 = canonical->arg2;
932 gst->guest_r2 = canonical->arg3;
933 gst->guest_r3 = canonical->arg4;
934 gst->guest_r4 = canonical->arg5;
935 gst->guest_r5 = canonical->arg6;
937 #elif defined(VGP_x86_solaris)
938 VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
939 UWord *stack = (UWord *)gst->guest_ESP;
941 /* Fasttraps or anything else cannot go through this way. */
942 vg_assert(VG_SOLARIS_SYSNO_CLASS(canonical->sysno)
943 == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
944 gst->guest_EAX = canonical->sysno;
945 /* stack[0] is a return address. */
946 stack[1] = canonical->arg1;
947 stack[2] = canonical->arg2;
948 stack[3] = canonical->arg3;
949 stack[4] = canonical->arg4;
950 stack[5] = canonical->arg5;
951 stack[6] = canonical->arg6;
952 stack[7] = canonical->arg7;
953 stack[8] = canonical->arg8;
955 #elif defined(VGP_amd64_solaris)
956 VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
957 UWord *stack = (UWord *)gst->guest_RSP;
959 /* Fasttraps or anything else cannot go through this way. */
960 vg_assert(VG_SOLARIS_SYSNO_CLASS(canonical->sysno)
961 == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
962 gst->guest_RAX = canonical->sysno;
963 /* stack[0] is a return address. */
964 gst->guest_RDI = canonical->arg1;
965 gst->guest_RSI = canonical->arg2;
966 gst->guest_RDX = canonical->arg3;
967 gst->guest_R10 = canonical->arg4;
968 gst->guest_R8 = canonical->arg5;
969 gst->guest_R9 = canonical->arg6;
970 stack[1] = canonical->arg7;
971 stack[2] = canonical->arg8;
973 #else
974 # error "putSyscallArgsIntoGuestState: unknown arch"
975 #endif
/* Convert the result of a just-completed syscall, as left in the
   guest state by the kernel (or by Valgrind's fixup code), into the
   platform-independent canonical SyscallStatus form.  This is the
   read direction; putSyscallStatusIntoGuestState below is the write
   direction.  In every case the status is marked SsComplete, since
   the guest state holds a finished syscall's result. */
static
void getSyscallStatusFromGuestState ( /*OUT*/SyscallStatus* canonical,
                                      /*IN*/ VexGuestArchState* gst_vanilla )
{
#  if defined(VGP_x86_linux)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_x86_linux)( gst->guest_EAX );
   canonical->what = SsComplete;

#  elif defined(VGP_amd64_linux)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_amd64_linux)( gst->guest_RAX );
   canonical->what = SsComplete;

#  elif defined(VGP_ppc32_linux)
   /* On ppc the error/success distinction is not encoded in the result
      register but in condition-register bit CR0.SO. */
   VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
   UInt cr    = LibVEX_GuestPPC32_get_CR( gst );
   UInt cr0so = (cr >> 28) & 1;
   canonical->sres = VG_(mk_SysRes_ppc32_linux)( gst->guest_GPR3, cr0so );
   canonical->what = SsComplete;

#  elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
   UInt cr    = LibVEX_GuestPPC64_get_CR( gst );
   UInt cr0so = (cr >> 28) & 1;
   canonical->sres = VG_(mk_SysRes_ppc64_linux)( gst->guest_GPR3, cr0so );
   canonical->what = SsComplete;

#  elif defined(VGP_arm_linux)
   VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_arm_linux)( gst->guest_R0 );
   canonical->what = SsComplete;

#  elif defined(VGP_arm64_linux)
   VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_arm64_linux)( gst->guest_X0 );
   canonical->what = SsComplete;

#  elif defined(VGP_mips32_linux)
   /* MIPS returns the result in v0/v1 and an error flag in a3; all
      three are needed to build the SysRes. */
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   UInt v0 = gst->guest_r2;    // v0
   UInt v1 = gst->guest_r3;    // v1
   UInt a3 = gst->guest_r7;    // a3
   canonical->sres = VG_(mk_SysRes_mips32_linux)( v0, v1, a3 );
   canonical->what = SsComplete;

#  elif defined(VGP_mips64_linux)
   VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
   ULong v0 = gst->guest_r2;    // v0
   ULong v1 = gst->guest_r3;    // v1
   ULong a3 = gst->guest_r7;    // a3
   canonical->sres = VG_(mk_SysRes_mips64_linux)(v0, v1, a3);
   canonical->what = SsComplete;

#  elif defined(VGP_x86_darwin)
   /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
   /* Darwin has three syscall classes, each with a different return
      convention; guest_SC_CLASS (stashed at syscall entry) tells us
      which decoding to apply.  Only the Unix class uses the carry
      flag to signal error. */
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UInt carry = 1 & LibVEX_GuestX86_get_eflags(gst);
   UInt err = 0;
   UInt wLO = 0;
   UInt wHI = 0;
   switch (gst->guest_SC_CLASS) {
      case VG_DARWIN_SYSCALL_CLASS_UNIX:
         // int $0x80 = Unix, 64-bit result
         err = carry;
         wLO = gst->guest_EAX;
         wHI = gst->guest_EDX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MACH:
         // int $0x81 = Mach, 32-bit result
         wLO = gst->guest_EAX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MDEP:
         // int $0x82 = mdep, 32-bit result
         wLO = gst->guest_EAX;
         break;
      default:
         vg_assert(0);
         break;
   }
   canonical->sres = VG_(mk_SysRes_x86_darwin)(
                        gst->guest_SC_CLASS, err ? True : False,
                        wHI, wLO
                     );
   canonical->what = SsComplete;

#  elif defined(VGP_amd64_darwin)
   /* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
   /* Same scheme as x86-darwin above, with 64-bit registers and a
      128-bit Unix-class result in RDX:RAX. */
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   ULong carry = 1 & LibVEX_GuestAMD64_get_rflags(gst);
   ULong err = 0;
   ULong wLO = 0;
   ULong wHI = 0;
   switch (gst->guest_SC_CLASS) {
      case VG_DARWIN_SYSCALL_CLASS_UNIX:
         // syscall = Unix, 128-bit result
         err = carry;
         wLO = gst->guest_RAX;
         wHI = gst->guest_RDX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MACH:
         // syscall = Mach, 64-bit result
         wLO = gst->guest_RAX;
         break;
      case VG_DARWIN_SYSCALL_CLASS_MDEP:
         // syscall = mdep, 64-bit result
         wLO = gst->guest_RAX;
         break;
      default:
         vg_assert(0);
         break;
   }
   canonical->sres = VG_(mk_SysRes_amd64_darwin)(
                        gst->guest_SC_CLASS, err ? True : False,
                        wHI, wLO
                     );
   canonical->what = SsComplete;

#  elif defined(VGP_s390x_linux)
   VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_s390x_linux)( gst->guest_r2 );
   canonical->what = SsComplete;

#  elif defined(VGP_tilegx_linux)
   VexGuestTILEGXState* gst = (VexGuestTILEGXState*)gst_vanilla;
   canonical->sres = VG_(mk_SysRes_tilegx_linux)( gst->guest_r0 );
   canonical->what = SsComplete;

#  elif defined(VGP_x86_solaris)
   /* Solaris signals error via the carry flag; on success EDX may
      carry the high half of a 64-bit result. */
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   UInt carry = 1 & LibVEX_GuestX86_get_eflags(gst);

   canonical->sres = VG_(mk_SysRes_x86_solaris)(carry ? True : False,
                                                gst->guest_EAX,
                                                carry ? 0 : gst->guest_EDX);
   canonical->what = SsComplete;

#  elif defined(VGP_amd64_solaris)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   UInt carry = 1 & LibVEX_GuestAMD64_get_rflags(gst);

   canonical->sres = VG_(mk_SysRes_amd64_solaris)(carry ? True : False,
                                                  gst->guest_RAX,
                                                  carry ? 0 : gst->guest_RDX);
   canonical->what = SsComplete;

#  else
#    error "getSyscallStatusFromGuestState: unknown arch"
#  endif
}
/* Write a canonical (platform-independent) syscall result back into
   the guest state, using each platform's kernel return convention,
   and tell the tool (via VG_TRACK post_reg_write) which guest
   registers were written so shadow state stays accurate.  The status
   must be SsComplete.  Inverse of getSyscallStatusFromGuestState. */
static
void putSyscallStatusIntoGuestState ( /*IN*/ ThreadId tid,
                                      /*IN*/ SyscallStatus* canonical,
                                      /*OUT*/VexGuestArchState* gst_vanilla )
{
#  if defined(VGP_x86_linux)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-x86 scheme.  Oh well. */
      gst->guest_EAX = - (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_EAX = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_x86_EAX, sizeof(UWord) );

#  elif defined(VGP_amd64_linux)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-amd64 scheme.  Oh well. */
      gst->guest_RAX = - (Long)sr_Err(canonical->sres);
   } else {
      gst->guest_RAX = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_amd64_RAX, sizeof(UWord) );

#  elif defined(VGP_ppc32_linux)
   /* ppc: the result goes in GPR3 and the error flag in CR0.SO, so
      both the register and the condition-register byte are reported
      to the tool. */
   VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
   UInt old_cr = LibVEX_GuestPPC32_get_CR(gst);
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* set CR0.SO */
      LibVEX_GuestPPC32_put_CR( old_cr | (1<<28), gst );
      gst->guest_GPR3 = sr_Err(canonical->sres);
   } else {
      /* clear CR0.SO */
      LibVEX_GuestPPC32_put_CR( old_cr & ~(1<<28), gst );
      gst->guest_GPR3 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc32_GPR3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc32_CR0_0, sizeof(UChar) );

#  elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   VexGuestPPC64State* gst = (VexGuestPPC64State*)gst_vanilla;
   UInt old_cr = LibVEX_GuestPPC64_get_CR(gst);
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* set CR0.SO */
      LibVEX_GuestPPC64_put_CR( old_cr | (1<<28), gst );
      gst->guest_GPR3 = sr_Err(canonical->sres);
   } else {
      /* clear CR0.SO */
      LibVEX_GuestPPC64_put_CR( old_cr & ~(1<<28), gst );
      gst->guest_GPR3 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc64_GPR3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_ppc64_CR0_0, sizeof(UChar) );

#  elif defined(VGP_arm_linux)
   VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-arm scheme.  Oh well. */
      gst->guest_R0 = - (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_R0 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_arm_R0, sizeof(UWord) );

#  elif defined(VGP_arm64_linux)
   VexGuestARM64State* gst = (VexGuestARM64State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      /* This isn't exactly right, in that really a Failure with res
         not in the range 1 .. 4095 is unrepresentable in the
         Linux-arm64 scheme.  Oh well. */
      gst->guest_X0 = - (Long)sr_Err(canonical->sres);
   } else {
      gst->guest_X0 = sr_Res(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_arm64_X0, sizeof(UWord) );

#elif defined(VGP_x86_darwin)
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);
   /* Unfortunately here we have to break abstraction and look
      directly inside 'res', in order to decide what to do. */
   switch (sres._mode) {
      case SysRes_MACH: // int $0x81 = Mach, 32-bit result
      case SysRes_MDEP: // int $0x82 = mdep, 32-bit result
         gst->guest_EAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_x86_EAX, sizeof(UInt) );
         break;
      case SysRes_UNIX_OK:  // int $0x80 = Unix, 64-bit result
      case SysRes_UNIX_ERR: // int $0x80 = Unix, 64-bit error
         gst->guest_EAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_x86_EAX, sizeof(UInt) );
         gst->guest_EDX = sres._wHI;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_x86_EDX, sizeof(UInt) );
         /* Unix class reports error via the carry flag. */
         LibVEX_GuestX86_put_eflag_c( sres._mode==SysRes_UNIX_ERR ? 1 : 0,
                                      gst );
         // GrP fixme sets defined for entire eflags, not just bit c
         // DDD: this breaks exp-ptrcheck.
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UInt) );
         break;
      default:
         vg_assert(0);
         break;
   }

#elif defined(VGP_amd64_darwin)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);
   /* Unfortunately here we have to break abstraction and look
      directly inside 'res', in order to decide what to do. */
   switch (sres._mode) {
      case SysRes_MACH: // syscall = Mach, 64-bit result
      case SysRes_MDEP: // syscall = mdep, 64-bit result
         gst->guest_RAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_amd64_RAX, sizeof(ULong) );
         break;
      case SysRes_UNIX_OK:  // syscall = Unix, 128-bit result
      case SysRes_UNIX_ERR: // syscall = Unix, 128-bit error
         gst->guest_RAX = sres._wLO;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_amd64_RAX, sizeof(ULong) );
         gst->guest_RDX = sres._wHI;
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   OFFSET_amd64_RDX, sizeof(ULong) );
         LibVEX_GuestAMD64_put_rflag_c( sres._mode==SysRes_UNIX_ERR ? 1 : 0,
                                        gst );
         // GrP fixme sets defined for entire rflags, not just bit c
         // DDD: this breaks exp-ptrcheck.
         VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
                   offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(ULong) );
         break;
      default:
         vg_assert(0);
         break;
   }

#  elif defined(VGP_s390x_linux)
   /* NOTE(review): unlike every other arch in this function, the
      s390x and tilegx branches do not VG_TRACK(post_reg_write) the
      result register(s) — confirm whether that omission is
      intentional. */
   VexGuestS390XState* gst = (VexGuestS390XState*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      gst->guest_r2 = - (Long)sr_Err(canonical->sres);
   } else {
      gst->guest_r2 = sr_Res(canonical->sres);
   }

#  elif defined(VGP_mips32_linux)
   /* MIPS: v0 (r2) carries the result or errno value, a3 (r7) the
      error indication, and v1 (r3) the secondary result word on
      success. */
   VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      gst->guest_r2 = (Int)sr_Err(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_r2 = sr_Res(canonical->sres);
      gst->guest_r3 = sr_ResEx(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips32_r2, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips32_r3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips32_r7, sizeof(UWord) );

#  elif defined(VGP_mips64_linux)
   VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      gst->guest_r2 = (Int)sr_Err(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   } else {
      gst->guest_r2 = sr_Res(canonical->sres);
      gst->guest_r3 = sr_ResEx(canonical->sres);
      gst->guest_r7 = (Int)sr_Err(canonical->sres);
   }
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips64_r2, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips64_r3, sizeof(UWord) );
   VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
             OFFSET_mips64_r7, sizeof(UWord) );

#  elif defined(VGP_tilegx_linux)
   /* See NOTE(review) in the s390x branch: no VG_TRACK here. */
   VexGuestTILEGXState* gst = (VexGuestTILEGXState*)gst_vanilla;
   vg_assert(canonical->what == SsComplete);
   if (sr_isError(canonical->sres)) {
      gst->guest_r0 = - (Long)sr_Err(canonical->sres);
      // r1 holds the errno value
      gst->guest_r1 = (Long)sr_Err(canonical->sres);
   } else {
      gst->guest_r0 = sr_Res(canonical->sres);
      gst->guest_r1 = 0;
   }

#  elif defined(VGP_x86_solaris)
   /* Solaris: result in EAX (and EDX on success), error signalled
      via the carry flag. */
   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);

   if (sr_isError(sres)) {
      gst->guest_EAX = sr_Err(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EAX,
               sizeof(UInt));
      LibVEX_GuestX86_put_eflag_c(1, gst);
   }
   else {
      gst->guest_EAX = sr_Res(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EAX,
               sizeof(UInt));
      gst->guest_EDX = sr_ResHI(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EDX,
               sizeof(UInt));
      LibVEX_GuestX86_put_eflag_c(0, gst);
   }
   /* Make CC_DEP1 and CC_DEP2 defined.  This is inaccurate because it makes
      other eflags defined too (see README.solaris). */
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestX86State,
            guest_CC_DEP1), sizeof(UInt));
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestX86State,
            guest_CC_DEP2), sizeof(UInt));

#  elif defined(VGP_amd64_solaris)
   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
   SysRes sres = canonical->sres;
   vg_assert(canonical->what == SsComplete);

   if (sr_isError(sres)) {
      gst->guest_RAX = sr_Err(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RAX,
               sizeof(ULong));
      LibVEX_GuestAMD64_put_rflag_c(1, gst);
   }
   else {
      gst->guest_RAX = sr_Res(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RAX,
               sizeof(ULong));
      gst->guest_RDX = sr_ResHI(sres);
      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RDX,
               sizeof(ULong));
      LibVEX_GuestAMD64_put_rflag_c(0, gst);
   }
   /* Make CC_DEP1 and CC_DEP2 defined.  This is inaccurate because it makes
      other eflags defined too (see README.solaris). */
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestAMD64State,
            guest_CC_DEP1), sizeof(ULong));
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestAMD64State,
            guest_CC_DEP2), sizeof(ULong));

#  else
#    error "putSyscallStatusIntoGuestState: unknown arch"
#  endif
}
/* Tell me the offsets in the guest state of the syscall params, so
   that the scalar argument checkers don't have to have this info
   hardwired. */
/* Per-platform layout of the syscall arguments:
     o_argN  = byte offset of the register holding argN within the
               guest state;
     s_argN  = byte offset of argN relative to the guest stack pointer,
               for platforms that pass (some) args on the stack;
     uu_argN = argN does not exist on this platform (-1 sentinel). */
static
void getSyscallArgLayout ( /*OUT*/SyscallArgLayout* layout )
{
   VG_(bzero_inline)(layout, sizeof(*layout));

#if defined(VGP_x86_linux)
   layout->o_sysno  = OFFSET_x86_EAX;
   layout->o_arg1   = OFFSET_x86_EBX;
   layout->o_arg2   = OFFSET_x86_ECX;
   layout->o_arg3   = OFFSET_x86_EDX;
   layout->o_arg4   = OFFSET_x86_ESI;
   layout->o_arg5   = OFFSET_x86_EDI;
   layout->o_arg6   = OFFSET_x86_EBP;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_amd64_linux)
   layout->o_sysno  = OFFSET_amd64_RAX;
   layout->o_arg1   = OFFSET_amd64_RDI;
   layout->o_arg2   = OFFSET_amd64_RSI;
   layout->o_arg3   = OFFSET_amd64_RDX;
   layout->o_arg4   = OFFSET_amd64_R10;
   layout->o_arg5   = OFFSET_amd64_R8;
   layout->o_arg6   = OFFSET_amd64_R9;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_ppc32_linux)
   layout->o_sysno  = OFFSET_ppc32_GPR0;
   layout->o_arg1   = OFFSET_ppc32_GPR3;
   layout->o_arg2   = OFFSET_ppc32_GPR4;
   layout->o_arg3   = OFFSET_ppc32_GPR5;
   layout->o_arg4   = OFFSET_ppc32_GPR6;
   layout->o_arg5   = OFFSET_ppc32_GPR7;
   layout->o_arg6   = OFFSET_ppc32_GPR8;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
   layout->o_sysno  = OFFSET_ppc64_GPR0;
   layout->o_arg1   = OFFSET_ppc64_GPR3;
   layout->o_arg2   = OFFSET_ppc64_GPR4;
   layout->o_arg3   = OFFSET_ppc64_GPR5;
   layout->o_arg4   = OFFSET_ppc64_GPR6;
   layout->o_arg5   = OFFSET_ppc64_GPR7;
   layout->o_arg6   = OFFSET_ppc64_GPR8;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_arm_linux)
   layout->o_sysno  = OFFSET_arm_R7;
   layout->o_arg1   = OFFSET_arm_R0;
   layout->o_arg2   = OFFSET_arm_R1;
   layout->o_arg3   = OFFSET_arm_R2;
   layout->o_arg4   = OFFSET_arm_R3;
   layout->o_arg5   = OFFSET_arm_R4;
   layout->o_arg6   = OFFSET_arm_R5;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_arm64_linux)
   layout->o_sysno  = OFFSET_arm64_X8;
   layout->o_arg1   = OFFSET_arm64_X0;
   layout->o_arg2   = OFFSET_arm64_X1;
   layout->o_arg3   = OFFSET_arm64_X2;
   layout->o_arg4   = OFFSET_arm64_X3;
   layout->o_arg5   = OFFSET_arm64_X4;
   layout->o_arg6   = OFFSET_arm64_X5;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_mips32_linux)
   layout->o_sysno  = OFFSET_mips32_r2;
   layout->o_arg1   = OFFSET_mips32_r4;
   layout->o_arg2   = OFFSET_mips32_r5;
   layout->o_arg3   = OFFSET_mips32_r6;
   layout->o_arg4   = OFFSET_mips32_r7;
   /* args 5 and 6 are passed on the stack, at sp+16 and sp+20. */
   layout->s_arg5   = sizeof(UWord) * 4;
   layout->s_arg6   = sizeof(UWord) * 5;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_mips64_linux)
   layout->o_sysno  = OFFSET_mips64_r2;
   layout->o_arg1   = OFFSET_mips64_r4;
   layout->o_arg2   = OFFSET_mips64_r5;
   layout->o_arg3   = OFFSET_mips64_r6;
   layout->o_arg4   = OFFSET_mips64_r7;
   layout->o_arg5   = OFFSET_mips64_r8;
   layout->o_arg6   = OFFSET_mips64_r9;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_x86_darwin)
   layout->o_sysno  = OFFSET_x86_EAX;
   // syscall parameters are on stack in C convention
   layout->s_arg1   = sizeof(UWord) * 1;
   layout->s_arg2   = sizeof(UWord) * 2;
   layout->s_arg3   = sizeof(UWord) * 3;
   layout->s_arg4   = sizeof(UWord) * 4;
   layout->s_arg5   = sizeof(UWord) * 5;
   layout->s_arg6   = sizeof(UWord) * 6;
   layout->s_arg7   = sizeof(UWord) * 7;
   layout->s_arg8   = sizeof(UWord) * 8;

#elif defined(VGP_amd64_darwin)
   layout->o_sysno  = OFFSET_amd64_RAX;
   layout->o_arg1   = OFFSET_amd64_RDI;
   layout->o_arg2   = OFFSET_amd64_RSI;
   layout->o_arg3   = OFFSET_amd64_RDX;
   layout->o_arg4   = OFFSET_amd64_RCX;
   layout->o_arg5   = OFFSET_amd64_R8;
   layout->o_arg6   = OFFSET_amd64_R9;
   /* args 7 and 8 spill to the stack. */
   layout->s_arg7   = sizeof(UWord) * 1;
   layout->s_arg8   = sizeof(UWord) * 2;

#elif defined(VGP_s390x_linux)
   layout->o_sysno  = OFFSET_s390x_SYSNO;
   layout->o_arg1   = OFFSET_s390x_r2;
   layout->o_arg2   = OFFSET_s390x_r3;
   layout->o_arg3   = OFFSET_s390x_r4;
   layout->o_arg4   = OFFSET_s390x_r5;
   layout->o_arg5   = OFFSET_s390x_r6;
   layout->o_arg6   = OFFSET_s390x_r7;
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_tilegx_linux)
   layout->o_sysno  = OFFSET_tilegx_r(10);
   layout->o_arg1   = OFFSET_tilegx_r(0);
   layout->o_arg2   = OFFSET_tilegx_r(1);
   layout->o_arg3   = OFFSET_tilegx_r(2);
   layout->o_arg4   = OFFSET_tilegx_r(3);
   layout->o_arg5   = OFFSET_tilegx_r(4);
   layout->o_arg6   = OFFSET_tilegx_r(5);
   layout->uu_arg7  = -1; /* impossible value */
   layout->uu_arg8  = -1; /* impossible value */

#elif defined(VGP_x86_solaris)
   layout->o_sysno  = OFFSET_x86_EAX;
   /* Syscall parameters are on the stack. */
   layout->s_arg1   = sizeof(UWord) * 1;
   layout->s_arg2   = sizeof(UWord) * 2;
   layout->s_arg3   = sizeof(UWord) * 3;
   layout->s_arg4   = sizeof(UWord) * 4;
   layout->s_arg5   = sizeof(UWord) * 5;
   layout->s_arg6   = sizeof(UWord) * 6;
   layout->s_arg7   = sizeof(UWord) * 7;
   layout->s_arg8   = sizeof(UWord) * 8;

#elif defined(VGP_amd64_solaris)
   layout->o_sysno  = OFFSET_amd64_RAX;
   layout->o_arg1   = OFFSET_amd64_RDI;
   layout->o_arg2   = OFFSET_amd64_RSI;
   layout->o_arg3   = OFFSET_amd64_RDX;
   layout->o_arg4   = OFFSET_amd64_R10;
   layout->o_arg5   = OFFSET_amd64_R8;
   layout->o_arg6   = OFFSET_amd64_R9;
   layout->s_arg7   = sizeof(UWord) * 1;
   layout->s_arg8   = sizeof(UWord) * 2;

#else
#  error "getSyscallLayout: unknown arch"
#endif
}
1580 /* ---------------------------------------------------------------------
1581 The main driver logic
1582 ------------------------------------------------------------------ */
1584 /* Finding the handlers for a given syscall, or faking up one
1585 when no handler is found. */
/* Stand-in "before" handler used when no real handler exists for a
   syscall: warn the user (with a stack trace at higher verbosity),
   point them at the porting docs, and fail the call with ENOSYS.
   On Solaris the process is terminated instead, since execution is
   not continued past an unhandled syscall there. */
static
void bad_before ( ThreadId              tid,
                  SyscallArgLayout*     layout,
                  /*MOD*/SyscallArgs*   args,
                  /*OUT*/SyscallStatus* status,
                  /*OUT*/UWord*         flags )
{
   VG_(dmsg)("WARNING: unhandled %s syscall: %s\n",
             VG_PLATFORM, VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html.\n");

   SET_STATUS_Failure(VKI_ENOSYS);

#  if defined(VGO_solaris)
   VG_(exit)(1);
#  endif
}
/* Fallback table entry: only a "before" handler (which reports the
   syscall as unhandled and fails it with ENOSYS); no "after". */
static SyscallTableEntry bad_sys =
   { bad_before, NULL };
1614 static const SyscallTableEntry* get_syscall_entry ( Int syscallno )
1616 const SyscallTableEntry* sys = NULL;
1618 # if defined(VGO_linux)
1619 sys = ML_(get_linux_syscall_entry)( syscallno );
1621 # elif defined(VGO_darwin)
1622 Int idx = VG_DARWIN_SYSNO_INDEX(syscallno);
1624 switch (VG_DARWIN_SYSNO_CLASS(syscallno)) {
1625 case VG_DARWIN_SYSCALL_CLASS_UNIX:
1626 if (idx >= 0 && idx < ML_(syscall_table_size) &&
1627 ML_(syscall_table)[idx].before != NULL)
1628 sys = &ML_(syscall_table)[idx];
1629 break;
1630 case VG_DARWIN_SYSCALL_CLASS_MACH:
1631 if (idx >= 0 && idx < ML_(mach_trap_table_size) &&
1632 ML_(mach_trap_table)[idx].before != NULL)
1633 sys = &ML_(mach_trap_table)[idx];
1634 break;
1635 case VG_DARWIN_SYSCALL_CLASS_MDEP:
1636 if (idx >= 0 && idx < ML_(mdep_trap_table_size) &&
1637 ML_(mdep_trap_table)[idx].before != NULL)
1638 sys = &ML_(mdep_trap_table)[idx];
1639 break;
1640 default:
1641 vg_assert(0);
1642 break;
1645 # elif defined(VGO_solaris)
1646 sys = ML_(get_solaris_syscall_entry)(syscallno);
1648 # else
1649 # error Unknown OS
1650 # endif
1652 return sys == NULL ? &bad_sys : sys;
/* Add and remove signals from mask so that we end up telling the
   kernel the state we actually want rather than what the client
   wants. */
static void sanitize_client_sigmask(vki_sigset_t *mask)
{
   /* SIGKILL and SIGSTOP can never be blocked; also keep Valgrind's
      own thread-kill signal deliverable at all times. */
   VG_(sigdelset)(mask, VKI_SIGKILL);
   VG_(sigdelset)(mask, VKI_SIGSTOP);
   VG_(sigdelset)(mask, VG_SIGVGKILL); /* never block */
}
/* Per-thread record of an in-progress syscall. */
typedef
   struct {
      SyscallArgs   orig_args;  // args as read from the guest state
      SyscallArgs   args;       // args as possibly modified by the pre-handler
      SyscallStatus status;     // where the syscall currently stands
      UWord         flags;      // SfXyz flags set by the pre-handler
   }
   SyscallInfo;

/* One record per thread, indexed by ThreadId; allocated lazily by
   ensure_initialised(). */
SyscallInfo *syscallInfo;
/* The scheduler needs to be able to zero out these records after a
   fork, hence this is exported from m_syswrap. */
/* Reset thread 'tid's syscall record to the idle state. */
void VG_(clear_syscallInfo) ( Int tid )
{
   vg_assert(syscallInfo);
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(memset)( & syscallInfo[tid], 0, sizeof( syscallInfo[tid] ));
   syscallInfo[tid].status.what = SsIdle;
}
/* True iff thread 'tid' currently has a syscall in progress
   (i.e. its record is in any state other than SsIdle). */
Bool VG_(is_in_syscall) ( Int tid )
{
   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   return (syscallInfo[tid].status.what != SsIdle);
}
1693 static void ensure_initialised ( void )
1695 Int i;
1696 static Bool init_done = False;
1697 if (init_done)
1698 return;
1699 init_done = True;
1701 syscallInfo = VG_(malloc)("scinfo", VG_N_THREADS * sizeof syscallInfo[0]);
1703 for (i = 0; i < VG_N_THREADS; i++) {
1704 VG_(clear_syscallInfo)( i );
1708 /* --- This is the main function of this file. --- */
1710 void VG_(client_syscall) ( ThreadId tid, UInt trc )
1712 Word sysno;
1713 ThreadState* tst;
1714 const SyscallTableEntry* ent;
1715 SyscallArgLayout layout;
1716 SyscallInfo* sci;
1718 ensure_initialised();
1720 vg_assert(VG_(is_valid_tid)(tid));
1721 vg_assert(tid >= 1 && tid < VG_N_THREADS);
1722 vg_assert(VG_(is_running_thread)(tid));
1724 # if !defined(VGO_darwin)
1725 // Resync filtering is meaningless on non-Darwin targets.
1726 vg_assert(VG_(clo_resync_filter) == 0);
1727 # endif
1729 tst = VG_(get_ThreadState)(tid);
1731 /* BEGIN ensure root thread's stack is suitably mapped */
1732 /* In some rare circumstances, we may do the syscall without the
1733 bottom page of the stack being mapped, because the stack pointer
1734 was moved down just a few instructions before the syscall
1735 instruction, and there have been no memory references since
1736 then, that would cause a call to VG_(extend_stack) to have
1737 happened.
1739 In native execution that's OK: the kernel automagically extends
1740 the stack's mapped area down to cover the stack pointer (or sp -
1741 redzone, really). In simulated normal execution that's OK too,
1742 since any signals we get from accessing below the mapped area of
1743 the (guest's) stack lead us to VG_(extend_stack), where we
1744 simulate the kernel's stack extension logic. But that leaves
1745 the problem of entering a syscall with the SP unmapped. Because
1746 the kernel doesn't know that the segment immediately above SP is
1747 supposed to be a grow-down segment, it causes the syscall to
1748 fail, and thereby causes a divergence between native behaviour
1749 (syscall succeeds) and simulated behaviour (syscall fails).
1751 This is quite a rare failure mode. It has only been seen
1752 affecting calls to sys_readlink on amd64-linux, and even then it
1753 requires a certain code sequence around the syscall to trigger
1754 it. Here is one:
1756 extern int my_readlink ( const char* path );
1757 asm(
1758 ".text\n"
1759 ".globl my_readlink\n"
1760 "my_readlink:\n"
1761 "\tsubq $0x1008,%rsp\n"
1762 "\tmovq %rdi,%rdi\n" // path is in rdi
1763 "\tmovq %rsp,%rsi\n" // &buf[0] -> rsi
1764 "\tmovl $0x1000,%edx\n" // sizeof(buf) in rdx
1765 "\tmovl $"__NR_READLINK",%eax\n" // syscall number
1766 "\tsyscall\n"
1767 "\taddq $0x1008,%rsp\n"
1768 "\tret\n"
1769 ".previous\n"
1772 For more details, see bug #156404
1773 (https://bugs.kde.org/show_bug.cgi?id=156404).
1775 The fix is actually very simple. We simply need to call
1776 VG_(extend_stack) for this thread, handing it the lowest
1777 possible valid address for stack (sp - redzone), to ensure the
1778 pages all the way down to that address, are mapped. Because
1779 this is a potentially expensive and frequent operation, we
1780 do the following:
1782 Only the main thread (tid=1) has a growdown stack. So
1783 ignore all others. It is conceivable, although highly unlikely,
1784 that the main thread exits, and later another thread is
1785 allocated tid=1, but that's harmless, I believe;
1786 VG_(extend_stack) will do nothing when applied to a non-root
1787 thread.
1789 All this guff is of course Linux-specific. Hence the ifdef.
1791 # if defined(VGO_linux)
1792 if (tid == 1/*ROOT THREAD*/) {
1793 Addr stackMin = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
1795 /* The precise thing to do here would be to extend the stack only
1796 if the system call can be proven to access unmapped user stack
1797 memory. That is an enormous amount of work even if a proper
1798 spec of system calls was available.
1800 In the case where the system call does not access user memory
1801 the stack pointer here can have any value. A legitimate testcase
1802 that exercises this is none/tests/s390x/stmg.c:
1803 The stack pointer happens to be in the reservation segment near
1804 the end of the addressable memory and there is no SkAnonC segment
1805 above.
1807 So the approximation we're taking here is to extend the stack only
1808 if the client stack pointer does not look bogus. */
1809 if (VG_(am_addr_is_in_extensible_client_stack)(stackMin))
1810 VG_(extend_stack)( tid, stackMin );
1812 # endif
1813 /* END ensure root thread's stack is suitably mapped */
1815 /* First off, get the syscall args and number. This is a
1816 platform-dependent action. */
1818 sci = & syscallInfo[tid];
1819 vg_assert(sci->status.what == SsIdle);
1821 getSyscallArgsFromGuestState( &sci->orig_args, &tst->arch.vex, trc );
1823 /* Copy .orig_args to .args. The pre-handler may modify .args, but
1824 we want to keep the originals too, just in case. */
1825 sci->args = sci->orig_args;
1827 /* Save the syscall number in the thread state in case the syscall
1828 is interrupted by a signal. */
1829 sysno = sci->orig_args.sysno;
1831 /* It's sometimes useful, as a crude debugging hack, to get a
1832 stack trace at each (or selected) syscalls. */
1833 if (0 && sysno == __NR_ioctl) {
1834 VG_(umsg)("\nioctl:\n");
1835 VG_(get_and_pp_StackTrace)(tid, 10);
1836 VG_(umsg)("\n");
1839 # if defined(VGO_darwin)
1840 /* Record syscall class. But why? Because the syscall might be
1841 interrupted by a signal, and in the signal handler (which will
1842 be m_signals.async_signalhandler) we will need to build a SysRes
1843 reflecting the syscall return result. In order to do that we
1844 need to know the syscall class. Hence stash it in the guest
1845 state of this thread. This madness is not needed on Linux
1846 because it only has a single syscall return convention and so
1847 there is no ambiguity involved in converting the post-signal
1848 machine state into a SysRes. */
1849 tst->arch.vex.guest_SC_CLASS = VG_DARWIN_SYSNO_CLASS(sysno);
1850 # endif
1852 /* The default what-to-do-next thing is hand the syscall to the
1853 kernel, so we pre-set that here. Set .sres to something
1854 harmless looking (is irrelevant because .what is not
1855 SsComplete.) */
1856 sci->status.what = SsHandToKernel;
1857 sci->status.sres = VG_(mk_SysRes_Error)(0);
1858 sci->flags = 0;
1860 /* Fetch the syscall's handlers. If no handlers exist for this
1861 syscall, we are given dummy handlers which force an immediate
1862 return with ENOSYS. */
1863 ent = get_syscall_entry(sysno);
1865 /* Fetch the layout information, which tells us where in the guest
1866 state the syscall args reside. This is a platform-dependent
1867 action. This info is needed so that the scalar syscall argument
1868 checks (PRE_REG_READ calls) know which bits of the guest state
1869 they need to inspect. */
1870 getSyscallArgLayout( &layout );
1872 /* Make sure the tmp signal mask matches the real signal mask;
1873 sigsuspend may change this. */
1874 vg_assert(VG_(iseqsigset)(&tst->sig_mask, &tst->tmp_sig_mask));
1876 /* Right, we're finally ready to Party. Call the pre-handler and
1877 see what we get back. At this point:
1879 sci->status.what is Unset (we don't know yet).
1880 sci->orig_args contains the original args.
1881 sci->args is the same as sci->orig_args.
1882 sci->flags is zero.
1885 PRINT("SYSCALL[%d,%d](%s) ",
1886 VG_(getpid)(), tid, VG_SYSNUM_STRING(sysno));
1888 /* Do any pre-syscall actions */
1889 if (VG_(needs).syscall_wrapper) {
1890 UWord tmpv[8];
1891 tmpv[0] = sci->orig_args.arg1;
1892 tmpv[1] = sci->orig_args.arg2;
1893 tmpv[2] = sci->orig_args.arg3;
1894 tmpv[3] = sci->orig_args.arg4;
1895 tmpv[4] = sci->orig_args.arg5;
1896 tmpv[5] = sci->orig_args.arg6;
1897 tmpv[6] = sci->orig_args.arg7;
1898 tmpv[7] = sci->orig_args.arg8;
1899 VG_TDICT_CALL(tool_pre_syscall, tid, sysno,
1900 &tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]));
1903 vg_assert(ent);
1904 vg_assert(ent->before);
1905 (ent->before)( tid,
1906 &layout,
1907 &sci->args, &sci->status, &sci->flags );
1909 /* The pre-handler may have modified:
1910 sci->args
1911 sci->status
1912 sci->flags
1913 All else remains unchanged.
1914 Although the args may be modified, pre handlers are not allowed
1915 to change the syscall number.
1917 /* Now we proceed according to what the pre-handler decided. */
1918 vg_assert(sci->status.what == SsHandToKernel
1919 || sci->status.what == SsComplete);
1920 vg_assert(sci->args.sysno == sci->orig_args.sysno);
1922 if (sci->status.what == SsComplete && !sr_isError(sci->status.sres)) {
1923 /* The pre-handler completed the syscall itself, declaring
1924 success. */
1925 if (sci->flags & SfNoWriteResult) {
1926 PRINT(" --> [pre-success] NoWriteResult");
1927 } else {
1928 PRINT(" --> [pre-success] %s", VG_(sr_as_string)(sci->status.sres));
1930 /* In this case the allowable flags are to ask for a signal-poll
1931 and/or a yield after the call. Changing the args isn't
1932 allowed. */
1933 vg_assert(0 == (sci->flags
1934 & ~(SfPollAfter | SfYieldAfter | SfNoWriteResult)));
1935 vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
1938 else
1939 if (sci->status.what == SsComplete && sr_isError(sci->status.sres)) {
1940 /* The pre-handler decided to fail syscall itself. */
1941 PRINT(" --> [pre-fail] %s", VG_(sr_as_string)(sci->status.sres));
1942 /* In this case, the pre-handler is also allowed to ask for the
1943 post-handler to be run anyway. Changing the args is not
1944 allowed. */
1945 vg_assert(0 == (sci->flags & ~(SfMayBlock | SfPostOnFail | SfPollAfter)));
1946 vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
1949 else
1950 if (sci->status.what != SsHandToKernel) {
1951 /* huh?! */
1952 vg_assert(0);
1955 else /* (sci->status.what == HandToKernel) */ {
1956 /* Ok, this is the usual case -- and the complicated one. There
1957 are two subcases: sync and async. async is the general case
1958 and is to be used when there is any possibility that the
1959 syscall might block [a fact that the pre-handler must tell us
1960 via the sci->flags field.] Because the tidying-away /
1961 context-switch overhead of the async case could be large, if
1962 we are sure that the syscall will not block, we fast-track it
1963 by doing it directly in this thread, which is a lot
1964 simpler. */
1966 /* Check that the given flags are allowable: MayBlock, PollAfter
1967 and PostOnFail are ok. */
1968 vg_assert(0 == (sci->flags & ~(SfMayBlock | SfPostOnFail | SfPollAfter)));
1970 if (sci->flags & SfMayBlock) {
1972 /* Syscall may block, so run it asynchronously */
1973 vki_sigset_t mask;
1975 PRINT(" --> [async] ... \n");
1977 mask = tst->sig_mask;
1978 sanitize_client_sigmask(&mask);
1980 /* Gack. More impedance matching. Copy the possibly
1981 modified syscall args back into the guest state. */
1982 /* JRS 2009-Mar-16: if the syscall args are possibly modified,
1983 then this assertion is senseless:
1984 vg_assert(eq_SyscallArgs(&sci->args, &sci->orig_args));
1985 The case that exposed it was sys_posix_spawn on Darwin,
1986 which heavily modifies its arguments but then lets the call
1987 go through anyway, with SfToBlock set, hence we end up here. */
1988 putSyscallArgsIntoGuestState( &sci->args, &tst->arch.vex );
1990 /* SfNoWriteResult flag is invalid for blocking signals because
1991 do_syscall_for_client() directly modifies the guest state. */
1992 vg_assert(!(sci->flags & SfNoWriteResult));
1994 /* Drop the bigLock */
1995 VG_(release_BigLock)(tid, VgTs_WaitSys, "VG_(client_syscall)[async]");
1996 /* Urr. We're now in a race against other threads trying to
1997 acquire the bigLock. I guess that doesn't matter provided
1998 that do_syscall_for_client only touches thread-local
1999 state. */
2001 /* Do the call, which operates directly on the guest state,
2002 not on our abstracted copies of the args/result. */
2003 do_syscall_for_client(sysno, tst, &mask);
2005 /* do_syscall_for_client may not return if the syscall was
2006 interrupted by a signal. In that case, flow of control is
2007 first to m_signals.async_sighandler, which calls
2008 VG_(fixup_guest_state_after_syscall_interrupted), which
2009 fixes up the guest state, and possibly calls
2010 VG_(post_syscall). Once that's done, control drops back
2011 to the scheduler. */
2013 /* Darwin: do_syscall_for_client may not return if the
2014 syscall was workq_ops(WQOPS_THREAD_RETURN) and the kernel
2015 responded by starting the thread at wqthread_hijack(reuse=1)
2016 (to run another workqueue item). In that case, wqthread_hijack
2017 calls ML_(wqthread_continue), which is similar to
2018 VG_(fixup_guest_state_after_syscall_interrupted). */
2020 /* Reacquire the lock */
2021 VG_(acquire_BigLock)(tid, "VG_(client_syscall)[async]");
2023 /* Even more impedance matching. Extract the syscall status
2024 from the guest state. */
2025 getSyscallStatusFromGuestState( &sci->status, &tst->arch.vex );
2026 vg_assert(sci->status.what == SsComplete);
2028 /* Be decorative, if required. */
2029 if (VG_(clo_trace_syscalls)) {
2030 PRINT("SYSCALL[%d,%d](%s) ... [async] --> %s",
2031 VG_(getpid)(), tid, VG_SYSNUM_STRING(sysno),
2032 VG_(sr_as_string)(sci->status.sres));
2035 } else {
2037 /* run the syscall directly */
2038 /* The pre-handler may have modified the syscall args, but
2039 since we're passing values in ->args directly to the
2040 kernel, there's no point in flushing them back to the
2041 guest state. Indeed doing so could be construed as
2042 incorrect. */
2043 SysRes sres
2044 = VG_(do_syscall)(sysno, sci->args.arg1, sci->args.arg2,
2045 sci->args.arg3, sci->args.arg4,
2046 sci->args.arg5, sci->args.arg6,
2047 sci->args.arg7, sci->args.arg8 );
2048 sci->status = convert_SysRes_to_SyscallStatus(sres);
2050 /* Be decorative, if required. */
2051 if (VG_(clo_trace_syscalls)) {
2052 PRINT("[sync] --> %s", VG_(sr_as_string)(sci->status.sres));
2057 vg_assert(sci->status.what == SsComplete);
2059 vg_assert(VG_(is_running_thread)(tid));
2061 /* Dump the syscall result back in the guest state. This is
2062 a platform-specific action. */
2063 if (!(sci->flags & SfNoWriteResult))
2064 putSyscallStatusIntoGuestState( tid, &sci->status, &tst->arch.vex );
2066 /* Situation now:
2067 - the guest state is now correctly modified following the syscall
2068 - modified args, original args and syscall status are still
2069 available in the syscallInfo[] entry for this syscall.
2071 Now go on to do the post-syscall actions (read on down ..)
2073 PRINT(" ");
2074 VG_(post_syscall)(tid);
2075 PRINT("\n");
2079 /* Perform post syscall actions. The expected state on entry is
2080 precisely as at the end of VG_(client_syscall), that is:
2082 - guest state up to date following the syscall
2083 - modified args, original args and syscall status are still
2084 available in the syscallInfo[] entry for this syscall.
2085 - syscall status matches what's in the guest state.
2087 There are two ways to get here: the normal way -- being called by
2088 VG_(client_syscall), and the unusual way, from
2089 VG_(fixup_guest_state_after_syscall_interrupted).
2090 Darwin: there's a third way, ML_(wqthread_continue).
2092 void VG_(post_syscall) (ThreadId tid)
2094 SyscallInfo* sci;
2095 const SyscallTableEntry* ent;
2096 SyscallStatus test_status;
2097 ThreadState* tst;
2098 Word sysno;
2100 /* Preliminaries */
2101 vg_assert(VG_(is_valid_tid)(tid));
2102 vg_assert(tid >= 1 && tid < VG_N_THREADS);
2103 vg_assert(VG_(is_running_thread)(tid));
2105 tst = VG_(get_ThreadState)(tid);
2106 sci = & syscallInfo[tid];
2108 /* m_signals.sigvgkill_handler might call here even when not in
2109 a syscall. */
2110 if (sci->status.what == SsIdle || sci->status.what == SsHandToKernel) {
2111 sci->status.what = SsIdle;
2112 return;
2115 /* Validate current syscallInfo entry. In particular we require
2116 that the current .status matches what's actually in the guest
2117 state. At least in the normal case where we have actually
2118 previously written the result into the guest state. */
2119 vg_assert(sci->status.what == SsComplete);
2121 /* Get the system call number. Because the pre-handler isn't
2122 allowed to mess with it, it should be the same for both the
2123 original and potentially-modified args. */
2124 vg_assert(sci->args.sysno == sci->orig_args.sysno);
2125 sysno = sci->args.sysno;
/* Cross-check our recorded status against what the guest state now
   holds -- except when SfNoWriteResult is set, in which case we
   deliberately never wrote the result back and the check is skipped. */
2127 getSyscallStatusFromGuestState( &test_status, &tst->arch.vex );
2128 if (!(sci->flags & SfNoWriteResult))
2129 vg_assert(eq_SyscallStatus( sysno, &sci->status, &test_status ));
2130 /* Failure of the above assertion on Darwin can indicate a problem
2131 in the syscall wrappers that pre-fail or pre-succeed the
2132 syscall, by calling SET_STATUS_Success or SET_STATUS_Failure,
2133 when they really should call SET_STATUS_from_SysRes. The former
2134 create a UNIX-class syscall result on Darwin, which may not be
2135 correct for the syscall; if that's the case then this assertion
2136 fires. See PRE(thread_fast_set_cthread_self) for an example. On
2137 non-Darwin platforms this assertion should never fail, and this
2138 comment is completely irrelevant. */
2139 /* Ok, looks sane */
2141 /* pre: status == Complete (asserted above) */
2142 /* Consider either success or failure. Now run the post handler if:
2143 - it exists, and
2144 - Success or (Failure and PostOnFail is set)
2146 ent = get_syscall_entry(sysno);
2147 if (ent->after
2148 && ((!sr_isError(sci->status.sres))
2149 || (sr_isError(sci->status.sres)
2150 && (sci->flags & SfPostOnFail) ))) {
2152 (ent->after)( tid, &sci->args, &sci->status );
2155 /* Because the post handler might have changed the status (eg, the
2156 post-handler for sys_open can change the result from success to
2157 failure if the kernel supplied a fd that it doesn't like), once
2158 again dump the syscall result back in the guest state.*/
2159 if (!(sci->flags & SfNoWriteResult))
2160 putSyscallStatusIntoGuestState( tid, &sci->status, &tst->arch.vex );
2162 /* Do any post-syscall actions required by the tool. */
2163 if (VG_(needs).syscall_wrapper) {
2164 UWord tmpv[8];
2165 tmpv[0] = sci->orig_args.arg1;
2166 tmpv[1] = sci->orig_args.arg2;
2167 tmpv[2] = sci->orig_args.arg3;
2168 tmpv[3] = sci->orig_args.arg4;
2169 tmpv[4] = sci->orig_args.arg5;
2170 tmpv[5] = sci->orig_args.arg6;
2171 tmpv[6] = sci->orig_args.arg7;
2172 tmpv[7] = sci->orig_args.arg8;
2173 VG_TDICT_CALL(tool_post_syscall, tid,
2174 sysno,
2175 &tmpv[0], sizeof(tmpv)/sizeof(tmpv[0]),
2176 sci->status.sres);
2179 /* The syscall is done. */
2180 vg_assert(sci->status.what == SsComplete);
2181 sci->status.what = SsIdle;
2183 /* The pre/post wrappers may have concluded that pending signals
2184 might have been created, and will have set SfPollAfter to
2185 request a poll for them once the syscall is done. */
2186 if (sci->flags & SfPollAfter)
2187 VG_(poll_signals)(tid);
2189 /* Similarly, the wrappers might have asked for a yield
2190 afterwards. */
2191 if (sci->flags & SfYieldAfter)
2192 VG_(vg_yield)();
2196 /* ---------------------------------------------------------------------
2197 Dealing with syscalls which get interrupted by a signal:
2198 VG_(fixup_guest_state_after_syscall_interrupted)
2199 ------------------------------------------------------------------ */
2201 /* Syscalls done on behalf of the client are finally handed off to the
2202 kernel in VG_(client_syscall) above, either by calling
2203 do_syscall_for_client (the async case), or by calling
2204 VG_(do_syscall6) (the sync case).
2206 If the syscall is not interrupted by a signal (it may block and
2207 later unblock, but that's irrelevant here) then those functions
2208 eventually return and so control is passed to VG_(post_syscall).
2209 NB: not sure if the sync case can actually get interrupted, as it
2210 operates with all signals masked.
2212 However, the syscall may get interrupted by an async-signal. In
2213 that case do_syscall_for_client/VG_(do_syscall6) do not
2214 return. Instead we wind up in m_signals.async_sighandler. We need
2215 to fix up the guest state to make it look like the syscall was
2216 interrupted for the guest. So async_sighandler calls here, and this
2217 does the fixup. Note that from here we wind up calling
2218 VG_(post_syscall) too.
2222 /* These are addresses within ML_(do_syscall_for_client_WRK). See
2223 syscall-$PLAT.S for details.
2225 #if defined(VGO_linux)
2226 extern const Addr ML_(blksys_setup);
2227 extern const Addr ML_(blksys_restart);
2228 extern const Addr ML_(blksys_complete);
2229 extern const Addr ML_(blksys_committed);
2230 extern const Addr ML_(blksys_finished);
2231 #elif defined(VGO_darwin)
2232 /* Darwin requires extra uglyness */
2233 extern const Addr ML_(blksys_setup_MACH);
2234 extern const Addr ML_(blksys_restart_MACH);
2235 extern const Addr ML_(blksys_complete_MACH);
2236 extern const Addr ML_(blksys_committed_MACH);
2237 extern const Addr ML_(blksys_finished_MACH);
2238 extern const Addr ML_(blksys_setup_MDEP);
2239 extern const Addr ML_(blksys_restart_MDEP);
2240 extern const Addr ML_(blksys_complete_MDEP);
2241 extern const Addr ML_(blksys_committed_MDEP);
2242 extern const Addr ML_(blksys_finished_MDEP);
2243 extern const Addr ML_(blksys_setup_UNIX);
2244 extern const Addr ML_(blksys_restart_UNIX);
2245 extern const Addr ML_(blksys_complete_UNIX);
2246 extern const Addr ML_(blksys_committed_UNIX);
2247 extern const Addr ML_(blksys_finished_UNIX);
2248 #elif defined(VGO_solaris)
2249 extern const Addr ML_(blksys_setup);
2250 extern const Addr ML_(blksys_complete);
2251 extern const Addr ML_(blksys_committed);
2252 extern const Addr ML_(blksys_finished);
2253 extern const Addr ML_(blksys_setup_DRET);
2254 extern const Addr ML_(blksys_complete_DRET);
2255 extern const Addr ML_(blksys_committed_DRET);
2256 extern const Addr ML_(blksys_finished_DRET);
2257 #else
2258 # error "Unknown OS"
2259 #endif
2262 /* Back up guest state to restart a system call. */
2264 void ML_(fixup_guest_state_to_restart_syscall) ( ThreadArchState* arch )
2266 #if defined(VGP_x86_linux)
2267 arch->vex.guest_EIP -= 2; // sizeof(int $0x80)
2269 /* Make sure our caller is actually sane, and we're really backing
2270 back over a syscall.
2272 int $0x80 == CD 80
2275 UChar *p = (UChar *)arch->vex.guest_EIP;
2277 if (p[0] != 0xcd || p[1] != 0x80)
2278 VG_(message)(Vg_DebugMsg,
2279 "?! restarting over syscall at %#x %02x %02x\n",
2280 arch->vex.guest_EIP, p[0], p[1]);
2282 vg_assert(p[0] == 0xcd && p[1] == 0x80);
2285 #elif defined(VGP_amd64_linux)
2286 arch->vex.guest_RIP -= 2; // sizeof(syscall)
2288 /* Make sure our caller is actually sane, and we're really backing
2289 back over a syscall.
2291 syscall == 0F 05
2294 UChar *p = (UChar *)arch->vex.guest_RIP;
2296 if (p[0] != 0x0F || p[1] != 0x05)
2297 VG_(message)(Vg_DebugMsg,
2298 "?! restarting over syscall at %#llx %02x %02x\n",
2299 arch->vex.guest_RIP, p[0], p[1]);
2301 vg_assert(p[0] == 0x0F && p[1] == 0x05);
2304 #elif defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux)
2305 arch->vex.guest_CIA -= 4; // sizeof(ppc instr)
2307 /* Make sure our caller is actually sane, and we're really backing
2308 back over a syscall.
2310 sc == 44 00 00 02
2313 UChar *p = (UChar *)arch->vex.guest_CIA;
2315 if (p[0] != 0x44 || p[1] != 0x0 || p[2] != 0x0 || p[3] != 0x02)
2316 VG_(message)(Vg_DebugMsg,
2317 "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
2318 arch->vex.guest_CIA + 0ULL, p[0], p[1], p[2], p[3]);
2320 vg_assert(p[0] == 0x44 && p[1] == 0x0 && p[2] == 0x0 && p[3] == 0x2);
2323 #elif defined(VGP_ppc64le_linux)
2324 arch->vex.guest_CIA -= 4; // sizeof(ppc instr)
2326 /* Make sure our caller is actually sane, and we're really backing
2327 back over a syscall.
2329 sc == 44 00 00 02
2332 UChar *p = (UChar *)arch->vex.guest_CIA;
/* Note: the byte indices are reversed relative to the
   ppc32/ppc64be branch above, since the instruction bytes are
   examined in little-endian memory order here. */
2334 if (p[3] != 0x44 || p[2] != 0x0 || p[1] != 0x0 || p[0] != 0x02)
2335 VG_(message)(Vg_DebugMsg,
2336 "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
2337 arch->vex.guest_CIA + 0ULL, p[3], p[2], p[1], p[0]);
2339 vg_assert(p[3] == 0x44 && p[2] == 0x0 && p[1] == 0x0 && p[0] == 0x2);
2342 #elif defined(VGP_arm_linux)
2343 if (arch->vex.guest_R15T & 1) {
2344 // Thumb mode. SVC is encoded as
2345 // 1101 1111 imm8
2346 // where imm8 is the SVC number, and we only accept 0.
2347 arch->vex.guest_R15T -= 2; // sizeof(thumb 16 bit insn)
2348 UChar* p = (UChar*)(arch->vex.guest_R15T - 1);
2349 Bool valid = p[0] == 0 && p[1] == 0xDF;
2350 if (!valid) {
2351 VG_(message)(Vg_DebugMsg,
2352 "?! restarting over (Thumb) syscall that is not syscall "
2353 "at %#llx %02x %02x\n",
2354 arch->vex.guest_R15T - 1ULL, p[0], p[1]);
2356 vg_assert(valid);
2357 // FIXME: NOTE, this really isn't right. We need to back up
2358 // ITSTATE to what it was before the SVC instruction, but we
2359 // don't know what it was. At least assert that it is now
2360 // zero, because if it is nonzero then it must also have
2361 // been nonzero for the SVC itself, which means it was
2362 // conditional. Urk.
2363 vg_assert(arch->vex.guest_ITSTATE == 0);
2364 } else {
2365 // ARM mode. SVC is encoded as
2366 // cond 1111 imm24
2367 // where imm24 is the SVC number, and we only accept 0.
2368 arch->vex.guest_R15T -= 4; // sizeof(arm instr)
2369 UChar* p = (UChar*)arch->vex.guest_R15T;
2370 Bool valid = p[0] == 0 && p[1] == 0 && p[2] == 0
2371 && (p[3] & 0xF) == 0xF;
2372 if (!valid) {
2373 VG_(message)(Vg_DebugMsg,
2374 "?! restarting over (ARM) syscall that is not syscall "
2375 "at %#llx %02x %02x %02x %02x\n",
2376 arch->vex.guest_R15T + 0ULL, p[0], p[1], p[2], p[3]);
2378 vg_assert(valid);
2381 #elif defined(VGP_arm64_linux)
2382 arch->vex.guest_PC -= 4; // sizeof(arm64 instr)
2384 /* Make sure our caller is actually sane, and we're really backing
2385 back over a syscall.
2387 svc #0 == d4 00 00 01
2390 UChar *p = (UChar *)arch->vex.guest_PC;
2392 if (p[0] != 0x01 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0xD4)
2393 VG_(message)(
2394 Vg_DebugMsg,
2395 "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
2396 arch->vex.guest_PC + 0ULL, p[0], p[1], p[2], p[3]
2399 vg_assert(p[0] == 0x01 && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0xD4);
2402 #elif defined(VGP_x86_darwin)
2403 arch->vex.guest_EIP = arch->vex.guest_IP_AT_SYSCALL;
2405 /* Make sure our caller is actually sane, and we're really backing
2406 back over a syscall.
2408 int $0x80 == CD 80
2409 int $0x81 == CD 81
2410 int $0x82 == CD 82
2411 sysenter == 0F 34
2414 UChar *p = (UChar *)arch->vex.guest_EIP;
2415 Bool ok = (p[0] == 0xCD && p[1] == 0x80)
2416 || (p[0] == 0xCD && p[1] == 0x81)
2417 || (p[0] == 0xCD && p[1] == 0x82)
2418 || (p[0] == 0x0F && p[1] == 0x34);
2419 if (!ok)
2420 VG_(message)(Vg_DebugMsg,
2421 "?! restarting over syscall at %#x %02x %02x\n",
2422 arch->vex.guest_EIP, p[0], p[1]);
2423 vg_assert(ok);
2426 #elif defined(VGP_amd64_darwin)
2427 // DDD: #warning GrP fixme amd64 restart unimplemented
2428 vg_assert(0);
2430 #elif defined(VGP_s390x_linux)
2431 arch->vex.guest_IA -= 2; // sizeof(syscall)
2433 /* Make sure our caller is actually sane, and we're really backing
2434 back over a syscall.
2436 syscall == 0A <num>
2439 UChar *p = (UChar *)arch->vex.guest_IA;
2440 if (p[0] != 0x0A)
2441 VG_(message)(Vg_DebugMsg,
2442 "?! restarting over syscall at %#llx %02x %02x\n",
2443 arch->vex.guest_IA, p[0], p[1]);
2445 vg_assert(p[0] == 0x0A);
2448 #elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
2450 arch->vex.guest_PC -= 4; // sizeof(mips instr)
2452 /* Make sure our caller is actually sane, and we're really backing
2453 back over a syscall.
2455 syscall == 00 00 00 0C
2456 big endian
2457 syscall == 0C 00 00 00
2460 UChar *p = (UChar *)(arch->vex.guest_PC);
2461 # if defined (VG_LITTLEENDIAN)
2462 if (p[0] != 0x0c || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x00)
2463 VG_(message)(Vg_DebugMsg,
2464 "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
2465 (ULong)arch->vex.guest_PC, p[0], p[1], p[2], p[3]);
2467 vg_assert(p[0] == 0x0c && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0x00);
2468 # elif defined (VG_BIGENDIAN)
2469 if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x0c)
2470 VG_(message)(Vg_DebugMsg,
2471 "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
2472 (ULong)arch->vex.guest_PC, p[0], p[1], p[2], p[3]);
2474 vg_assert(p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0x0c);
2475 # else
2476 # error "Unknown endianness"
2477 # endif
2479 #elif defined(VGP_tilegx_linux)
2480 arch->vex.guest_pc -= 8; // sizeof({ swint1 })
2482 /* Make sure our caller is actually sane, and we're really backing
2483 back over a syscall. No other instruction in the same bundle.
2486 unsigned long *p = (unsigned long *)arch->vex.guest_pc;
2488 if (p[0] != 0x286b180051485000ULL ) // "swint1", little endian only
2489 VG_(message)(Vg_DebugMsg,
2490 "?! restarting over syscall at 0x%lx %lx\n",
2491 arch->vex.guest_pc, p[0]);
2492 vg_assert(p[0] == 0x286b180051485000ULL);
2495 #elif defined(VGP_x86_solaris)
2496 arch->vex.guest_EIP -= 2; // sizeof(int $0x91) or sizeof(syscall)
2498 /* Make sure our caller is actually sane, and we're really backing
2499 back over a syscall.
2501 int $0x91 == CD 91
2502 syscall == 0F 05
2503 sysenter == 0F 34
2505 Handle also other syscall instructions because we also handle them in
2506 the scheduler.
2507 int $0x80 == CD 80
2508 int $0x81 == CD 81
2509 int $0x82 == CD 82
2512 UChar *p = (UChar *)arch->vex.guest_EIP;
2514 Bool ok = (p[0] == 0xCD && p[1] == 0x91)
2515 || (p[0] == 0x0F && p[1] == 0x05)
2516 || (p[0] == 0x0F && p[1] == 0x34)
2517 || (p[0] == 0xCD && p[1] == 0x80)
2518 || (p[0] == 0xCD && p[1] == 0x81)
2519 || (p[0] == 0xCD && p[1] == 0x82);
2520 if (!ok)
2521 VG_(message)(Vg_DebugMsg,
2522 "?! restarting over syscall at %#x %02x %02x\n",
2523 arch->vex.guest_EIP, p[0], p[1]);
2524 vg_assert(ok);
2527 #elif defined(VGP_amd64_solaris)
2528 arch->vex.guest_RIP -= 2; // sizeof(syscall)
2530 /* Make sure our caller is actually sane, and we're really backing
2531 back over a syscall.
2533 syscall == 0F 05
2536 UChar *p = (UChar *)arch->vex.guest_RIP;
2538 Bool ok = (p[0] == 0x0F && p[1] == 0x05);
2539 if (!ok)
2540 VG_(message)(Vg_DebugMsg,
2541 "?! restarting over syscall at %#llx %02x %02x\n",
2542 arch->vex.guest_RIP, p[0], p[1]);
2543 vg_assert(ok);
2546 #else
2547 # error "ML_(fixup_guest_state_to_restart_syscall): unknown plat"
2548 #endif
2553 Fix up the guest state when a syscall is interrupted by a signal
2554 and so has been forced to return 'sysret'.
2556 To do this, we determine the precise state of the syscall by
2557 looking at the (real) IP at the time the signal happened. The
2558 syscall sequence looks like:
2560 1. unblock signals
2561 2. perform syscall
2562 3. save result to guest state (EAX, RAX, R3+CR0.SO, R0, V0)
2563 4. re-block signals
2565 If a signal
2566 happens at Then Why?
2567 [1-2) restart nothing has happened (restart syscall)
2568 [2] restart syscall hasn't started, or kernel wants to restart
2569 [2-3) save syscall complete, but results not saved
2570 [3-4) syscall complete, results saved
2572 Sometimes we never want to restart an interrupted syscall (because
2573 sigaction says not to), so we only restart if "restart" is True.
2575 This will also call VG_(post_syscall) if the syscall has actually
2576 completed (either because it was interrupted, or because it
2577 actually finished). It will not call VG_(post_syscall) if the
2578 syscall is set up for restart, which means that the pre-wrapper may
2579 get called multiple times.
2582 void
2583 VG_(fixup_guest_state_after_syscall_interrupted)( ThreadId tid,
2584 Addr ip,
2585 SysRes sres,
2586 Bool restart,
2587 struct vki_ucontext *uc)
2589 /* Note that we don't know the syscall number here, since (1) in
2590 general there's no reliable way to get hold of it short of
2591 stashing it in the guest state before the syscall, and (2) in
2592 any case we don't need to know it for the actions done by this
2593 routine.
2595 Furthermore, 'sres' is only used in the case where the syscall
2596 is complete, but the result has not been committed to the guest
2597 state yet. In any other situation it will be meaningless and
2598 therefore ignored. */
2600 ThreadState* tst;
2601 SyscallStatus canonical;
2602 ThreadArchState* th_regs;
2603 SyscallInfo* sci;
2605 /* Compute some Booleans indicating which range we're in. */
2606 Bool outside_range,
2607 in_setup_to_restart, // [1,2) in the .S files
2608 at_restart, // [2] in the .S files
2609 in_complete_to_committed, // [3,4) in the .S files
2610 in_committed_to_finished; // [4,5) in the .S files
2612 if (VG_(clo_trace_signals))
2613 VG_(message)( Vg_DebugMsg,
2614 "interrupted_syscall: tid=%d, ip=0x%llx, "
2615 "restart=%s, sres.isErr=%s, sres.val=%lld\n",
2616 (Int)tid,
2617 (ULong)ip,
2618 restart ? "True" : "False",
2619 sr_isError(sres) ? "True" : "False",
2620 (Long)(sr_isError(sres) ? sr_Err(sres) : sr_Res(sres)) );
2622 vg_assert(VG_(is_valid_tid)(tid));
2623 vg_assert(tid >= 1 && tid < VG_N_THREADS);
2624 vg_assert(VG_(is_running_thread)(tid));
2626 tst = VG_(get_ThreadState)(tid);
2627 th_regs = &tst->arch;
2628 sci = & syscallInfo[tid];
2630 # if defined(VGO_linux)
2631 outside_range
2632 = ip < ML_(blksys_setup) || ip >= ML_(blksys_finished);
2633 in_setup_to_restart
2634 = ip >= ML_(blksys_setup) && ip < ML_(blksys_restart);
2635 at_restart
2636 = ip == ML_(blksys_restart);
2637 in_complete_to_committed
2638 = ip >= ML_(blksys_complete) && ip < ML_(blksys_committed);
2639 in_committed_to_finished
2640 = ip >= ML_(blksys_committed) && ip < ML_(blksys_finished);
2641 # elif defined(VGO_darwin)
2642 outside_range
2643 = (ip < ML_(blksys_setup_MACH) || ip >= ML_(blksys_finished_MACH))
2644 && (ip < ML_(blksys_setup_MDEP) || ip >= ML_(blksys_finished_MDEP))
2645 && (ip < ML_(blksys_setup_UNIX) || ip >= ML_(blksys_finished_UNIX));
2646 in_setup_to_restart
2647 = (ip >= ML_(blksys_setup_MACH) && ip < ML_(blksys_restart_MACH))
2648 || (ip >= ML_(blksys_setup_MDEP) && ip < ML_(blksys_restart_MDEP))
2649 || (ip >= ML_(blksys_setup_UNIX) && ip < ML_(blksys_restart_UNIX));
2650 at_restart
2651 = (ip == ML_(blksys_restart_MACH))
2652 || (ip == ML_(blksys_restart_MDEP))
2653 || (ip == ML_(blksys_restart_UNIX));
2654 in_complete_to_committed
2655 = (ip >= ML_(blksys_complete_MACH) && ip < ML_(blksys_committed_MACH))
2656 || (ip >= ML_(blksys_complete_MDEP) && ip < ML_(blksys_committed_MDEP))
2657 || (ip >= ML_(blksys_complete_UNIX) && ip < ML_(blksys_committed_UNIX));
2658 in_committed_to_finished
2659 = (ip >= ML_(blksys_committed_MACH) && ip < ML_(blksys_finished_MACH))
2660 || (ip >= ML_(blksys_committed_MDEP) && ip < ML_(blksys_finished_MDEP))
2661 || (ip >= ML_(blksys_committed_UNIX) && ip < ML_(blksys_finished_UNIX));
2662 /* Wasn't that just So Much Fun? Does your head hurt yet? Mine does. */
2663 # elif defined(VGO_solaris)
2664 /* The solaris port is never outside the range. */
2665 outside_range = False;
2666 /* The Solaris kernel never restarts syscalls directly! */
2667 at_restart = False;
2668 if (tst->os_state.in_door_return) {
2669 vg_assert(ip >= ML_(blksys_setup_DRET)
2670 && ip < ML_(blksys_finished_DRET));
2672 in_setup_to_restart
2673 = ip >= ML_(blksys_setup_DRET) && ip < ML_(blksys_complete_DRET);
2674 in_complete_to_committed
2675 = ip >= ML_(blksys_complete_DRET) && ip < ML_(blksys_committed_DRET);
2676 in_committed_to_finished
2677 = ip >= ML_(blksys_committed_DRET) && ip < ML_(blksys_finished_DRET);
2679 else {
2680 vg_assert(ip >= ML_(blksys_setup) && ip < ML_(blksys_finished));
2682 in_setup_to_restart
2683 = ip >= ML_(blksys_setup) && ip < ML_(blksys_complete);
2684 in_complete_to_committed
2685 = ip >= ML_(blksys_complete) && ip < ML_(blksys_committed);
2686 in_committed_to_finished
2687 = ip >= ML_(blksys_committed) && ip < ML_(blksys_finished);
2689 # else
2690 # error "Unknown OS"
2691 # endif
2693 /* Figure out what the state of the syscall was by examining the
2694 (real) IP at the time of the signal, and act accordingly. */
2695 if (outside_range) {
2696 if (VG_(clo_trace_signals))
2697 VG_(message)( Vg_DebugMsg,
2698 " not in syscall at all: hmm, very suspicious\n" );
2699 /* Looks like we weren't in a syscall at all. Hmm. */
2700 vg_assert(sci->status.what != SsIdle);
2701 return;
2704 /* We should not be here unless this thread had first started up
2705 the machinery for a syscall by calling VG_(client_syscall).
2706 Hence: */
2707 vg_assert(sci->status.what != SsIdle);
2709 /* now, do one of four fixup actions, depending on where the IP has
2710 got to. */
2712 if (in_setup_to_restart) {
2713 /* syscall hasn't even started; go around again */
2714 if (VG_(clo_trace_signals))
2715 VG_(message)( Vg_DebugMsg, " not started: restarting\n");
2716 vg_assert(sci->status.what == SsHandToKernel);
2717 ML_(fixup_guest_state_to_restart_syscall)(th_regs);
2720 else
2721 if (at_restart) {
2722 # if defined(VGO_solaris)
2723 /* We should never hit this branch on Solaris, see the comment above. */
2724 vg_assert(0);
2725 # endif
2727 /* We're either about to run the syscall, or it was interrupted
2728 and the kernel restarted it. Restart if asked, otherwise
2729 EINTR it. */
2730 if (restart) {
2731 if (VG_(clo_trace_signals))
2732 VG_(message)( Vg_DebugMsg, " at syscall instr: restarting\n");
2733 ML_(fixup_guest_state_to_restart_syscall)(th_regs);
2734 } else {
2735 if (VG_(clo_trace_signals))
2736 VG_(message)( Vg_DebugMsg, " at syscall instr: returning EINTR\n");
2737 canonical = convert_SysRes_to_SyscallStatus(
2738 VG_(mk_SysRes_Error)( VKI_EINTR )
2740 if (!(sci->flags & SfNoWriteResult))
2741 putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
2742 sci->status = canonical;
2743 VG_(post_syscall)(tid);
2747 else
2748 if (in_complete_to_committed) {
2749 /* Syscall complete, but result hasn't been written back yet.
2750 Write the SysRes we were supplied with back to the guest
2751 state. */
2752 if (VG_(clo_trace_signals))
2753 VG_(message)( Vg_DebugMsg,
2754 " completed, but uncommitted: committing\n");
2755 canonical = convert_SysRes_to_SyscallStatus( sres );
2756 vg_assert(!(sci->flags & SfNoWriteResult));
2757 putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
2758 # if defined(VGO_solaris)
2759 if (tst->os_state.in_door_return) {
2760 # if defined(VGP_x86_solaris)
2761 /* Registers %esp and %ebp were also modified by the syscall. */
2762 tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
2763 tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
2764 # elif defined(VGP_amd64_solaris)
2765 tst->arch.vex.guest_RSP = uc->uc_mcontext.gregs[VKI_REG_RSP];
2766 tst->arch.vex.guest_RBP = uc->uc_mcontext.gregs[VKI_REG_RBP];
2767 # endif
2769 # endif
2770 sci->status = canonical;
2771 VG_(post_syscall)(tid);
2774 else
2775 if (in_committed_to_finished) {
2776 /* Result committed, but the signal mask has not been restored;
2777 we expect our caller (the signal handler) will have fixed
2778 this up. */
2779 if (VG_(clo_trace_signals))
2780 VG_(message)( Vg_DebugMsg,
2781 " completed and committed: nothing to do\n");
2782 # if defined(VGP_x86_solaris)
2783 /* The %eax and %edx values are committed but the carry flag is still
2784 uncommitted. Save it now. */
2785 LibVEX_GuestX86_put_eflag_c(sr_isError(sres), &th_regs->vex);
2786 # elif defined(VGP_amd64_solaris)
2787 LibVEX_GuestAMD64_put_rflag_c(sr_isError(sres), &th_regs->vex);
2788 # endif
2789 getSyscallStatusFromGuestState( &sci->status, &th_regs->vex );
2790 vg_assert(sci->status.what == SsComplete);
2791 VG_(post_syscall)(tid);
2794 else
2795 VG_(core_panic)("?? strange syscall interrupt state?");
2797 /* In all cases, the syscall is now finished (even if we called
2798 ML_(fixup_guest_state_to_restart_syscall), since that just
2799 re-positions the guest's IP for another go at it). So we need
2800 to record that fact. */
2801 sci->status.what = SsIdle;
#if defined(VGO_solaris)
/* Returns True if ip is inside a fixable syscall code in syscall-*-*.S.  This
   function can be called by a 'non-running' thread!

   tid - thread whose state is consulted (only for the door-return flag)
   ip  - instruction pointer to classify against the syscall code stretches */
Bool VG_(is_ip_in_blocking_syscall)(ThreadId tid, Addr ip)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   /* A thread performing a door return blocks in a dedicated code stretch
      (the *_DRET labels); all other threads block in the generic syscall
      stretch.  Both ranges are [setup, finished). */
   if (tst->os_state.in_door_return)
      return ip >= ML_(blksys_setup_DRET) && ip < ML_(blksys_finished_DRET);
   else
      return ip >= ML_(blksys_setup) && ip < ML_(blksys_finished);
}
#endif
#if defined(VGO_darwin)
// Clean up after workq_ops(WQOPS_THREAD_RETURN) jumped to wqthread_hijack.
// This is similar to VG_(fixup_guest_state_after_syscall_interrupted).
// This longjmps back to the scheduler and therefore never returns.
void ML_(wqthread_continue_NORETURN)(ThreadId tid)
{
   ThreadState* tst;
   SyscallInfo* sci;

   /* We arrive here without the lock; take it before touching any
      scheduler-visible state. */
   VG_(acquire_BigLock)(tid, "wqthread_continue_NORETURN");

   PRINT("SYSCALL[%d,%d](%s) workq_ops() starting new workqueue item\n",
      VG_(getpid)(), tid, VG_SYSNUM_STRING(__NR_workq_ops));

   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(tid >= 1 && tid < VG_N_THREADS);
   vg_assert(VG_(is_running_thread)(tid));

   tst = VG_(get_ThreadState)(tid);
   sci = & syscallInfo[tid];
   vg_assert(sci->status.what != SsIdle);
   vg_assert(tst->os_state.wq_jmpbuf_valid); // check this BEFORE post_syscall

   // Pretend the syscall completed normally, but don't touch the thread state.
   sci->status = convert_SysRes_to_SyscallStatus( VG_(mk_SysRes_Success)(0) );
   sci->flags |= SfNoWriteResult;
   VG_(post_syscall)(tid);

   /* The kernel may have remapped memory behind our back while the thread
      was parked in the workqueue; resynchronise our view of the mappings. */
   ML_(sync_mappings)("in", "ML_(wqthread_continue_NORETURN)", 0);

   sci->status.what = SsIdle;

   /* Hand control back to the scheduler loop for this thread. */
   vg_assert(tst->sched_jmpbuf_valid);
   VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);

   /* NOTREACHED */
   vg_assert(0);
}
#endif
2861 /* ---------------------------------------------------------------------
2862 A place to store the where-to-call-when-really-done pointer
2863 ------------------------------------------------------------------ */
2865 // When the final thread is done, where shall I call to shutdown the
2866 // system cleanly? Is set once at startup (in m_main) and never
2867 // changes after that. Is basically a pointer to the exit
2868 // continuation. This is all just a nasty hack to avoid calling
2869 // directly from m_syswrap to m_main at exit, since that would cause
2870 // m_main to become part of a module cycle, which is silly.
2871 void (* VG_(address_of_m_main_shutdown_actions_NORETURN) )
2872 (ThreadId,VgSchedReturnCode)
2873 = NULL;
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/