/*--------------------------------------------------------------------*/
/*--- Solaris-specific syscalls, etc.            syswrap-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2017 Petr Pavlu
      setup@dagobah.cz

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

/* Copyright 2013-2017, Ivo Raisr <ivosh@ivosh.net>. */

/* Copyright 2015-2017, Tomas Jedlicka <jedlickat@gmail.com>. */

/* Copyright 2013, OmniTI Computer Consulting, Inc. All rights reserved. */

#if defined(VGO_solaris)

#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"      // VG_(di_notify_*)
#include "pub_core_debuglog.h"
#include "pub_core_clientstate.h"
#include "pub_core_gdbserver.h"
#include "pub_core_inner.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"        // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_transtab.h"       // VG_(discard_translations)
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_ume.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "pub_core_clreq.h"
#endif

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-main.h"
#include "priv_syswrap-solaris.h"
/* Return the number of living (not dead) threads of the requested kind.
   count_daemon == True:  count daemon threads
   count_daemon == False: count non-daemon threads */
static UInt count_living_daemon_threads(Bool count_daemon)
{
   UInt count = 0;
   for (ThreadId tid = 1; tid < VG_N_THREADS; tid++)
      if (VG_(threads)[tid].status != VgTs_Empty &&
          VG_(threads)[tid].status != VgTs_Zombie &&
          VG_(threads)[tid].os_state.daemon_thread == count_daemon)
         count++;

   return count;
}
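
/* Illustrative sketch only, not part of the original code and not compiled:
   the two possible calls partition the set of living threads, so a
   hypothetical helper could obtain the total living-thread count as their
   sum. */
#if 0
static UInt example_count_all_living_threads(void)
{
   return count_living_daemon_threads(True)      /* daemon threads */
          + count_living_daemon_threads(False);  /* non-daemon threads */
}
#endif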

/* Note: The following functions (thread_wrapper, run_a_thread_NORETURN,
   ML_(start_thread_NORETURN), ML_(allocstack) and
   VG_(main_thread_wrapper_NORETURN)) are based on the code in
   syswrap-linux.c.  Keep them synchronized! */

/* Run a thread from beginning to end and return the thread's
   scheduler-return-code. */
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId tid = (ThreadId)tidW;
   ThreadState *tst = VG_(get_ThreadState)(tid);

   VG_(debugLog)(1, "syswrap-solaris",
                 "thread_wrapper(tid=%u): entry\n",
                 tid);

   vg_assert(tst->status == VgTs_Init);

   /* Make sure we get the CPU lock before doing anything significant. */
   VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");

   if (0)
      VG_(printf)("thread tid %u started: stack = %p\n", tid, (void *)&tid);

   /* Make sure error reporting is enabled in the new thread. */
   tst->err_disablement_level = 0;

   if (tid == 1)
      VG_TRACK(pre_thread_first_insn, tid);
   else {
      /* For newly created threads, VG_TRACK(pre_thread_first_insn, tid) is
         invoked later from PRE(sys_getsetcontext)() when setucontext()
         called from _thrp_setup() concludes the new thread's setup.
         Invoking it here would be way too early - the new thread has no
         stack yet. */
   }

   tst->os_state.lwpid = VG_(gettid)();
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask. */

   ret = VG_(scheduler)(tid);

   vg_assert(VG_(is_exiting)(tid));

   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-solaris",
                 "thread_wrapper(tid=%u): exit, schedreturncode %s\n",
                 tid, VG_(name_of_VgSchedReturnCode)(ret));

   /* Return to caller, still holding the lock. */
   return ret;
}

/* Run a thread all the way to the end, then do appropriate exit actions
   (this is the last-one-out-turn-off-the-lights bit). */
static void run_a_thread_NORETURN(Word tidW)
{
   ThreadId tid = (ThreadId)tidW;
   VgSchedReturnCode src;
   Int c;
   ThreadState *tst;
#ifdef ENABLE_INNER_CLIENT_REQUEST
   Int registered_vgstack_id;
#endif

   VG_(debugLog)(1, "syswrap-solaris",
                 "run_a_thread_NORETURN(tid=%u): pre-thread_wrapper\n",
                 tid);

   tst = VG_(get_ThreadState)(tid);
   vg_assert(tst);

   /* A thread has two stacks:
      * the simulated stack (used by the synthetic cpu.  Guest process
        is using this stack).
      * the valgrind stack (used by the real cpu.  Valgrind code is running
        on this stack).
      When Valgrind runs as an inner, it must signal that its (real) stack
      is the stack to use by the outer to e.g. do stacktraces.
   */
   INNER_REQUEST
      (registered_vgstack_id
       = VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
                                 tst->os_state.valgrind_stack_init_SP));

   /* Run the thread all the way through. */
   src = thread_wrapper(tid);

   VG_(debugLog)(1, "syswrap-solaris",
                 "run_a_thread_NORETURN(tid=%u): post-thread_wrapper\n",
                 tid);

   c = count_living_daemon_threads(False);
   vg_assert(c >= 1); /* Stay sane. */

   /* Tell the tool that schedctl data belonging to this thread are gone. */
   Addr a = tst->os_state.schedctl_data;
   if (a != 0)
      VG_TRACK(die_mem_munmap, a, sizeof(struct vki_sc_shared));

   /* Deregister thread's stack. */
   if (tst->os_state.stk_id != NULL_STK_ID)
      VG_(deregister_stack)(tst->os_state.stk_id);

   /* Tell the tool this thread is exiting. */
   VG_TRACK(pre_thread_ll_exit, tid);

   /* If the thread is exiting with errors disabled, complain loudly;
      doing so is bad (does the user know this has happened?)  Also, in all
      cases, be paranoid and clear the flag anyway so that the thread slot is
      safe in this respect if later reallocated.  This should be unnecessary
      since the flag should be cleared when the slot is reallocated, in
      thread_wrapper(). */
   if (tst->err_disablement_level > 0) {
      VG_(umsg)(
         "WARNING: exiting thread has error reporting disabled.\n"
         "WARNING: possibly as a result of some mistake in the use\n"
         "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
      );
      VG_(debugLog)(
         1, "syswrap-solaris",
         "run_a_thread_NORETURN(tid=%u): "
         "WARNING: exiting thread has err_disablement_level = %u\n",
         tid, tst->err_disablement_level
      );
   }
   tst->err_disablement_level = 0;

   if (c == 1) {
      UInt daemon_threads = count_living_daemon_threads(True);
      if (daemon_threads == 0)
         VG_(debugLog)(1, "syswrap-solaris",
                       "run_a_thread_NORETURN(tid=%u): "
                       "last one standing\n",
                       tid);
      else
         VG_(debugLog)(1, "syswrap-solaris",
                       "run_a_thread_NORETURN(tid=%u): "
                       "last non-daemon thread standing "
                       "[daemon threads=%u]\n",
                       tid, daemon_threads);

      /* We are the last non-daemon thread standing.  Keep hold of the lock and
         carry on to show final tool results, then exit the entire system.
         Use the continuation pointer set at startup in m_main. */
      if ((src == VgSrc_ExitThread) && (daemon_threads > 0))
         src = VgSrc_ExitProcess;
      (*VG_(address_of_m_main_shutdown_actions_NORETURN))(tid, src);
   }
   else {
      VG_(debugLog)(1, "syswrap-solaris",
                    "run_a_thread_NORETURN(tid=%u): "
                    "not last one standing\n",
                    tid);

      /* OK, thread is dead, but others still exist.  Just exit. */

      /* This releases the run lock. */
      VG_(exit_thread)(tid);
      vg_assert(tst->status == VgTs_Zombie);
      vg_assert(sizeof(tst->status) == 4);

      INNER_REQUEST(VALGRIND_STACK_DEREGISTER(registered_vgstack_id));

      /* We have to use this sequence to terminate the thread to
         prevent a subtle race.  If VG_(exit_thread)() had left the
         ThreadState as Empty, then it could have been reallocated, reusing
         the stack while we're doing these last cleanups.  Instead,
         VG_(exit_thread) leaves it as Zombie to prevent reallocation.  We
         need to make sure we don't touch the stack between marking it Empty
         and exiting.  Hence the assembler. */
#if defined(VGP_x86_solaris)
      /* Luckily lwp_exit doesn't take any arguments so we don't have to mess
         with the stack. */
      __asm__ __volatile__ (
         "movl %[EMPTY], %[status]\n"   /* set tst->status = VgTs_Empty */
         "movl $"VG_STRINGIFY(__NR_lwp_exit)", %%eax\n"
         "int $0x91\n"                  /* lwp_exit() */
         : [status] "=m" (tst->status)
         : [EMPTY] "n" (VgTs_Empty)
         : "eax", "edx", "cc", "memory");
#elif defined(VGP_amd64_solaris)
      __asm__ __volatile__ (
         "movl %[EMPTY], %[status]\n"   /* set tst->status = VgTs_Empty */
         "movq $"VG_STRINGIFY(__NR_lwp_exit)", %%rax\n"
         "syscall\n"                    /* lwp_exit() */
         : [status] "=m" (tst->status)
         : [EMPTY] "n" (VgTs_Empty)
         : "rax", "rdx", "cc", "memory");
#else
#  error "Unknown platform"
#endif

      VG_(core_panic)("Thread exit failed?\n");
   }

   /*NOTREACHED*/
   vg_assert(0);
}

Word ML_(start_thread_NORETURN)(void *arg)
{
   ThreadState *tst = (ThreadState*)arg;
   ThreadId tid = tst->tid;

   run_a_thread_NORETURN((Word)tid);
   /*NOTREACHED*/
   vg_assert(0);
}

/* Allocate a stack for this thread, if it doesn't already have one.
   They're allocated lazily, and never freed.  Returns the initial stack
   pointer value to use, or 0 if allocation failed. */
Addr ML_(allocstack)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   VgStack *stack;
   Addr initial_SP;

   /* Either the stack_base and stack_init_SP are both zero (in which
      case a stack hasn't been allocated) or they are both non-zero,
      in which case it has. */

   if (tst->os_state.valgrind_stack_base == 0)
      vg_assert(tst->os_state.valgrind_stack_init_SP == 0);

   if (tst->os_state.valgrind_stack_base != 0)
      vg_assert(tst->os_state.valgrind_stack_init_SP != 0);

   /* If no stack is present, allocate one. */

   if (tst->os_state.valgrind_stack_base == 0) {
      stack = VG_(am_alloc_VgStack)( &initial_SP );
      if (stack) {
         tst->os_state.valgrind_stack_base    = (Addr)stack;
         tst->os_state.valgrind_stack_init_SP = initial_SP;
      }
   }

   if (0)
      VG_(printf)("stack for tid %u at %p; init_SP=%p\n",
                  tid,
                  (void*)tst->os_state.valgrind_stack_base,
                  (void*)tst->os_state.valgrind_stack_init_SP);

   return tst->os_state.valgrind_stack_init_SP;
}

/* Allocate a stack for the main thread, and run it all the way to the
   end.  Although we already have a working VgStack (VG_(interim_stack)) it's
   better to allocate a new one, so that overflow detection works uniformly
   for all threads.  Also initialize the GDT (for normal threads, this is done
   in the PRE wrapper of lwp_create). */
void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
{
   Addr sp;

   VG_(debugLog)(1, "syswrap-solaris",
                 "entering VG_(main_thread_wrapper_NORETURN)\n");

   sp = ML_(allocstack)(tid);
#if defined(ENABLE_INNER_CLIENT_REQUEST)
   {
      // We must register the main thread stack before the call
      // to ML_(call_on_new_stack_0_1), otherwise the outer valgrind
      // reports 'write error' on the non-registered stack.
      ThreadState *tst = VG_(get_ThreadState)(tid);
      INNER_REQUEST
         ((void)
          VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
                                  tst->os_state.valgrind_stack_init_SP));
   }
#endif

#if defined(VGP_x86_solaris)
   {
      ThreadState *tst = VG_(get_ThreadState)(tid);
      ML_(setup_gdt)(&tst->arch.vex);
      ML_(update_gdt_lwpgs)(tid);
   }
#elif defined(VGP_amd64_solaris)
   /* Nothing to do. */
#else
#  error "Unknown platform"
#endif

   /* If we can't even allocate the first thread's stack, we're hosed.
      Give up. */
   vg_assert2(sp != 0, "Cannot allocate main thread's stack.");

   /* Shouldn't be any other threads around yet. */
   vg_assert(VG_(count_living_threads)() == 1);

   ML_(call_on_new_stack_0_1)(
      (Addr)sp,               /* stack */
      0,                      /* bogus return address */
      run_a_thread_NORETURN,  /* fn to call */
      (Word)tid               /* arg to give it */
   );

   /*NOTREACHED*/
   vg_assert(0);
}

/* Deallocate the GDT for a thread. */
void VG_(cleanup_thread)(ThreadArchState *arch)
{
#if defined(VGP_x86_solaris)
   ML_(cleanup_gdt)(&arch->vex);
#elif defined(VGP_amd64_solaris)
   /* Nothing to do. */
#else
#  error "Unknown platform"
#endif
}

/*
 * Notify the core about spring cleaning of schedctl data pages for all
 * threads in the child post-fork handler.  Libc will issue new schedctl
 * syscalls for threads in the child when the need arises.
 *
 * See also POST(schedctl) and run_a_thread_NORETURN() when a thread exits.
 */
static void clean_schedctl_data(ThreadId tid)
{
   UInt i;
   for (i = 0; i < VG_N_THREADS; i++) {
      ThreadState *tst = &VG_(threads)[i];
      if (tst->status != VgTs_Empty) {
         Addr a = tst->os_state.schedctl_data;
         if (a != 0) {
            tst->os_state.schedctl_data = 0;
            a = VG_PGROUNDDN(a);
            if (VG_(am_find_anon_segment)(a))
               VG_(am_notify_munmap)(a, VKI_PAGE_SIZE);
         }
      }
   }
}

void VG_(syswrap_init)(void)
{
   VG_(atfork)(NULL, NULL, clean_schedctl_data);
}

/* Changes ownership of a memory mapping shared between the kernel and the
   client process.  This mapping should have already been pre-arranged during
   process address space initialization happening in the kernel.  Valgrind on
   startup created a segment for this mapping categorized as Valgrind's owned
   anonymous.  The size of this mapping typically varies among Solaris versions
   but should be page aligned.
   If 'once_only' is 'True', it is expected this function is called only once
   and the mapping ownership has not been changed yet [useful during
   initialization].  If 'False', this function can be called many times but
   changes ownership only upon the first invocation [useful in syscall
   wrappers]. */
void VG_(change_mapping_ownership)(Addr addr, Bool once_only)
{
   const NSegment *seg = VG_(am_find_anon_segment)(addr);
   vg_assert(seg != NULL);
   vg_assert(seg->start == addr);
   vg_assert(VG_IS_PAGE_ALIGNED(seg->start));
   vg_assert(VG_IS_PAGE_ALIGNED(seg->end + 1));
   SizeT size = seg->end - seg->start + 1;
   vg_assert(size > 0);

   Bool do_change = False;
   if (once_only) {
      vg_assert(VG_(am_is_valid_for_valgrind)(addr, size, VKI_PROT_READ));
      do_change = True;
   } else {
      if (!VG_(am_is_valid_for_client)(addr, size, VKI_PROT_READ))
         do_change = True;
   }

   if (do_change) {
      Bool change_ownership_OK = VG_(am_change_ownership_v_to_c)(addr, size);
      vg_assert(change_ownership_OK);

      /* Tell the tool about the just-discovered mapping. */
      VG_TRACK(new_mem_startup,
               addr, size,
               True  /* readable? */,
               False /* writable? */,
               False /* executable? */,
               0     /* di_handle */);
   }
}

/* Calculate the Fletcher-32 checksum of a given buffer. */
UInt ML_(fletcher32)(UShort *buf, SizeT blocks)
{
   UInt sum1 = 0;
   UInt sum2 = 0;
   SizeT i;

   for (i = 0; i < blocks; i++) {
      sum1 = (sum1 + buf[i]) % 0xffff;
      sum2 = (sum2 + sum1) % 0xffff;
   }

   return (sum2 << 16) | sum1;
}

/* Calculate the Fletcher-64 checksum of a given buffer. */
ULong ML_(fletcher64)(UInt *buf, SizeT blocks)
{
   ULong sum1 = 0;
   ULong sum2 = 0;
   SizeT i;

   for (i = 0; i < blocks; i++) {
      sum1 = (sum1 + buf[i]) % 0xffffffff;
      sum2 = (sum2 + sum1) % 0xffffffff;
   }
   return (sum2 << 32) | sum1;
}
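
/* Illustrative sketch only, not part of the original code and not compiled:
   how a caller might checksum a word-aligned buffer with the helpers above.
   The buffer and block counts are hypothetical; see the actual callers of
   ML_(fletcher32)/ML_(fletcher64) elsewhere in the Solaris port for the
   real uses. */
#if 0
static void example_checksum(UInt *words, SizeT n_words)
{
   /* Fletcher-64: the low 32 bits of the result hold the running sum
      (mod 0xffffffff), the high 32 bits hold the sum of sums. */
   ULong csum64 = ML_(fletcher64)(words, n_words);
   /* Fletcher-32 works analogously on 16-bit blocks, so the same buffer
      contains twice as many blocks. */
   UInt csum32 = ML_(fletcher32)((UShort *) words, n_words * 2);
   VG_(debugLog)(3, "syswrap-solaris", "csum32=%#x csum64=%#llx\n",
                 csum32, csum64);
}
#endif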

/* Save a complete context (VCPU state, sigmask) of a given client thread
   into the vki_ucontext_t structure.  This structure is supposed to be
   allocated in client memory; the caller must make sure that the memory can
   be dereferenced.  The active tool is informed about the save. */
void VG_(save_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   VG_TRACK(pre_mem_write, part, tid, "save_context(uc)", (Addr)uc,
            sizeof(*uc));

   uc->uc_flags = VKI_UC_ALL;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_flags,
            sizeof(uc->uc_flags));

   /* Old context */
   uc->uc_link = tst->os_state.oldcontext;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_link,
            sizeof(uc->uc_link));

   /* Clear uc->vki_uc_signo.  This slot is used by the signal machinery to
      store a signal number. */
   VKI_UC_SIGNO(uc) = 0;

   /* Sigmask */
   uc->uc_sigmask = tst->sig_mask;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_sigmask,
            sizeof(uc->uc_sigmask));

   /* Stack */
   {
      if (tst->os_state.ustack
          && ML_(safe_to_deref)(tst->os_state.ustack, sizeof(vki_stack_t))
          && tst->os_state.ustack->ss_size) {
         /* If ustack points to a valid stack, copy it to ucontext. */
         uc->uc_stack = *tst->os_state.ustack;
      }
      else {
         /* Ustack is not valid.  A correct stack has to be figured out
            manually. */
         SysRes res;
         vki_stack_t altstack;

         /* Get information about the alternate stack. */
         res = VG_(do_sys_sigaltstack)(tid, NULL, &altstack);
         vg_assert(!sr_isError(res));

         if (altstack.ss_flags == VKI_SS_ONSTACK) {
            /* If the alternate stack is active, copy it to ucontext. */
            uc->uc_stack = altstack;
         }
         else {
            /* No information about the stack is present; save information
               about the current main stack to ucontext.  This branch should
               be reached only by the main thread. */
            ThreadState *tst2 = VG_(get_ThreadState)(1);
            uc->uc_stack.ss_sp = (void*)(tst2->client_stack_highest_byte + 1
                                         - tst2->client_stack_szB);
            uc->uc_stack.ss_size = tst2->client_stack_szB;
            uc->uc_stack.ss_flags = 0;
         }
      }

      VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_stack,
               sizeof(uc->uc_stack));
   }

   /* Save the architecture-specific part of the context. */
   ML_(save_machine_context)(tid, uc, part);
}

/* Set a complete context (VCPU state, sigmask) of a given client thread
   according to the values passed in the vki_ucontext_t structure.  This
   structure is supposed to be allocated in client memory; the caller must
   make sure that the memory can be dereferenced.  The active tool is
   informed about what parts of the structure are read.

   This function is a counterpart to VG_(save_context)(). */
void VG_(restore_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part,
                          Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr old_esp = VG_(get_SP)(tid);

   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_flags)",
            (Addr)&uc->uc_flags, sizeof(uc->uc_flags));

   /* Old context */
   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_link)",
            (Addr)&uc->uc_link, sizeof(uc->uc_link));
   tst->os_state.oldcontext = uc->uc_link;

   /* Sigmask */
   if (uc->uc_flags & VKI_UC_SIGMASK) {
      SysRes res;

      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_sigmask)",
               (Addr)&uc->uc_sigmask, sizeof(uc->uc_sigmask));
      res = VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, &uc->uc_sigmask,
                                    NULL);
      /* Setting the signal mask should never fail. */
      vg_assert(!sr_isError(res));
   }

   /* Stack */
   if (uc->uc_flags & VKI_UC_STACK) {
      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_stack)",
               (Addr)&uc->uc_stack, sizeof(uc->uc_stack));

      if (uc->uc_stack.ss_flags == VKI_SS_ONSTACK) {
         /* This seems to be a little bit dangerous but it is what the kernel
            does. */
         if (VG_(clo_trace_signals))
            VG_(dmsg)("restore_context, sigaltstack: tid %u, "
                      "ss %p{%p,sz=%lu,flags=%#x}\n",
                      tid, &uc->uc_stack, uc->uc_stack.ss_sp,
                      (SizeT)uc->uc_stack.ss_size, uc->uc_stack.ss_flags);

         tst->altstack.ss_sp = uc->uc_stack.ss_sp;
         tst->altstack.ss_size = uc->uc_stack.ss_size;
         /* Do not copy ss_flags; they are calculated dynamically by
            Valgrind. */
      }

      /* Copyout the new stack. */
      if (tst->os_state.ustack
          && VG_(am_is_valid_for_client)((Addr)tst->os_state.ustack,
                                         sizeof(*tst->os_state.ustack),
                                         VKI_PROT_WRITE))
         *tst->os_state.ustack = uc->uc_stack;
      VG_TRACK(post_mem_write, part, tid, (Addr)&tst->os_state.ustack,
               sizeof(tst->os_state.ustack));
   }

   /* Restore the architecture-specific part of the context. */
   ML_(restore_machine_context)(tid, uc, part, esp_is_thrptr);

   /* If the thread stack is already known, kill the deallocated stack area.
      This is important when returning from a signal handler. */
   if (tst->client_stack_highest_byte && tst->client_stack_szB) {
      Addr end = tst->client_stack_highest_byte;
      Addr start = end + 1 - tst->client_stack_szB;
      Addr new_esp = VG_(get_SP)(tid);

      /* Make sure that the old and new stack pointer are on the same (active)
         stack.  The alternate stack is currently never affected by this
         code. */
      if (start <= old_esp && old_esp <= end
          && start <= new_esp && new_esp <= end
          && new_esp > old_esp)
         VG_TRACK(die_mem_stack, old_esp - VG_STACK_REDZONE_SZB,
                  (new_esp - old_esp) + VG_STACK_REDZONE_SZB);
   }
}

/* Set a client stack associated with a given thread id according to the
   values passed in the vki_stack_t structure. */
static void set_stack(ThreadId tid, vki_stack_t *st)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr new_start, new_end;
   SizeT new_size;
   Addr cur_start;
   SizeT cur_size;

   VG_(debugLog)(2, "syswrap-solaris",
                 "set stack: sp=%#lx, size=%#lx.\n",
                 (Addr)st->ss_sp, (SizeT)st->ss_size);

   /* Stay sane. */
   vg_assert(st->ss_flags == 0);

   new_start = (Addr)st->ss_sp;
   new_end = new_start + st->ss_size - 1;
   new_size = st->ss_size;
   cur_start = tst->client_stack_highest_byte + 1
               - tst->client_stack_szB;
   cur_size = tst->client_stack_szB;

   if (new_start == cur_start && new_size == cur_size) {
      /* No change is requested, bail out. */
      return;
   }

   if (tid == 1 && (new_size == 0 || new_size > VG_(clstk_max_size))) {
      /* The main thread requests to use a stack without any size checking, or
         a stack that is too big.  Fall back to the maximum allocated client
         stack. */

      /* TODO I think it is possible to give up on setting the main stack
         anyway.  Valgrind knows where it is located and it is already
         registered as VG_(clstk_id). */

      new_size = VG_(clstk_max_size);
      new_end = tst->client_stack_highest_byte;
      new_start = new_end + 1 - new_size;
   }

   if (tst->os_state.stk_id == NULL_STK_ID) {
      /* This thread doesn't have a stack set yet. */
      VG_(debugLog)(2, "syswrap-solaris",
                    "Stack set to %#lx-%#lx (new) for thread %u.\n",
                    new_start, new_end, tid);
      tst->os_state.stk_id = VG_(register_stack)(new_start, new_end);
   } else {
      /* Change a thread stack. */
      VG_(debugLog)(2, "syswrap-solaris",
                    "Stack set to %#lx-%#lx (change) for thread %u.\n",
                    new_start, new_end, tid);
      VG_(change_stack)(tst->os_state.stk_id, new_start, new_end);
   }
   tst->client_stack_highest_byte = new_end;
   tst->client_stack_szB = new_size;
}

/* ---------------------------------------------------------------------
   Door tracking.  Used mainly for the server side where door_return()
   parameters alone do not contain sufficient information.
   Also used on the client side when new door descriptors are passed via
   door_call() in desc_ptr.  Not used for tracking door descriptors
   explicitly open()'ed [generic fd tracking is used in that case].
   ------------------------------------------------------------------ */

/* One of these is allocated for each created door. */
typedef struct OpenDoor
{
   Bool server; /* TRUE = server door, FALSE = client door */
   Int fd;      /* The file descriptor. */
   union {
      /* Server side. */
      struct {
         Addr server_procedure; /* The door server procedure. */
         HChar *pathname;       /* NULL if unknown. */
      };
      /* Client side. */
      struct {
         /* Hook called during PRE door_call()
            to check contents of params->data_ptr. */
         void (*pre_mem_hook)(ThreadId tid, Int fd,
                              void *data_ptr, SizeT data_size);
         /* Hook called during POST door_call()
            to define contents of params->rbuf. */
         void (*post_mem_hook)(ThreadId tid, Int fd,
                               void *rbuf, SizeT rsize);
      };
   };
   struct OpenDoor *next, *prev;
} OpenDoor;

/* List of allocated door fds. */
static OpenDoor *doors_recorded = NULL;
static UInt nr_doors_recorded = 0;

static OpenDoor *door_record_create(void)
{
   OpenDoor *d = VG_(malloc)("syswrap.door_record_create.1", sizeof(OpenDoor));
   d->prev = NULL;
   d->next = doors_recorded;
   if (doors_recorded != NULL)
      doors_recorded->prev = d;
   doors_recorded = d;
   nr_doors_recorded += 1;

   return d;
}

/* Records a server door. */
static void door_record_server(ThreadId tid, Addr server_procedure, Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == TRUE) && (d->server_procedure == server_procedure)) {
         if (d->pathname) {
            VG_(free)(d->pathname);
         }
         break;
      }
      d = d->next;
   }

   if (d == NULL)
      d = door_record_create();
   vg_assert(d != NULL);

   d->server = TRUE;
   d->fd = fd;
   d->server_procedure = server_procedure;
   d->pathname = NULL;
}

/* Records a client door. */
static void door_record_client(ThreadId tid, Int fd,
   void (*pre_mem_hook)(ThreadId tid, Int fd, void *data_ptr, SizeT data_size),
   void (*post_mem_hook)(ThreadId tid, Int fd, void *rbuf, SizeT rsize))
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == FALSE) && (d->fd == fd))
         break;
      d = d->next;
   }

   if (d == NULL)
      d = door_record_create();
   vg_assert(d != NULL);

   d->server = FALSE;
   d->fd = fd;
   d->pre_mem_hook = pre_mem_hook;
   d->post_mem_hook = post_mem_hook;
}

/* Revokes an open door, be it server side or client side. */
static void door_record_revoke(ThreadId tid, Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if (d->fd == fd) {
         if (d->prev != NULL)
            d->prev->next = d->next;
         else
            doors_recorded = d->next;
         if (d->next != NULL)
            d->next->prev = d->prev;

         if ((d->server == TRUE) && (d->pathname != NULL))
            VG_(free)(d->pathname);
         VG_(free)(d);
         nr_doors_recorded -= 1;
         return;
      }
      d = d->next;
   }
}

/* Attaches a server door to a filename. */
static void door_record_server_fattach(Int fd, HChar *pathname)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if (d->fd == fd) {
         vg_assert(d->server == TRUE);

         if (d->pathname != NULL)
            VG_(free)(d->pathname);
         d->pathname = VG_(strdup)("syswrap.door_server_fattach.1", pathname);
         return;
      }
      d = d->next;
   }
}

/* Finds a server door based on the server procedure. */
static const OpenDoor *door_find_by_proc(Addr server_procedure)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server) && (d->server_procedure == server_procedure))
         return d;
      d = d->next;
   }

   return NULL;
}

/* Finds a client door based on fd. */
static const OpenDoor *door_find_by_fd(Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == FALSE) && (d->fd == fd))
         return d;
      d = d->next;
   }

   return NULL;
}
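
/* Illustrative sketch only, not part of the original code and not compiled:
   the intended lifecycle of a door record.  The fd, server procedure and
   pathname below are hypothetical; the real call sites are the door() and
   close() syscall wrappers later in this file. */
#if 0
static void example_door_lifecycle(ThreadId tid, Int fd, Addr proc)
{
   door_record_server(tid, proc, fd);          /* new server door created */
   door_record_server_fattach(fd, (HChar *) "/var/run/example_door");
   if (door_find_by_proc(proc) != NULL) {
      /* door_return() handling can now map the server procedure back to
         its fd and pathname. */
   }
   door_record_revoke(tid, fd);                /* door fd closed/revoked */
}
#endif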

/* ---------------------------------------------------------------------
   PRE/POST wrappers for Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(solaris, name)

/* prototypes */
DECL_TEMPLATE(solaris, sys_exit);
#if defined(SOLARIS_SPAWN_SYSCALL)
DECL_TEMPLATE(solaris, sys_spawn);
#endif /* SOLARIS_SPAWN_SYSCALL */
#if defined(SOLARIS_OLD_SYSCALLS)
DECL_TEMPLATE(solaris, sys_open);
#endif /* SOLARIS_OLD_SYSCALLS */
DECL_TEMPLATE(solaris, sys_close);
DECL_TEMPLATE(solaris, sys_linkat);
DECL_TEMPLATE(solaris, sys_symlinkat);
DECL_TEMPLATE(solaris, sys_time);
DECL_TEMPLATE(solaris, sys_brk);
DECL_TEMPLATE(solaris, sys_stat);
DECL_TEMPLATE(solaris, sys_lseek);
DECL_TEMPLATE(solaris, sys_mount);
DECL_TEMPLATE(solaris, sys_readlinkat);
DECL_TEMPLATE(solaris, sys_stime);
DECL_TEMPLATE(solaris, sys_fstat);
#if defined(SOLARIS_FREALPATHAT_SYSCALL)
DECL_TEMPLATE(solaris, sys_frealpathat);
#endif /* SOLARIS_FREALPATHAT_SYSCALL */
DECL_TEMPLATE(solaris, sys_stty);
DECL_TEMPLATE(solaris, sys_gtty);
DECL_TEMPLATE(solaris, sys_pgrpsys);
DECL_TEMPLATE(solaris, sys_pipe);
DECL_TEMPLATE(solaris, sys_faccessat);
DECL_TEMPLATE(solaris, sys_mknodat);
DECL_TEMPLATE(solaris, sys_sysi86);
DECL_TEMPLATE(solaris, sys_shmsys);
DECL_TEMPLATE(solaris, sys_semsys);
DECL_TEMPLATE(solaris, sys_ioctl);
DECL_TEMPLATE(solaris, sys_fchownat);
DECL_TEMPLATE(solaris, sys_fdsync);
DECL_TEMPLATE(solaris, sys_execve);
DECL_TEMPLATE(solaris, sys_fcntl);
DECL_TEMPLATE(solaris, sys_renameat);
DECL_TEMPLATE(solaris, sys_unlinkat);
DECL_TEMPLATE(solaris, sys_fstatat);
DECL_TEMPLATE(solaris, sys_openat);
DECL_TEMPLATE(solaris, sys_tasksys);
DECL_TEMPLATE(solaris, sys_getpagesizes);
DECL_TEMPLATE(solaris, sys_lwp_park);
DECL_TEMPLATE(solaris, sys_sendfilev);
#if defined(SOLARIS_LWP_NAME_SYSCALL)
DECL_TEMPLATE(solaris, sys_lwp_name);
#endif /* SOLARIS_LWP_NAME_SYSCALL */
DECL_TEMPLATE(solaris, sys_privsys);
DECL_TEMPLATE(solaris, sys_ucredsys);
DECL_TEMPLATE(solaris, sys_sysfs);
DECL_TEMPLATE(solaris, sys_getmsg);
DECL_TEMPLATE(solaris, sys_putmsg);
DECL_TEMPLATE(solaris, sys_lstat);
DECL_TEMPLATE(solaris, sys_sigprocmask);
DECL_TEMPLATE(solaris, sys_sigsuspend);
DECL_TEMPLATE(solaris, sys_sigaction);
DECL_TEMPLATE(solaris, sys_sigpending);
DECL_TEMPLATE(solaris, sys_getsetcontext);
DECL_TEMPLATE(solaris, sys_fchmodat);
DECL_TEMPLATE(solaris, sys_mkdirat);
DECL_TEMPLATE(solaris, sys_statvfs);
DECL_TEMPLATE(solaris, sys_fstatvfs);
DECL_TEMPLATE(solaris, sys_nfssys);
DECL_TEMPLATE(solaris, sys_waitid);
DECL_TEMPLATE(solaris, sys_sigsendsys);
#if defined(SOLARIS_UTIMESYS_SYSCALL)
DECL_TEMPLATE(solaris, sys_utimesys);
#endif /* SOLARIS_UTIMESYS_SYSCALL */
#if defined(SOLARIS_UTIMENSAT_SYSCALL)
DECL_TEMPLATE(solaris, sys_utimensat);
#endif /* SOLARIS_UTIMENSAT_SYSCALL */
DECL_TEMPLATE(solaris, sys_sigresend);
DECL_TEMPLATE(solaris, sys_priocntlsys);
DECL_TEMPLATE(solaris, sys_pathconf);
DECL_TEMPLATE(solaris, sys_mmap);
#if defined(SOLARIS_UUIDSYS_SYSCALL)
DECL_TEMPLATE(solaris, sys_uuidsys);
#endif /* SOLARIS_UUIDSYS_SYSCALL */
DECL_TEMPLATE(solaris, sys_mmapobj);
DECL_TEMPLATE(solaris, sys_memcntl);
DECL_TEMPLATE(solaris, sys_getpmsg);
DECL_TEMPLATE(solaris, sys_putpmsg);
#if defined(SOLARIS_OLD_SYSCALLS)
DECL_TEMPLATE(solaris, sys_rename);
#endif /* SOLARIS_OLD_SYSCALLS */
DECL_TEMPLATE(solaris, sys_uname);
DECL_TEMPLATE(solaris, sys_setegid);
DECL_TEMPLATE(solaris, sys_sysconfig);
DECL_TEMPLATE(solaris, sys_systeminfo);
DECL_TEMPLATE(solaris, sys_seteuid);
DECL_TEMPLATE(solaris, sys_forksys);
#if defined(SOLARIS_GETRANDOM_SYSCALL)
DECL_TEMPLATE(solaris, sys_getrandom);
#endif /* SOLARIS_GETRANDOM_SYSCALL */
DECL_TEMPLATE(solaris, sys_sigtimedwait);
DECL_TEMPLATE(solaris, sys_yield);
DECL_TEMPLATE(solaris, sys_lwp_sema_post);
DECL_TEMPLATE(solaris, sys_lwp_sema_trywait);
DECL_TEMPLATE(solaris, sys_lwp_detach);
DECL_TEMPLATE(solaris, sys_modctl);
DECL_TEMPLATE(solaris, sys_fchroot);
#if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
DECL_TEMPLATE(solaris, sys_system_stats);
#endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
DECL_TEMPLATE(solaris, sys_gettimeofday);
DECL_TEMPLATE(solaris, sys_lwp_create);
DECL_TEMPLATE(solaris, sys_lwp_exit);
DECL_TEMPLATE(solaris, sys_lwp_suspend);
DECL_TEMPLATE(solaris, sys_lwp_continue);
#if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
DECL_TEMPLATE(solaris, sys_lwp_sigqueue);
#else
DECL_TEMPLATE(solaris, sys_lwp_kill);
#endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
DECL_TEMPLATE(solaris, sys_lwp_self);
DECL_TEMPLATE(solaris, sys_lwp_sigmask);
DECL_TEMPLATE(solaris, sys_lwp_private);
DECL_TEMPLATE(solaris, sys_lwp_wait);
DECL_TEMPLATE(solaris, sys_lwp_mutex_wakeup);
DECL_TEMPLATE(solaris, sys_lwp_cond_wait);
DECL_TEMPLATE(solaris, sys_lwp_cond_signal);
DECL_TEMPLATE(solaris, sys_lwp_cond_broadcast);
DECL_TEMPLATE(solaris, sys_pread);
DECL_TEMPLATE(solaris, sys_pwrite);
DECL_TEMPLATE(solaris, sys_lgrpsys);
DECL_TEMPLATE(solaris, sys_rusagesys);
DECL_TEMPLATE(solaris, sys_port);
DECL_TEMPLATE(solaris, sys_pollsys);
DECL_TEMPLATE(solaris, sys_labelsys);
DECL_TEMPLATE(solaris, sys_acl);
DECL_TEMPLATE(solaris, sys_auditsys);
DECL_TEMPLATE(solaris, sys_p_online);
DECL_TEMPLATE(solaris, sys_sigqueue);
DECL_TEMPLATE(solaris, sys_clock_gettime);
DECL_TEMPLATE(solaris, sys_clock_settime);
DECL_TEMPLATE(solaris, sys_clock_getres);
DECL_TEMPLATE(solaris, sys_timer_create);
DECL_TEMPLATE(solaris, sys_timer_delete);
DECL_TEMPLATE(solaris, sys_timer_settime);
DECL_TEMPLATE(solaris, sys_timer_gettime);
DECL_TEMPLATE(solaris, sys_timer_getoverrun);
DECL_TEMPLATE(solaris, sys_facl);
DECL_TEMPLATE(solaris, sys_door);
DECL_TEMPLATE(solaris, sys_schedctl);
DECL_TEMPLATE(solaris, sys_pset);
DECL_TEMPLATE(solaris, sys_resolvepath);
DECL_TEMPLATE(solaris, sys_lwp_mutex_timedlock);
DECL_TEMPLATE(solaris, sys_lwp_rwlock_sys);
DECL_TEMPLATE(solaris, sys_lwp_sema_timedwait);
DECL_TEMPLATE(solaris, sys_zone);
DECL_TEMPLATE(solaris, sys_getcwd);
DECL_TEMPLATE(solaris, sys_so_socket);
DECL_TEMPLATE(solaris, sys_so_socketpair);
DECL_TEMPLATE(solaris, sys_bind);
DECL_TEMPLATE(solaris, sys_listen);
DECL_TEMPLATE(solaris, sys_accept);
DECL_TEMPLATE(solaris, sys_connect);
DECL_TEMPLATE(solaris, sys_shutdown);
DECL_TEMPLATE(solaris, sys_recv);
DECL_TEMPLATE(solaris, sys_recvfrom);
DECL_TEMPLATE(solaris, sys_recvmsg);
DECL_TEMPLATE(solaris, sys_send);
DECL_TEMPLATE(solaris, sys_sendmsg);
DECL_TEMPLATE(solaris, sys_sendto);
DECL_TEMPLATE(solaris, sys_getpeername);
DECL_TEMPLATE(solaris, sys_getsockname);
DECL_TEMPLATE(solaris, sys_getsockopt);
DECL_TEMPLATE(solaris, sys_setsockopt);
DECL_TEMPLATE(solaris, sys_lwp_mutex_register);
DECL_TEMPLATE(solaris, sys_uucopy);
DECL_TEMPLATE(solaris, sys_umount2);

DECL_TEMPLATE(solaris, fast_gethrtime);
DECL_TEMPLATE(solaris, fast_gethrvtime);
DECL_TEMPLATE(solaris, fast_gethrestime);
DECL_TEMPLATE(solaris, fast_getlgrp);
#if defined(SOLARIS_GETHRT_FASTTRAP)
DECL_TEMPLATE(solaris, fast_gethrt);
#endif /* SOLARIS_GETHRT_FASTTRAP */
#if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
DECL_TEMPLATE(solaris, fast_getzoneoffset);
#endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */

/* implementation */
PRE(sys_exit)
{
   /* void exit(int status); */
   ThreadId t;

   PRINT("sys_exit( %ld )", SARG1);
   PRE_REG_READ1(void, "exit", int, status);

   for (t = 1; t < VG_N_THREADS; t++) {
      if (VG_(threads)[t].status == VgTs_Empty)
         continue;

      /* Assign the exit code, VG_(nuke_all_threads_except) will assign
         the exitreason. */
      VG_(threads)[t].os_state.exitcode = ARG1;
   }

   /* Indicate in all other threads that the process is exiting.
      Then wait using VG_(reap_threads) for these threads to disappear.
      See comments in syswrap-linux.c, PRE(sys_exit_group) wrapper,
      for reasoning why this cannot give a deadlock. */
   VG_(nuke_all_threads_except)(tid, VgSrc_ExitProcess);
   VG_(reap_threads)(tid);
   VG_(threads)[tid].exitreason = VgSrc_ExitThread;
   /* We do assign VgSrc_ExitThread and not VgSrc_ExitProcess, as this thread
      is the thread calling exit_group and so its registers must be considered
      as not reachable.  See pub_tool_machine.h VG_(apply_to_GP_regs). */

   /* We have to claim the syscall already succeeded. */
   SET_STATUS_Success(0);
}

#if defined(SOLARIS_SPAWN_SYSCALL)
static Bool spawn_pre_check_kfa(ThreadId tid, SyscallStatus *status,
                                vki_kfile_attr_t *kfa)
{
   PRE_FIELD_READ("spawn(attrs->kfa_size)", kfa->kfa_size);
   PRE_FIELD_READ("spawn(attrs->kfa_type)", kfa->kfa_type);

   if (ML_(safe_to_deref)(kfa, kfa->kfa_size)) {
      switch (kfa->kfa_type) {
      case VKI_FA_DUP2:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         PRE_FIELD_READ("spawn(attrs->kfa_newfiledes)", kfa->kfa_newfiledes);
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(dup2)", tid, False) ||
             !ML_(fd_allowed)(kfa->kfa_newfiledes, "spawn(dup2)", tid, False)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         break;
      case VKI_FA_CLOSE:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         /* If doing -d style logging (which is to fd = 2 = stderr),
            don't allow that filedes to be closed.  See ML_(fd_allowed)(). */
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(close)", tid, False) ||
             (kfa->kfa_filedes == 2 && VG_(debugLog_getLevel)() > 0)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         break;
      case VKI_FA_CLOSEFROM:
         /* :TODO: All file descriptors greater than or equal to
            kfa->kfa_filedes would have to be checked. */
         VG_(unimplemented)("Support for spawn() with file attribute type "
                            "FA_CLOSEFROM.");
         break;
      case VKI_FA_OPEN:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         PRE_FIELD_READ("spawn(attrs->kfa_oflag)", kfa->kfa_oflag);
         PRE_FIELD_READ("spawn(attrs->kfa_mode)", kfa->kfa_mode);
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(open)", tid, False)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         /* fallthrough */
      case VKI_FA_CHDIR:
         PRE_FIELD_READ("spawn(attrs->kfa_pathsize)", kfa->kfa_pathsize);
         if (kfa->kfa_pathsize != 0) {
            PRE_MEM_RASCIIZ("spawn(attrs->kfa_data)", (Addr) kfa->kfa_data);
         }
         break;
      default:
         VG_(unimplemented)("Support for spawn() with file attribute type %u.",
                            kfa->kfa_type);
      }
   }

   return True;
}

PRE(sys_spawn)
{
   /* int spawn(char *path, void *attrs, size_t attrsize,
                char *argenv, size_t aesize); */
   PRINT("sys_spawn ( %#lx(%s), %#lx, %lu, %#lx, %lu )",
         ARG1, (HChar *) ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "spawn", const char *, path, void *, attrs,
                 size_t, attrsize, char *, argenv, size_t, aesize);

   /* First check the input arguments. */
   PRE_MEM_RASCIIZ("spawn(path)", ARG1);
   if (ARG3 > 0) {
      /*  --- vki_kspawn_attr_t --
          | ksa_version          |
          | ksa_size             |
          | ksa_attr_off         | -----| (only if != 0)
          | ksa_attr_size        |      |
          | ksa_path_off         | =====|====| (only if != 0)
          | ksa_path_size        |      |    |
          | ksa_shell_off        | -----|----|----| (only if != 0)
          | ksa_shell_size       |      |    |    |
          | ksa_data[0]          |      |    |    |
          ------------------------      |    |    |
          | vki_spawn_attr_t     | <----|    |    |
          ------------------------           |    |
          | path                 | <---------|    |
          ------------------------                |
          | shell                | <---------------
          ------------------------
          | file actions         | (not included in ksa_size, only in ARG3)
          ------------------------

          ksa_size = sizeof(vki_kspawn_attr_t) + ksa_attr_size + ksa_path_size
                     + ksa_shell_size
          attrs_size (ARG3) = ksa_size + file actions size */

      vki_kspawn_attr_t *attrs = (vki_kspawn_attr_t *) ARG2;
      PRE_FIELD_READ("spawn(attrs->ksa_version)", attrs->ksa_version);
      PRE_FIELD_READ("spawn(attrs->ksa_size)", attrs->ksa_size);
      PRE_FIELD_READ("spawn(attrs->ksa_attr_off)", attrs->ksa_attr_off);
      PRE_FIELD_READ("spawn(attrs->ksa_path_off)", attrs->ksa_path_off);
      PRE_FIELD_READ("spawn(attrs->ksa_shell_off)", attrs->ksa_shell_off);

      if (ML_(safe_to_deref)(attrs, sizeof(vki_kspawn_attr_t))) {
         if (attrs->ksa_version != VKI_SPAWN_VERSION) {
            VG_(unimplemented)("Support for spawn() with attributes "
                               "version %u.", attrs->ksa_version);
         }

         if (attrs->ksa_attr_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_attr_size)", attrs->ksa_attr_size);
            vki_spawn_attr_t *sap =
                (vki_spawn_attr_t *) ((Addr) attrs + attrs->ksa_attr_off);
            PRE_MEM_READ("spawn(attrs->ksa_attr)",
                         (Addr) sap, attrs->ksa_attr_size);
            if (ML_(safe_to_deref)(sap, sizeof(vki_spawn_attr_t))) {
               if (sap->sa_psflags & VKI_POSIX_SPAWN_SETVAMASK_NP) {
                  VG_(unimplemented)("Support for spawn() with attributes flag "
                                     "including POSIX_SPAWN_SETVAMASK_NP.");
               }
               /* paranoia */
               Int rem = sap->sa_psflags & ~(
                  VKI_POSIX_SPAWN_RESETIDS | VKI_POSIX_SPAWN_SETPGROUP |
                  VKI_POSIX_SPAWN_SETSIGDEF | VKI_POSIX_SPAWN_SETSIGMASK |
                  VKI_POSIX_SPAWN_SETSCHEDPARAM | VKI_POSIX_SPAWN_SETSCHEDULER |
                  VKI_POSIX_SPAWN_SETSID_NP | VKI_POSIX_SPAWN_SETVAMASK_NP |
                  VKI_POSIX_SPAWN_SETSIGIGN_NP | VKI_POSIX_SPAWN_NOSIGCHLD_NP |
                  VKI_POSIX_SPAWN_WAITPID_NP | VKI_POSIX_SPAWN_NOEXECERR_NP);
               if (rem != 0) {
                  VG_(unimplemented)("Support for spawn() with attributes flag "
                                     "%#x.", sap->sa_psflags);
               }
            }
         }

         if (attrs->ksa_path_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_path_size)", attrs->ksa_path_size);
            PRE_MEM_RASCIIZ("spawn(attrs->ksa_path)",
                            (Addr) attrs + attrs->ksa_path_off);
         }

         if (attrs->ksa_shell_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_shell_size)",
                           attrs->ksa_shell_size);
            PRE_MEM_RASCIIZ("spawn(attrs->ksa_shell)",
                            (Addr) attrs + attrs->ksa_shell_off);
         }

         vki_kfile_attr_t *kfa = (vki_kfile_attr_t *) (ARG2 + attrs->ksa_size);
         while ((Addr) kfa < ARG2 + ARG3) {
            if (spawn_pre_check_kfa(tid, status, kfa) == False) {
               return;
            }
            kfa = (vki_kfile_attr_t *) ((Addr) kfa + kfa->kfa_size);
         }
      }
   }
   PRE_MEM_READ("spawn(argenv)", ARG4, ARG5);

   /* Check that the name at least begins in client-accessible storage. */
   if ((ARG1 == 0) || !ML_(safe_to_deref)((HChar *) ARG1, 1)) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }

   /* Check that attrs reside in client-accessible storage. */
   if (ARG2 != 0) {
      if (!VG_(am_is_valid_for_client)(ARG2, ARG3, VKI_PROT_READ)) {
         SET_STATUS_Failure(VKI_EFAULT);
         return;
      }
   }

   /* Check that argenv resides in client-accessible storage.
      Solaris does not allow spawn() to be performed without any arguments
      and environment variables specified. */
   if ((ARG4 == 0) /* obviously bogus */ ||
       !VG_(am_is_valid_for_client)(ARG4, ARG5, VKI_PROT_READ)) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }

   /* Copy existing attrs or create empty minimal ones. */
   vki_kspawn_attr_t *attrs;
   SizeT attrs_size;
   if (ARG2 == 0) {
      /* minimalistic kspawn_attr_t + spawn_attr_t */
      attrs_size = sizeof(vki_kspawn_attr_t) + sizeof(vki_spawn_attr_t);
      attrs = VG_(calloc)("syswrap.spawn.1", 1, attrs_size);
      attrs->ksa_version = VKI_SPAWN_VERSION;
      attrs->ksa_size = attrs_size;
      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
   } else if (((vki_kspawn_attr_t *) ARG2)->ksa_attr_off == 0) {
      /* existing kspawn_attr_t but missing spawn_attr_t */
      attrs_size = ARG3 + sizeof(vki_spawn_attr_t);
      attrs = VG_(calloc)("syswrap.spawn.2", 1, attrs_size);
      VG_(memcpy)(attrs, (void *) ARG2, sizeof(vki_kspawn_attr_t));
      SizeT file_actions_size = ARG3 - attrs->ksa_size;
      attrs->ksa_size += sizeof(vki_spawn_attr_t);
      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
      if (attrs->ksa_path_off != 0) {
         VG_(memcpy)((HChar *) attrs + attrs->ksa_path_off +
                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
                     attrs->ksa_path_off, attrs->ksa_path_size);
         attrs->ksa_path_off += sizeof(vki_spawn_attr_t);
      }
      if (attrs->ksa_shell_off != 0) {
         VG_(memcpy)((HChar *) attrs + attrs->ksa_shell_off +
                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
                     attrs->ksa_shell_off, attrs->ksa_shell_size);
         attrs->ksa_shell_off += sizeof(vki_spawn_attr_t);
      }
      if (file_actions_size > 0) {
         VG_(memcpy)((HChar *) attrs + attrs_size - file_actions_size,
                     (HChar *) ARG2 + ARG3 - file_actions_size,
                     file_actions_size);
      }
   } else {
      /* existing kspawn_attr_t + spawn_attr_t */
      attrs_size = ARG3;
      attrs = VG_(malloc)("syswrap.spawn.3", attrs_size);
      VG_(memcpy)(attrs, (void *) ARG2, attrs_size);
   }
   vki_spawn_attr_t *spa = (vki_spawn_attr_t *) ((HChar *) attrs +
                                                 attrs->ksa_attr_off);

   /* Convert the argv and envp parts of argenv into separate XArrays.
      Duplicate the strings because argv and envp will be modified later. */
   XArray *argv = VG_(newXA)(VG_(malloc), "syswrap.spawn.4",
                             VG_(free), sizeof(HChar *));
   XArray *envp = VG_(newXA)(VG_(malloc), "syswrap.spawn.5",
                             VG_(free), sizeof(HChar *));

   HChar *argenv = (HChar *) ARG4;
   XArray *current_xa = argv;
   while ((Addr) argenv < ARG4 + ARG5) {
      if (*argenv == '\0') {
         argenv += 1;
         if (current_xa == argv) {
            current_xa = envp;
            if ((*argenv == '\0') && ((Addr) argenv == ARG4 + ARG5 - 1)) {
               /* The envp part is empty; it contained only {NULL}. */
               break;
            }
         } else {
            if ((Addr) argenv != ARG4 + ARG5) {
               if (VG_(clo_trace_syscalls))
                  VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
               SET_STATUS_Failure(VKI_EINVAL);
               goto exit;
            }
            break;
         }
      }

      if (*argenv != '\1') {
         if (VG_(clo_trace_syscalls))
            VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
         SET_STATUS_Failure(VKI_EINVAL);
         goto exit;
      }
      argenv += 1;

      HChar *duplicate = VG_(strdup)("syswrap.spawn.6", argenv);
      VG_(addToXA)(current_xa, &duplicate);
      argenv += VG_(strlen)(argenv) + 1;
   }

   /* Debug-only printing. */
   if (0) {
      VG_(printf)("\nARG1 = %#lx(%s)\n", ARG1, (HChar *) ARG1);
      VG_(printf)("ARG4 (argv) = ");
      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
      }

      VG_(printf)("\nARG4 (envp) = ");
      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
      }
      VG_(printf)("\n");
   }

   /* Decide whether or not we want to trace the spawned child.
      Omit the executable name itself from child_argv. */
   const HChar **child_argv = VG_(malloc)("syswrap.spawn.7",
                                    (VG_(sizeXA)(argv) - 1) * sizeof(HChar *));
   for (Word i = 1; i < VG_(sizeXA)(argv); i++) {
      child_argv[i - 1] = *(HChar **) VG_(indexXA)(argv, i);
   }
   Bool trace_this_child = VG_(should_we_trace_this_child)((HChar *) ARG1,
                                                           child_argv);
   VG_(free)(child_argv);

   /* If we're tracing the child, and the launcher name looks bogus (possibly
      because launcher.c couldn't figure it out, see comments therein) then we
      have no option but to fail. */
   if (trace_this_child &&
       (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
      SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
      goto exit;
   }

   /* Set up the child's exe path. */
   const HChar *path = (const HChar *) ARG1;
   const HChar *launcher_basename = NULL;
   if (trace_this_child) {
      /* We want to exec the launcher. */
      path = VG_(name_of_launcher);
      vg_assert(path != NULL);

      launcher_basename = VG_(strrchr)(path, '/');
      if ((launcher_basename == NULL) || (launcher_basename[1] == '\0')) {
         launcher_basename = path;  /* hmm, tres dubious */
      } else {
         launcher_basename++;
      }
   }

   /* Set up the child's environment.

      Remove the valgrind-specific stuff from the environment so the child
      doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc.  This is done
      unconditionally, since if we are tracing the child, the child valgrind
      will set up the appropriate client environment.

      Then, if tracing the child, set VALGRIND_LIB for it. */
   HChar **child_envp = VG_(calloc)("syswrap.spawn.8",
                                    VG_(sizeXA)(envp) + 1, sizeof(HChar *));
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      child_envp[i] = *(HChar **) VG_(indexXA)(envp, i);
   }
   VG_(env_remove_valgrind_env_stuff)(child_envp, /* ro_strings */ False,
                                      VG_(free));

   /* Stuff was removed from child_envp; reflect that in the envp XArray. */
   VG_(dropTailXA)(envp, VG_(sizeXA)(envp));
   for (UInt i = 0; child_envp[i] != NULL; i++) {
      VG_(addToXA)(envp, &child_envp[i]);
   }
   VG_(free)(child_envp);

   if (trace_this_child) {
      /* Set VALGRIND_LIB in envp. */
      SizeT len = VG_(strlen)(VALGRIND_LIB) + VG_(strlen)(VG_(libdir)) + 2;
      HChar *valstr = VG_(malloc)("syswrap.spawn.9", len);
      VG_(sprintf)(valstr, "%s=%s", VALGRIND_LIB, VG_(libdir));
      VG_(addToXA)(envp, &valstr);
   }

   /* Set up the child's args.  If not tracing it, they are left untouched.
      Otherwise, they are:

      [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG4[1..],

      except that the first VG_(args_for_valgrind_noexecpass) args are
      omitted. */
   if (trace_this_child) {
      vg_assert(VG_(args_for_valgrind) != NULL);
      vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
      vg_assert(VG_(args_for_valgrind_noexecpass)
                <= VG_(sizeXA)(VG_(args_for_valgrind)));

      /* So what args will there be?  Bear with me... */
      /* ... launcher basename, ... */
      HChar *duplicate = VG_(strdup)("syswrap.spawn.10", launcher_basename);
      VG_(insertIndexXA)(argv, 0, &duplicate);

      /* ... Valgrind's args, ... */
      UInt v_args = VG_(sizeXA)(VG_(args_for_valgrind));
      v_args -= VG_(args_for_valgrind_noexecpass);
      for (Word i = VG_(args_for_valgrind_noexecpass);
           i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
         duplicate = VG_(strdup)("syswrap.spawn.11",
                        *(HChar **) VG_(indexXA)(VG_(args_for_valgrind), i));
         VG_(insertIndexXA)(argv, 1 + i, &duplicate);
      }

      /* ... name of the client executable, ... */
      duplicate = VG_(strdup)("syswrap.spawn.12", (HChar *) ARG1);
      VG_(insertIndexXA)(argv, 1 + v_args, &duplicate);

      /* ... and args for the client executable (without [0]). */
      duplicate = *(HChar **) VG_(indexXA)(argv, 1 + v_args + 1);
      VG_(free)(duplicate);
      VG_(removeIndexXA)(argv, 1 + v_args + 1);
   }

   /* Debug-only printing. */
   if (0) {
      VG_(printf)("\npath = %s\n", path);
      VG_(printf)("argv = ");
      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
      }

      VG_(printf)("\nenvp = ");
      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
      }
      VG_(printf)("\n");
   }

   /* Set the signal state up for the spawned child.

      Signals set to be caught are equivalent to signals set to the default
      action, from the child's perspective.

      Therefore query SCSS and prepare default (DFL) and ignore (IGN) signal
      sets.  Then combine these sets with those passed from the client, if
      the flags POSIX_SPAWN_SETSIGDEF or POSIX_SPAWN_SETSIGIGN_NP have been
      specified. */
   vki_sigset_t sig_default;
   vki_sigset_t sig_ignore;
   VG_(sigemptyset)(&sig_default);
   VG_(sigemptyset)(&sig_ignore);
   for (Int i = 1; i < VG_(max_signal); i++) {
      vki_sigaction_fromK_t sa;
      VG_(do_sys_sigaction)(i, NULL, &sa); /* query SCSS */
      if (sa.sa_handler == VKI_SIG_IGN) {
         VG_(sigaddset)(&sig_ignore, i);
      } else {
         VG_(sigaddset)(&sig_default, i);
      }
   }

   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGDEF) {
      VG_(sigaddset_from_set)(&spa->sa_sigdefault, &sig_default);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGDEF;
      spa->sa_sigdefault = sig_default;
   }

   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGIGN_NP) {
      VG_(sigaddset_from_set)(&spa->sa_sigignore, &sig_ignore);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGIGN_NP;
      spa->sa_sigignore = sig_ignore;
   }

   /* Set the signal mask for the spawned child.

      Analogous to signal handlers: query SCSS for the blocked signals mask
      and combine this mask with that passed from the client, if the flag
      POSIX_SPAWN_SETSIGMASK has been specified. */
   vki_sigset_t *sigmask = &VG_(get_ThreadState)(tid)->sig_mask;
   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGMASK) {
      VG_(sigaddset_from_set)(&spa->sa_sigmask, sigmask);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGMASK;
      spa->sa_sigmask = *sigmask;
   }

   /* Lastly, reconstruct argenv from argv + envp. */
   SizeT argenv_size = 1 + 1;
   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(argv, i)) + 2;
   }
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(envp, i)) + 2;
   }

   argenv = VG_(malloc)("syswrap.spawn.13", argenv_size);
   HChar *current = argenv;
#define COPY_CHAR_TO_ARGENV(dst, character) \
   do {                                     \
      *(dst) = character;                   \
      (dst) += 1;                           \
   } while (0)
#define COPY_STRING_TO_ARGENV(dst, src)       \
   do {                                       \
      COPY_CHAR_TO_ARGENV(dst, '\1');         \
      SizeT src_len = VG_(strlen)((src)) + 1; \
      VG_(memcpy)((dst), (src), src_len);     \
      (dst) += src_len;                       \
   } while (0)

   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(argv, i));
   }
   COPY_CHAR_TO_ARGENV(current, '\0');
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(envp, i));
   }
   COPY_CHAR_TO_ARGENV(current, '\0');
   vg_assert(current == argenv + argenv_size);
#undef COPY_CHAR_TO_ARGENV
#undef COPY_STRING_TO_ARGENV

   /* The actual spawn() syscall. */
   SysRes res = VG_(do_syscall5)(__NR_spawn, (UWord) path, (UWord) attrs,
                                 attrs_size, (UWord) argenv, argenv_size);
   SET_STATUS_from_SysRes(res);
   VG_(free)(argenv);

   if (SUCCESS) {
      PRINT("   spawn: process %d spawned child %ld\n", VG_(getpid)(), RES);
   }

exit:
   VG_(free)(attrs);
   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      VG_(free)(*(HChar **) VG_(indexXA)(argv, i));
   }
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      VG_(free)(*(HChar **) VG_(indexXA)(envp, i));
   }
   VG_(deleteXA)(argv);
   VG_(deleteXA)(envp);
}
#endif /* SOLARIS_SPAWN_SYSCALL */
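
/* Illustrative note only, not part of the original code: the flattened
   argenv format that PRE(sys_spawn) above parses and then reconstructs.
   Each argv/envp string is prefixed with '\1' and NUL-terminated; a single
   '\0' terminates the argv block and another one terminates the envp block.
   For a hypothetical spawn of /bin/ls with argv = {"ls", "-l", NULL} and
   envp = {"PATH=/usr/bin", NULL}, the client would pass:

      "\1ls\0" "\1-l\0" "\0"            <- argv strings, then terminator
      "\1PATH=/usr/bin\0" "\0"          <- envp strings, then terminator

   which is exactly what the parsing loop splits into the argv/envp XArrays
   and what COPY_STRING_TO_ARGENV/COPY_CHAR_TO_ARGENV emit again before the
   real __NR_spawn call. */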
1630 /* Handles the case where the open is of /proc/self/psinfo or
1631 /proc/<pid>/psinfo. Fetch fresh contents into psinfo_t,
1632 fake fname, psargs, argc and argv. Write the structure to the fake
1633 file we cooked up at startup (in m_main) and give out a copy of this
1634 fd. Also seek the cloned fd back to the start. */
1635 static Bool handle_psinfo_open(SyscallStatus *status,
1636 Bool use_openat,
1637 const HChar *filename,
1638 Int arg1, UWord arg3, UWord arg4)
1640 if (!ML_(safe_to_deref)((const void *) filename, 1))
1641 return False;
1643 HChar name[VKI_PATH_MAX]; // large enough
1644 VG_(sprintf)(name, "/proc/%d/psinfo", VG_(getpid)());
1646 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/psinfo"))
1647 return False;
1649 /* Use original arguments to open() or openat(). */
1650 SysRes sres;
1651 #if defined(SOLARIS_OLD_SYSCALLS)
1652 if (use_openat)
1653 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1654 arg3, arg4);
1655 else
1656 sres = VG_(do_syscall3)(SYS_open, (UWord) filename, arg3, arg4);
1657 #else
1658 vg_assert(use_openat == True);
1659 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1660 arg3, arg4);
1661 #endif /* SOLARIS_OLD_SYSCALLS */
1663 if (sr_isError(sres)) {
1664 SET_STATUS_from_SysRes(sres);
1665 return True;
1667 Int fd = sr_Res(sres);
1669 vki_psinfo_t psinfo;
1670 sres = VG_(do_syscall3)(SYS_read, fd, (UWord) &psinfo, sizeof(psinfo));
1671 if (sr_isError(sres)) {
1672 SET_STATUS_from_SysRes(sres);
1673 VG_(close)(fd);
1674 return True;
1676 if (sr_Res(sres) != sizeof(psinfo)) {
1677 SET_STATUS_Failure(VKI_ENODATA);
1678 VG_(close)(fd);
1679 return True;
1682 VG_(close)(fd);
1684 VG_(client_fname)(psinfo.pr_fname, sizeof(psinfo.pr_fname), True);
1685 VG_(client_cmd_and_args)(psinfo.pr_psargs, sizeof(psinfo.pr_psargs));
1687 Addr *ptr = (Addr *) VG_(get_initial_client_SP)();
1688 psinfo.pr_argc = *ptr++;
1689 psinfo.pr_argv = (Addr) ptr;
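      /* The two assignments above rely on the standard SysV/Solaris process
         startup stack layout at the initial client SP (a sketch, not an
         authoritative definition):

            SP -> [ argc ]
                  [ argv[0] ] ... [ argv[argc - 1] ] [ NULL ]
                  [ envp[0] ] ... [ NULL ]
                  [ auxv entries ... ]

         so *SP holds argc and SP + sizeof(Addr) is the start of argv[]. */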
1691 sres = VG_(do_syscall4)(SYS_pwrite, VG_(cl_psinfo_fd),
1692 (UWord) &psinfo, sizeof(psinfo), 0);
1693 if (sr_isError(sres)) {
1694 SET_STATUS_from_SysRes(sres);
1695 return True;
1698 sres = VG_(dup)(VG_(cl_psinfo_fd));
1699 SET_STATUS_from_SysRes(sres);
1700 if (!sr_isError(sres)) {
1701 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1702 if (off < 0)
1703 SET_STATUS_Failure(VKI_EMFILE);
1706 return True;
1709 #if defined(SOLARIS_PROC_CMDLINE)
1710 /* Handles the case where the open is of /proc/self/cmdline or
1711 /proc/<pid>/cmdline. Just give it a copy of VG_(cl_cmdline_fd) for the
1712 fake file we cooked up at startup (in m_main). Also, seek the
1713 cloned fd back to the start. */
1714 static Bool handle_cmdline_open(SyscallStatus *status, const HChar *filename)
1716 if (!ML_(safe_to_deref)((const void *) filename, 1))
1717 return False;
1719 HChar name[VKI_PATH_MAX]; // large enough
1720 VG_(sprintf)(name, "/proc/%d/cmdline", VG_(getpid)());
1722 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/cmdline"))
1723 return False;
1725 SysRes sres = VG_(dup)(VG_(cl_cmdline_fd));
1726 SET_STATUS_from_SysRes(sres);
1727 if (!sr_isError(sres)) {
1728 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1729 if (off < 0)
1730 SET_STATUS_Failure(VKI_EMFILE);
1733 return True;
1735 #endif /* SOLARIS_PROC_CMDLINE */
1738 #if defined(SOLARIS_OLD_SYSCALLS)
1739 PRE(sys_open)
1741 /* int open(const char *filename, int flags);
1742 int open(const char *filename, int flags, mode_t mode); */
1744 if (ARG2 & VKI_O_CREAT) {
1745 /* 3-arg version */
1746 PRINT("sys_open ( %#lx(%s), %ld, %ld )", ARG1, (HChar *) ARG1,
1747 SARG2, ARG3);
1748 PRE_REG_READ3(long, "open", const char *, filename,
1749 int, flags, vki_mode_t, mode);
1750 } else {
1751 /* 2-arg version */
1752 PRINT("sys_open ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
1753 PRE_REG_READ2(long, "open", const char *, filename, int, flags);
1756 PRE_MEM_RASCIIZ("open(filename)", ARG1);
1758 if (ML_(handle_auxv_open)(status, (const HChar*)ARG1, ARG2))
1759 return;
1761 if (handle_psinfo_open(status, False /*use_openat*/, (const HChar*)ARG1, 0,
1762 ARG2, ARG3))
1763 return;
1765 *flags |= SfMayBlock;
1768 POST(sys_open)
1770 if (!ML_(fd_allowed)(RES, "open", tid, True)) {
1771 VG_(close)(RES);
1772 SET_STATUS_Failure(VKI_EMFILE);
1773 } else if (VG_(clo_track_fds))
1774 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG1);
1776 #endif /* SOLARIS_OLD_SYSCALLS */
1778 PRE(sys_close)
1780 WRAPPER_PRE_NAME(generic, sys_close)(tid, layout, arrghs, status,
1781 flags);
1784 POST(sys_close)
1786 WRAPPER_POST_NAME(generic, sys_close)(tid, arrghs, status);
1787 door_record_revoke(tid, ARG1);
1788 /* Possibly an explicitly open'ed client door fd was just closed. The generic
1789 sys_close wrapper records the close only if VG_(clo_track_fds) is True. */
1790 if (!VG_(clo_track_fds))
1791 ML_(record_fd_close)(ARG1);
1794 PRE(sys_linkat)
1796 /* int linkat(int fd1, const char *path1, int fd2,
1797 const char *path2, int flag);
1800 /* Interpret the first and third arguments as 32-bit values even on 64-bit
1801 architecture. This is different from Linux, for example, where glibc
1802 sign-extends them. */
1803 Int fd1 = (Int) ARG1;
1804 Int fd2 = (Int) ARG3;
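   /* Illustration of the truncation above (hypothetical values): a client
      that places the 32-bit value 0xffffffff in the argument register
      without sign extension makes ARG1 arrive as 0x00000000ffffffff;
      (Int) ARG1 then recovers -1, whereas interpreting the whole 64-bit
      register as a signed value would give 4294967295 instead. The same
      reasoning applies to fd2 and to the other *at() wrappers below. */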
1806 PRINT("sys_linkat ( %d, %#lx(%s), %d, %#lx(%s), %ld )",
1807 fd1, ARG2, (HChar *) ARG2, fd2, ARG4, (HChar *) ARG4, SARG5);
1808 PRE_REG_READ5(long, "linkat", int, fd1, const char *, path1,
1809 int, fd2, const char *, path2, int, flags);
1810 PRE_MEM_RASCIIZ("linkat(path1)", ARG2);
1811 PRE_MEM_RASCIIZ("linkat(path2)", ARG4);
1813 /* Be strict but ignore fd1/fd2 for absolute path1/path2. */
1814 if (fd1 != VKI_AT_FDCWD
1815 && ML_(safe_to_deref)((void *) ARG2, 1)
1816 && ((HChar *) ARG2)[0] != '/'
1817 && !ML_(fd_allowed)(fd1, "linkat", tid, False)) {
1818 SET_STATUS_Failure(VKI_EBADF);
1820 if (fd2 != VKI_AT_FDCWD
1821 && ML_(safe_to_deref)((void *) ARG4, 1)
1822 && ((HChar *) ARG4)[0] != '/'
1823 && !ML_(fd_allowed)(fd2, "linkat", tid, False)) {
1824 SET_STATUS_Failure(VKI_EBADF);
1827 *flags |= SfMayBlock;
1830 PRE(sys_symlinkat)
1832 /* int symlinkat(const char *path1, int fd, const char *path2); */
1834 /* Interpret the second argument as 32-bit value even on 64-bit architecture.
1835 This is different from Linux, for example, where glibc sign-extends it. */
1836 Int fd = (Int) ARG2;
1838 PRINT("sys_symlinkat ( %#lx(%s), %d, %#lx(%s) )",
1839 ARG1, (HChar *) ARG1, fd, ARG3, (HChar *) ARG3);
1840 PRE_REG_READ3(long, "symlinkat", const char *, path1, int, fd,
1841 const char *, path2);
1842 PRE_MEM_RASCIIZ("symlinkat(path1)", ARG1);
1843 PRE_MEM_RASCIIZ("symlinkat(path2)", ARG3);
1845 /* Be strict but ignore fd for absolute path2. */
1846 if (fd != VKI_AT_FDCWD
1847 && ML_(safe_to_deref)((void *) ARG3, 1)
1848 && ((HChar *) ARG3)[0] != '/'
1849 && !ML_(fd_allowed)(fd, "symlinkat", tid, False))
1850 SET_STATUS_Failure(VKI_EBADF);
1852 *flags |= SfMayBlock;
1855 PRE(sys_time)
1857 /* time_t time(); */
1858 PRINT("sys_time ( )");
1859 PRE_REG_READ0(long, "time");
1862 /* Data segment for brk (heap). It is an expandable anonymous mapping
1863 abutting a 1-page reservation. The data segment starts at VG_(brk_base)
1864 and runs up to VG_(brk_limit). Neither of these two values has to be
1865 page-aligned.
1866 Initial data segment is established (see initimg-solaris.c for rationale):
1867 - directly during client program image initialization,
1868 - or on demand when the executed program is the runtime linker itself,
1869 after it has loaded its target dynamic executable (see PRE(sys_mmapobj)),
1870 or when the first brk() syscall is made.
1872 Notable facts:
1873 - VG_(brk_base) is not page aligned; does not move
1874 - VG_(brk_limit) moves between [VG_(brk_base), data segment end]
1875 - data segment end is always page aligned
1876 - right after data segment end is 1-page reservation
1878    |          heap           | 1 page
1879 +------+------+--------------+-------+
1880 | BSS  | anon | anon         | resvn |
1881 +------+------+--------------+-------+
1883    ^   ^      ^              ^
1884    |   |      |              |
1885    |   |      |              data segment end
1886    |   |      VG_(brk_limit) -- no alignment constraint
1887    |   brk_base_pgup -- page aligned
1888    VG_(brk_base) -- not page aligned -- does not move
1890 Because VG_(brk_base) is not page-aligned and initially lies within the
1891 pre-established BSS (data) segment, the code below has to take special
1892 care to handle this.
1894 The reservation segment prevents the data segment from merging with a
1895 pre-existing segment. This is normally not an issue because the address
1896 space manager satisfies requests for client address space from the highest
1897 available addresses. However, when memory is low, the data segment can run
1898 into mmap'ed objects, and the reservation segment keeps them apart.
1899 The page that contains VG_(brk_base) is already allocated by the program's
1900 loaded data segment. The brk syscall wrapper handles this special case. */
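/* Worked example (illustrative numbers only, assuming a 4 KiB page size):
   with VG_(brk_base) == 0x0806a321, VG_(setup_client_dataseg)() below gives

      anon_start  = VG_PGROUNDUP(0x0806a321) = 0x0806b000
      anon segment: [anon_start, anon_start + anon_size)
      reservation : one page starting at anon_start + anon_size

   while the range [0x0806a321, 0x0806b000) stays inside the already-loaded
   data/BSS segment -- the special case mentioned above. */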
1902 static Bool brk_segment_established = False;
1904 /* Establishes initial data segment for brk (heap). */
1905 Bool VG_(setup_client_dataseg)(void)
1907 /* Segment size is initially at least 1 MB and at most 8 MB. */
1908 SizeT m1 = 1024 * 1024;
1909 SizeT m8 = 8 * m1;
1910 SizeT initial_size = VG_(client_rlimit_data).rlim_cur;
1911 VG_(debugLog)(1, "syswrap-solaris", "Setup client data (brk) segment "
1912 "at %#lx\n", VG_(brk_base));
1913 if (initial_size < m1)
1914 initial_size = m1;
1915 if (initial_size > m8)
1916 initial_size = m8;
1917 initial_size = VG_PGROUNDUP(initial_size);
1919 Addr anon_start = VG_PGROUNDUP(VG_(brk_base));
1920 SizeT anon_size = VG_PGROUNDUP(initial_size);
1921 Addr resvn_start = anon_start + anon_size;
1922 SizeT resvn_size = VKI_PAGE_SIZE;
1924 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
1925 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
1926 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
1927 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
1928 vg_assert(VG_(brk_base) == VG_(brk_limit));
1930 /* Find the loaded data segment and remember its protection. */
1931 const NSegment *seg = VG_(am_find_nsegment)(VG_(brk_base) - 1);
1932 vg_assert(seg != NULL);
1933 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
1934 | (seg->hasW ? VKI_PROT_WRITE : 0)
1935 | (seg->hasX ? VKI_PROT_EXEC : 0);
1937 /* Try to create the data segment and associated reservation where
1938 VG_(brk_base) says. */
1939 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
1940 anon_size);
1941 if (!ok) {
1942 /* That didn't work, we're hosed. */
1943 return False;
1946 /* Map the data segment. */
1947 SysRes sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
1948 vg_assert(!sr_isError(sres));
1949 vg_assert(sr_Res(sres) == anon_start);
1951 brk_segment_established = True;
1952 return True;
1955 /* Tell the tool about the client data segment and then kill it, which makes
1956 it initially inaccessible/unaddressable. */
1957 void VG_(track_client_dataseg)(ThreadId tid)
1959 const NSegment *seg = VG_(am_find_nsegment)(VG_PGROUNDUP(VG_(brk_base)));
1960 vg_assert(seg != NULL);
1961 vg_assert(seg->kind == SkAnonC);
1963 VG_TRACK(new_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base), tid);
1964 VG_TRACK(die_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base));
1967 static void PRINTF_CHECK(1, 2)
1968 possibly_complain_brk(const HChar *format, ...)
1970 static Bool alreadyComplained = False;
1971 if (!alreadyComplained) {
1972 alreadyComplained = True;
1973 if (VG_(clo_verbosity) > 0) {
1974 va_list vargs;
1975 va_start(vargs, format);
1976 VG_(vmessage)(Vg_UserMsg, format, vargs);
1977 va_end(vargs);
1978 VG_(umsg)("(See section Limitations in the user manual.)\n");
1979 VG_(umsg)("NOTE: further instances of this message will not be "
1980 "shown.\n");
1985 PRE(sys_brk)
1987 /* unsigned long brk(caddr_t end_data_segment); */
1988 /* The Solaris kernel returns 0 on success.
1989 In addition, brk(0) returns the current data segment end. This is
1990 very different from the Linux kernel, for example. */
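   /* Illustration only, not part of the wrapper and not how any particular
      libc is implemented: an sbrk()-style helper could be layered on these
      semantics roughly as follows (assumes <unistd.h>, <stdint.h> and
      <sys/syscall.h> for syscall() and SYS_brk):

         void *my_sbrk(intptr_t incr)
         {
            uintptr_t cur = syscall(SYS_brk, 0);        -- current segment end
            if (incr != 0 && syscall(SYS_brk, cur + incr) != 0)
               return (void *) -1;                      -- 0 means success
            return (void *) cur;                        -- previous break
         }

      which is why the new_brk == 0 case is handled separately right below. */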
1992 Addr old_brk_limit = VG_(brk_limit);
1993 /* If VG_(brk_base) is page-aligned then old_brk_base_pgup is equal to
1994 VG_(brk_base). */
1995 Addr old_brk_base_pgup = VG_PGROUNDUP(VG_(brk_base));
1996 Addr new_brk = ARG1;
1997 const NSegment *seg, *seg2;
1999 PRINT("sys_brk ( %#lx )", ARG1);
2000 PRE_REG_READ1(unsigned long, "brk", vki_caddr_t, end_data_segment);
2002 if (new_brk == 0) {
2003 /* brk(0) - specific to Solaris 11 only. */
2004 SET_STATUS_Success(old_brk_limit);
2005 return;
2008 /* Handle some trivial cases. */
2009 if (new_brk == old_brk_limit) {
2010 SET_STATUS_Success(0);
2011 return;
2013 if (new_brk < VG_(brk_base)) {
2014 /* Clearly impossible. */
2015 SET_STATUS_Failure(VKI_ENOMEM);
2016 return;
2018 if (new_brk - VG_(brk_base) > VG_(client_rlimit_data).rlim_cur) {
2019 SET_STATUS_Failure(VKI_ENOMEM);
2020 return;
2023 /* The brk base and limit must have been already set. */
2024 vg_assert(VG_(brk_base) != -1);
2025 vg_assert(VG_(brk_limit) != -1);
2027 if (!brk_segment_established) {
2028 /* Stay sane (because there should have been no brk activity yet). */
2029 vg_assert(VG_(brk_base) == VG_(brk_limit));
2031 if (!VG_(setup_client_dataseg)()) {
2032 possibly_complain_brk("Cannot map memory to initialize brk segment in "
2033 "thread #%d at %#lx\n", tid, VG_(brk_base));
2034 SET_STATUS_Failure(VKI_ENOMEM);
2035 return;
2038 VG_(track_client_dataseg)(tid);
2041 if (new_brk < old_brk_limit) {
2042 /* Shrinking the data segment. Be lazy and don't munmap the excess
2043 area. */
2044 if (old_brk_limit > old_brk_base_pgup) {
2045 /* Calculate new local brk (=MAX(new_brk, old_brk_base_pgup)). */
2046 Addr new_brk_local;
2047 if (new_brk < old_brk_base_pgup)
2048 new_brk_local = old_brk_base_pgup;
2049 else
2050 new_brk_local = new_brk;
2052 /* Find a segment at the beginning and at the end of the shrunk
2053 range. */
2054 seg = VG_(am_find_nsegment)(new_brk_local);
2055 seg2 = VG_(am_find_nsegment)(old_brk_limit - 1);
2056 vg_assert(seg);
2057 vg_assert(seg->kind == SkAnonC);
2058 vg_assert(seg2);
2059 vg_assert(seg == seg2);
2061 /* Discard any translations and zero-out the area. */
2062 if (seg->hasT)
2063 VG_(discard_translations)(new_brk_local,
2064 old_brk_limit - new_brk_local,
2065 "do_brk(shrink)");
2066 /* Since we're being lazy and not unmapping pages, we have to zero out
2067 the area, so that if the area later comes back into circulation, it
2068 will be filled with zeroes, as if it really had been unmapped and
2069 later remapped. Be a bit paranoid and try hard to ensure we're not
2070 going to segfault by doing the write - check that segment is
2071 writable. */
2072 if (seg->hasW)
2073 VG_(memset)((void*)new_brk_local, 0, old_brk_limit - new_brk_local);
2076 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2077 if (new_brk < old_brk_base_pgup) {
2078 /* Calculate old local brk (=MIN(old_brk_limit, old_brk_base_pgup)). */
2079 Addr old_brk_local;
2080 if (old_brk_limit < old_brk_base_pgup)
2081 old_brk_local = old_brk_limit;
2082 else
2083 old_brk_local = old_brk_base_pgup;
2085 /* Find a segment at the beginning and at the end of the shrunk
2086 range. */
2087 seg = VG_(am_find_nsegment)(new_brk);
2088 seg2 = VG_(am_find_nsegment)(old_brk_local - 1);
2089 vg_assert(seg);
2090 vg_assert(seg2);
2091 vg_assert(seg == seg2);
2093 /* Discard any translations and zero-out the area. */
2094 if (seg->hasT)
2095 VG_(discard_translations)(new_brk, old_brk_local - new_brk,
2096 "do_brk(shrink)");
2097 if (seg->hasW)
2098 VG_(memset)((void*)new_brk, 0, old_brk_local - new_brk);
2101 /* We are done, update VG_(brk_limit), tell the tool about the changes,
2102 and leave. */
2103 VG_(brk_limit) = new_brk;
2104 VG_TRACK(die_mem_brk, new_brk, old_brk_limit - new_brk);
2105 SET_STATUS_Success(0);
2106 return;
2109 /* We are expanding the brk segment. */
2111 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2112 if (old_brk_limit < old_brk_base_pgup) {
2113 /* Calculate new local brk (=MIN(new_brk, old_brk_base_pgup)). */
2114 Addr new_brk_local;
2115 if (new_brk < old_brk_base_pgup)
2116 new_brk_local = new_brk;
2117 else
2118 new_brk_local = old_brk_base_pgup;
2120 /* Find a segment at the beginning and at the end of the expanded
2121 range. */
2122 seg = VG_(am_find_nsegment)(old_brk_limit);
2123 seg2 = VG_(am_find_nsegment)(new_brk_local - 1);
2124 vg_assert(seg);
2125 vg_assert(seg2);
2126 vg_assert(seg == seg2);
2128 /* Nothing else to do. */
2131 if (new_brk > old_brk_base_pgup) {
2132 /* Calculate old local brk (=MAX(old_brk_limit, old_brk_base_pgup)). */
2133 Addr old_brk_local;
2134 if (old_brk_limit < old_brk_base_pgup)
2135 old_brk_local = old_brk_base_pgup;
2136 else
2137 old_brk_local = old_brk_limit;
2139 /* Find a segment at the beginning of the expanded range. */
2140 if (old_brk_local > old_brk_base_pgup)
2141 seg = VG_(am_find_nsegment)(old_brk_local - 1);
2142 else
2143 seg = VG_(am_find_nsegment)(old_brk_local);
2144 vg_assert(seg);
2145 vg_assert(seg->kind == SkAnonC);
2147 /* Find the 1-page reservation segment. */
2148 seg2 = VG_(am_next_nsegment)(seg, True/*forwards*/);
2149 vg_assert(seg2);
2150 vg_assert(seg2->kind == SkResvn);
2151 vg_assert(seg->end + 1 == seg2->start);
2152 vg_assert(seg2->end - seg2->start + 1 == VKI_PAGE_SIZE);
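      /* Sketch of the growth step performed below when new_brk does not fit
         into the existing anon segment: a fresh one-page reservation is
         created at VG_PGROUNDUP(new_brk) (the address space manager thereby
         confirms the room is actually free), and the range from the end of
         the current anon segment (seg->end + 1) up to that new reservation
         is then mapped as client anon memory with the same protection, which
         the address space manager merges with the existing data segment. */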
2154 if (new_brk <= seg2->start) {
2155 /* Still fits within the existing anon segment, nothing to do. */
2156 } else {
2157 /* Data segment limit was already checked. */
2158 Addr anon_start = seg->end + 1;
2159 Addr resvn_start = VG_PGROUNDUP(new_brk);
2160 SizeT anon_size = resvn_start - anon_start;
2161 SizeT resvn_size = VKI_PAGE_SIZE;
2162 SysRes sres;
2164 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
2165 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
2166 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
2167 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
2168 vg_assert(anon_size > 0);
2170 /* Address space manager checks for free address space for us;
2171 reservation would not be otherwise created. */
2172 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
2173 anon_size);
2174 if (!ok) {
2175 possibly_complain_brk("brk segment overflow in thread #%d: cannot "
2176 "grow to %#lx\n", tid, new_brk);
2177 SET_STATUS_Failure(VKI_ENOMEM);
2178 return;
2181 /* Establish protection from the existing segment. */
2182 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
2183 | (seg->hasW ? VKI_PROT_WRITE : 0)
2184 | (seg->hasX ? VKI_PROT_EXEC : 0);
2186 /* Address space manager will merge old and new data segments. */
2187 sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
2188 if (sr_isError(sres)) {
2189 possibly_complain_brk("Cannot map memory to grow brk segment in "
2190 "thread #%d to %#lx\n", tid, new_brk);
2191 SET_STATUS_Failure(VKI_ENOMEM);
2192 return;
2194 vg_assert(sr_Res(sres) == anon_start);
2196 seg = VG_(am_find_nsegment)(old_brk_base_pgup);
2197 seg2 = VG_(am_find_nsegment)(VG_PGROUNDUP(new_brk) - 1);
2198 vg_assert(seg);
2199 vg_assert(seg2);
2200 vg_assert(seg == seg2);
2201 vg_assert(new_brk <= seg->end + 1);
2205 /* We are done, update VG_(brk_limit), tell the tool about the changes, and
2206 leave. */
2207 VG_(brk_limit) = new_brk;
2208 VG_TRACK(new_mem_brk, old_brk_limit, new_brk - old_brk_limit, tid);
2209 SET_STATUS_Success(0);
2212 PRE(sys_stat)
2214 /* int stat(const char *path, struct stat *buf); */
2215 /* Note: We could use the generic sys_newstat wrapper here, but the 'new'
2216 in its name is rather confusing in the Solaris context, thus we provide
2217 our own wrapper. */
2218 PRINT("sys_stat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
2219 PRE_REG_READ2(long, "stat", const char *, path, struct stat *, buf);
2221 PRE_MEM_RASCIIZ("stat(path)", ARG1);
2222 PRE_MEM_WRITE("stat(buf)", ARG2, sizeof(struct vki_stat));
2225 POST(sys_stat)
2227 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2230 PRE(sys_lseek)
2232 /* off_t lseek(int fildes, off_t offset, int whence); */
2233 PRINT("sys_lseek ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2234 PRE_REG_READ3(long, "lseek", int, fildes, vki_off_t, offset, int, whence);
2236 /* Stay sane. */
2237 if (!ML_(fd_allowed)(ARG1, "lseek", tid, False))
2238 SET_STATUS_Failure(VKI_EBADF);
2241 PRE(sys_mount)
2243 /* int mount(const char *spec, const char *dir, int mflag, char *fstype,
2244 char *dataptr, int datalen, char *optptr, int optlen); */
2245 *flags |= SfMayBlock;
2246 if (ARG3 & VKI_MS_OPTIONSTR) {
2247 /* 8-argument mount */
2248 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld, "
2249 "%#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3,
2250 ARG4, (HChar *) ARG4, ARG5, ARG6, ARG7, (HChar *) ARG7, SARG8);
2251 PRE_REG_READ8(long, "mount", const char *, spec, const char *, dir,
2252 int, mflag, char *, fstype, char *, dataptr, int, datalen,
2253 char *, optptr, int, optlen);
2255 else if (ARG3 & VKI_MS_DATA) {
2256 /* 6-argument mount */
2257 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld )",
2258 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4,
2259 (HChar *) ARG4, ARG5, SARG6);
2260 PRE_REG_READ6(long, "mount", const char *, spec, const char *, dir,
2261 int, mflag, char *, fstype, char *, dataptr,
2262 int, datalen);
2264 else {
2265 /* 4-argument mount */
2266 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s) )", ARG1,
2267 (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4, (HChar *) ARG4);
2268 PRE_REG_READ4(long, "mount", const char *, spec, const char *, dir,
2269 int, mflag, char *, fstype);
2271 if (ARG1)
2272 PRE_MEM_RASCIIZ("mount(spec)", ARG1);
2273 PRE_MEM_RASCIIZ("mount(dir)", ARG2);
2274 if (ARG4 && ARG4 >= 256) {
2275 /* If ARG4 < 256, then it's an index to a fs table in the kernel. */
2276 PRE_MEM_RASCIIZ("mount(fstype)", ARG4);
2278 if (ARG3 & (VKI_MS_DATA | VKI_MS_OPTIONSTR)) {
2279 if (ARG5)
2280 PRE_MEM_READ("mount(dataptr)", ARG5, ARG6);
2281 if ((ARG3 & VKI_MS_OPTIONSTR) && ARG7) {
2282 /* in/out buffer */
2283 PRE_MEM_RASCIIZ("mount(optptr)", ARG7);
2284 PRE_MEM_WRITE("mount(optptr)", ARG7, ARG8);
2289 POST(sys_mount)
2291 if (ARG3 & VKI_MS_OPTIONSTR) {
2292 POST_MEM_WRITE(ARG7, VG_(strlen)((HChar*)ARG7) + 1);
2293 } else if (ARG3 & VKI_MS_DATA) {
2294 if ((ARG2) &&
2295 (ARG3 & VKI_MS_NOMNTTAB) &&
2296 (VG_STREQ((HChar *) ARG4, "namefs")) &&
2297 (ARG6 == sizeof(struct vki_namefd)) &&
2298 ML_(safe_to_deref)((void *) ARG5, ARG6)) {
2299 /* Most likely an fattach() call for a door file descriptor. */
2300 door_record_server_fattach(((struct vki_namefd *) ARG5)->fd,
2301 (HChar *) ARG2);
2306 PRE(sys_readlinkat)
2308 /* ssize_t readlinkat(int dfd, const char *path, char *buf,
2309 size_t bufsiz); */
2310 HChar name[30]; // large enough
2311 Word saved = SYSNO;
2313 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2314 This is different from Linux, for example, where glibc sign-extends it. */
2315 Int dfd = (Int) ARG1;
2317 PRINT("sys_readlinkat ( %d, %#lx(%s), %#lx, %ld )", dfd, ARG2,
2318 (HChar *) ARG2, ARG3, SARG4);
2319 PRE_REG_READ4(long, "readlinkat", int, dfd, const char *, path,
2320 char *, buf, int, bufsiz);
2321 PRE_MEM_RASCIIZ("readlinkat(path)", ARG2);
2322 PRE_MEM_WRITE("readlinkat(buf)", ARG3, ARG4);
2324 /* Be strict but ignore dfd for absolute path. */
2325 if (dfd != VKI_AT_FDCWD
2326 && ML_(safe_to_deref)((void *) ARG2, 1)
2327 && ((HChar *) ARG2)[0] != '/'
2328 && !ML_(fd_allowed)(dfd, "readlinkat", tid, False)) {
2329 SET_STATUS_Failure(VKI_EBADF);
2330 return;
2333 /* Handle the case where readlinkat is looking at /proc/self/path/a.out or
2334 /proc/<pid>/path/a.out. */
2335 VG_(sprintf)(name, "/proc/%d/path/a.out", VG_(getpid)());
2336 if (ML_(safe_to_deref)((void*)ARG2, 1) &&
2337 (!VG_(strcmp)((HChar*)ARG2, name) ||
2338 !VG_(strcmp)((HChar*)ARG2, "/proc/self/path/a.out"))) {
2339 VG_(sprintf)(name, "/proc/self/path/%d", VG_(cl_exec_fd));
2340 SET_STATUS_from_SysRes(VG_(do_syscall4)(saved, dfd, (UWord)name, ARG3,
2341 ARG4));
2345 POST(sys_readlinkat)
2347 POST_MEM_WRITE(ARG3, RES);
2350 PRE(sys_stime)
2352 /* Kernel: int stime(time_t time); */
2353 PRINT("sys_stime ( %ld )", ARG1);
2354 PRE_REG_READ1(long, "stime", vki_time_t, time);
2357 PRE(sys_fstat)
2359 /* int fstat(int fildes, struct stat *buf); */
2360 /* Note: We could use the generic sys_newfstat wrapper here, but the 'new'
2361 in its name is rather confusing in the Solaris context, thus we provide
2362 our own wrapper. */
2363 PRINT("sys_fstat ( %ld, %#lx )", SARG1, ARG2);
2364 PRE_REG_READ2(long, "fstat", int, fildes, struct stat *, buf);
2365 PRE_MEM_WRITE("fstat(buf)", ARG2, sizeof(struct vki_stat));
2367 /* Be strict. */
2368 if (!ML_(fd_allowed)(ARG1, "fstat", tid, False))
2369 SET_STATUS_Failure(VKI_EBADF);
2372 POST(sys_fstat)
2374 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2377 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
2378 PRE(sys_frealpathat)
2380 /* int frealpathat(int fd, char *path, char *buf, size_t buflen); */
2382 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2383 This is different from Linux, for example, where glibc sign-extends it. */
2384 Int fd = (Int) ARG1;
2386 PRINT("sys_frealpathat ( %d, %#lx(%s), %#lx, %lu )",
2387 fd, ARG2, (HChar *) ARG2, ARG3, ARG4);
2388 PRE_REG_READ4(long, "frealpathat", int, fd, char *, path,
2389 char *, buf, vki_size_t, buflen);
2390 PRE_MEM_RASCIIZ("frealpathat(path)", ARG2);
2391 PRE_MEM_WRITE("frealpathat(buf)", ARG3, ARG4);
2393 /* Be strict but ignore fd for absolute path. */
2394 if (fd != VKI_AT_FDCWD
2395 && ML_(safe_to_deref)((void *) ARG2, 1)
2396 && ((HChar *) ARG2)[0] != '/'
2397 && !ML_(fd_allowed)(fd, "frealpathat", tid, False))
2398 SET_STATUS_Failure(VKI_EBADF);
2401 POST(sys_frealpathat)
2403 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
2405 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
2407 PRE(sys_stty)
2409 /* int stty(int fd, const struct sgttyb *tty); */
2410 PRINT("sys_stty ( %ld, %#lx )", SARG1, ARG2);
2411 PRE_REG_READ2(long, "stty", int, fd,
2412 const struct vki_sgttyb *, tty);
2413 PRE_MEM_READ("stty(tty)", ARG2, sizeof(struct vki_sgttyb));
2415 /* Be strict. */
2416 if (!ML_(fd_allowed)(ARG1, "stty", tid, False))
2417 SET_STATUS_Failure(VKI_EBADF);
2420 PRE(sys_gtty)
2422 /* int gtty(int fd, struct sgttyb *tty); */
2423 PRINT("sys_gtty ( %ld, %#lx )", SARG1, ARG2);
2424 PRE_REG_READ2(long, "gtty", int, fd, struct vki_sgttyb *, tty);
2425 PRE_MEM_WRITE("gtty(tty)", ARG2, sizeof(struct vki_sgttyb));
2427 /* Be strict. */
2428 if (!ML_(fd_allowed)(ARG1, "gtty", tid, False))
2429 SET_STATUS_Failure(VKI_EBADF);
2432 POST(sys_gtty)
2434 POST_MEM_WRITE(ARG2, sizeof(struct vki_sgttyb));
2437 PRE(sys_pgrpsys)
2439 /* Kernel: int setpgrp(int flag, int pid, int pgid); */
2440 switch (ARG1 /*flag*/) {
2441 case 0:
2442 /* Libc: pid_t getpgrp(void); */
2443 PRINT("sys_pgrpsys ( %ld )", SARG1);
2444 PRE_REG_READ1(long, SC2("pgrpsys", "getpgrp"), int, flag);
2445 break;
2446 case 1:
2447 /* Libc: pid_t setpgrp(void); */
2448 PRINT("sys_pgrpsys ( %ld )", SARG1);
2449 PRE_REG_READ1(long, SC2("pgrpsys", "setpgrp"), int, flag);
2450 break;
2451 case 2:
2452 /* Libc: pid_t getsid(pid_t pid); */
2453 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2454 PRE_REG_READ2(long, SC2("pgrpsys", "getsid"), int, flag,
2455 vki_pid_t, pid);
2456 break;
2457 case 3:
2458 /* Libc: pid_t setsid(void); */
2459 PRINT("sys_pgrpsys ( %ld )", SARG1);
2460 PRE_REG_READ1(long, SC2("pgrpsys", "setsid"), int, flag);
2461 break;
2462 case 4:
2463 /* Libc: pid_t getpgid(pid_t pid); */
2464 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2465 PRE_REG_READ2(long, SC2("pgrpsys", "getpgid"), int, flag,
2466 vki_pid_t, pid);
2467 break;
2468 case 5:
2469 /* Libc: int setpgid(pid_t pid, pid_t pgid); */
2470 PRINT("sys_pgrpsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2471 PRE_REG_READ3(long, SC2("pgrpsys", "setpgid"), int, flag,
2472 vki_pid_t, pid, vki_pid_t, pgid);
2473 break;
2474 default:
2475 VG_(unimplemented)("Syswrap of the pgrpsys call with flag %ld.", SARG1);
2476 /*NOTREACHED*/
2477 break;
2481 PRE(sys_pipe)
2483 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2484 /* int pipe(int fildes[2], int flags); */
2485 PRINT("sys_pipe ( %#lx, %ld )", ARG1, SARG2);
2486 PRE_REG_READ2(long, "pipe", int *, fildes, int, flags);
2487 PRE_MEM_WRITE("pipe(fildes)", ARG1, 2 * sizeof(int));
2488 #else
2489 /* longlong_t pipe(); */
2490 PRINT("sys_pipe ( )");
2491 PRE_REG_READ0(long, "pipe");
2492 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
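/* Note on the old-style variant above: the kernel hands back both descriptors
   packed into a single two-register (longlong_t) result, so the POST handler
   below simply unpacks them as

      p0 = RES;      the first register of the return pair
      p1 = RESHI;    the second register of the return pair

   and then applies the usual fd checks. */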
2495 POST(sys_pipe)
2497 Int p0, p1;
2499 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2500 int *fds = (int*)ARG1;
2501 p0 = fds[0];
2502 p1 = fds[1];
2503 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
2504 #else
2505 p0 = RES;
2506 p1 = RESHI;
2507 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
2509 if (!ML_(fd_allowed)(p0, "pipe", tid, True) ||
2510 !ML_(fd_allowed)(p1, "pipe", tid, True)) {
2511 VG_(close)(p0);
2512 VG_(close)(p1);
2513 SET_STATUS_Failure(VKI_EMFILE);
2515 else if (VG_(clo_track_fds)) {
2516 ML_(record_fd_open_nameless)(tid, p0);
2517 ML_(record_fd_open_nameless)(tid, p1);
2521 PRE(sys_faccessat)
2523 /* int faccessat(int fd, const char *path, int amode, int flag); */
2525 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2526 This is different from Linux, for example, where glibc sign-extends it. */
2527 Int fd = (Int) ARG1;
2529 PRINT("sys_faccessat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2530 (HChar *) ARG2, SARG3, SARG4);
2531 PRE_REG_READ4(long, "faccessat", int, fd, const char *, path,
2532 int, amode, int, flag);
2533 PRE_MEM_RASCIIZ("faccessat(path)", ARG2);
2535 /* Be strict but ignore fd for absolute path. */
2536 if (fd != VKI_AT_FDCWD
2537 && ML_(safe_to_deref)((void *) ARG2, 1)
2538 && ((HChar *) ARG2)[0] != '/'
2539 && !ML_(fd_allowed)(fd, "faccessat", tid, False))
2540 SET_STATUS_Failure(VKI_EBADF);
2543 PRE(sys_mknodat)
2545 /* int mknodat(int fd, char *fname, mode_t fmode, dev_t dev); */
2547 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2548 This is different from Linux, for example, where glibc sign-extends it. */
2549 Int fd = (Int) ARG1;
2551 PRINT("sys_mknodat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2552 (HChar *) ARG2, SARG3, SARG4);
2553 PRE_REG_READ4(long, "mknodat", int, fd, const char *, fname,
2554 vki_mode_t, fmode, vki_dev_t, dev);
2555 PRE_MEM_RASCIIZ("mknodat(fname)", ARG2);
2557 /* Be strict but ignore fd for absolute path. */
2558 if (fd != VKI_AT_FDCWD
2559 && ML_(safe_to_deref)((void *) ARG2, 1)
2560 && ((HChar *) ARG2)[0] != '/'
2561 && !ML_(fd_allowed)(fd, "mknodat", tid, False))
2562 SET_STATUS_Failure(VKI_EBADF);
2564 *flags |= SfMayBlock;
2567 POST(sys_mknodat)
2569 if (!ML_(fd_allowed)(RES, "mknodat", tid, True)) {
2570 VG_(close)(RES);
2571 SET_STATUS_Failure(VKI_EMFILE);
2572 } else if (VG_(clo_track_fds))
2573 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG2);
2576 PRE(sys_sysi86)
2578 /* int sysi86(int cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); */
2579 PRINT("sys_sysi86 ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
2580 PRE_REG_READ4(long, "sysi86", int, cmd, uintptr_t, arg1, uintptr_t, arg2,
2581 uintptr_t, arg3);
2583 switch (ARG1 /*cmd*/) {
2584 case VKI_SI86FPSTART:
2585 PRE_MEM_WRITE("sysi86(fp_hw)", ARG2, sizeof(vki_uint_t));
2586 /* ARG3 is a desired x87 FCW value, ARG4 is a desired SSE MXCSR value.
2587 They are passed to the kernel but V will change them later anyway
2588 (this is a general Valgrind limitation described in the official
2589 documentation). */
2590 break;
2591 default:
2592 VG_(unimplemented)("Syswrap of the sysi86 call with cmd %ld.", SARG1);
2593 /*NOTREACHED*/
2594 break;
2598 POST(sys_sysi86)
2600 switch (ARG1 /*cmd*/) {
2601 case VKI_SI86FPSTART:
2602 POST_MEM_WRITE(ARG2, sizeof(vki_uint_t));
2603 break;
2604 default:
2605 vg_assert(0);
2606 break;
2610 PRE(sys_shmsys)
2612 /* Kernel: uintptr_t shmsys(int opcode, uintptr_t a0, uintptr_t a1,
2613 uintptr_t a2, uintptr_t a3);
2615 *flags |= SfMayBlock;
2617 switch (ARG1 /*opcode*/) {
2618 case VKI_SHMAT:
2619 /* Libc: void *shmat(int shmid, const void *shmaddr, int shmflg); */
2620 PRINT("sys_shmsys ( %ld, %ld, %#lx, %ld )",
2621 SARG1, SARG2, ARG3, SARG4);
2622 PRE_REG_READ4(long, SC2("shmsys", "shmat"), int, opcode,
2623 int, shmid, const void *, shmaddr, int, shmflg);
2625 UWord addr = ML_(generic_PRE_sys_shmat)(tid, ARG2, ARG3, ARG4);
2626 if (addr == 0)
2627 SET_STATUS_Failure(VKI_EINVAL);
2628 else
2629 ARG3 = addr;
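      /* ML_(generic_PRE_sys_shmat) validates the attach request and, for a
         NULL shmaddr, picks an address acceptable to Valgrind's view of the
         client address space (returning 0 on failure); rewriting ARG3 makes
         the kernel attach the segment at the address Valgrind approved. */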
2630 break;
2632 case VKI_SHMCTL:
2633 /* Libc: int shmctl(int shmid, int cmd, struct shmid_ds *buf); */
2634 switch (ARG3 /* cmd */) {
2635 case VKI_SHM_LOCK:
2636 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2637 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "lock"),
2638 int, opcode, int, shmid, int, cmd);
2639 break;
2640 case VKI_SHM_UNLOCK:
2641 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2642 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "unlock"),
2643 int, opcode, int, shmid, int, cmd);
2644 break;
2645 case VKI_IPC_RMID:
2646 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2647 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "rmid"),
2648 int, opcode, int, shmid, int, cmd);
2649 break;
2650 case VKI_IPC_SET:
2651 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2652 SARG1, SARG2, SARG3, ARG4);
2653 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set"),
2654 int, opcode, int, shmid, int, cmd,
2655 struct vki_shmid_ds *, buf);
2657 struct vki_shmid_ds *buf = (struct vki_shmid_ds *) ARG4;
2658 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.uid)",
2659 buf->shm_perm.uid);
2660 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.gid)",
2661 buf->shm_perm.gid);
2662 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.mode)",
2663 buf->shm_perm.mode);
2664 break;
2665 case VKI_IPC_STAT:
2666 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2667 SARG1, SARG2, SARG3, ARG4);
2668 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat"),
2669 int, opcode, int, shmid, int, cmd,
2670 struct vki_shmid_ds *, buf);
2671 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat, buf)", ARG4,
2672 sizeof(struct vki_shmid_ds));
2673 break;
2674 case VKI_IPC_SET64:
2675 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2676 SARG1, SARG2, SARG3, ARG4);
2677 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set64"),
2678 int, opcode, int, shmid, int, cmd,
2679 struct vki_shmid_ds64 *, buf);
2681 struct vki_shmid_ds64 *buf64 = (struct vki_shmid_ds64 *) ARG4;
2682 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2683 "buf->shmx_perm.ipcx_uid)",
2684 buf64->shmx_perm.ipcx_uid);
2685 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2686 "buf->shmx_perm.ipcx_gid)",
2687 buf64->shmx_perm.ipcx_gid);
2688 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2689 "buf->shmx_perm.ipcx_mode)",
2690 buf64->shmx_perm.ipcx_mode);
2691 break;
2692 case VKI_IPC_STAT64:
2693 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2694 SARG1, SARG2, SARG3, ARG4);
2695 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat64"),
2696 int, opcode, int, shmid, int, cmd,
2697 struct vki_shmid_ds64 *, buf);
2698 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat64, buf)", ARG4,
2699 sizeof(struct vki_shmid_ds64));
2700 break;
2701 #if defined(SOLARIS_SHM_NEW)
2702 case VKI_IPC_XSTAT64:
2703 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2704 SARG1, SARG2, SARG3, ARG4);
2705 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "xstat64"),
2706 int, opcode, int, shmid, int, cmd,
2707 struct vki_shmid_ds64 *, buf);
2708 PRE_MEM_WRITE("shmsys(shmctl, ipc_xstat64, buf)", ARG4,
2709 sizeof(struct vki_shmid_xds64));
2710 break;
2711 #endif /* SOLARIS_SHM_NEW */
2712 default:
2713 VG_(unimplemented)("Syswrap of the shmsys(shmctl) call with "
2714 "cmd %ld.", SARG3);
2715 /*NOTREACHED*/
2716 break;
2718 break;
2720 case VKI_SHMDT:
2721 /* Libc: int shmdt(const void *shmaddr); */
2722 PRINT("sys_shmsys ( %ld, %#lx )", SARG1, ARG2);
2723 PRE_REG_READ2(long, SC2("shmsys", "shmdt"), int, opcode,
2724 const void *, shmaddr);
2726 if (!ML_(generic_PRE_sys_shmdt)(tid, ARG2))
2727 SET_STATUS_Failure(VKI_EINVAL);
2728 break;
2730 case VKI_SHMGET:
2731 /* Libc: int shmget(key_t key, size_t size, int shmflg); */
2732 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld )",
2733 SARG1, SARG2, ARG3, ARG4);
2734 PRE_REG_READ4(long, SC2("shmsys", "shmget"), int, opcode,
2735 vki_key_t, key, vki_size_t, size, int, shmflg);
2736 break;
2738 case VKI_SHMIDS:
2739 /* Libc: int shmids(int *buf, uint_t nids, uint_t *pnids); */
2740 PRINT("sys_shmsys ( %ld, %#lx, %lu, %#lx )",
2741 SARG1, ARG2, ARG3, ARG4);
2742 PRE_REG_READ4(long, SC2("shmsys", "shmids"), int, opcode,
2743 int *, buf, vki_uint_t, nids, vki_uint_t *, pnids);
2745 PRE_MEM_WRITE("shmsys(shmids, buf)", ARG2, ARG3 * sizeof(int));
2746 PRE_MEM_WRITE("shmsys(shmids, pnids)", ARG4, sizeof(vki_uint_t));
2747 break;
2749 #if defined(SOLARIS_SHM_NEW)
2750 case VKI_SHMADV:
2751 /* Libc: int shmadv(int shmid, uint_t cmd, uint_t *advice); */
2752 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld )",
2753 SARG1, SARG2, ARG3, ARG4);
2754 PRE_REG_READ4(long, SC2("shmsys", "shmadv"), int, opcode,
2755 int, shmid, vki_uint_t, cmd, vki_uint_t *, advice);
2757 switch (ARG3 /*cmd*/) {
2758 case VKI_SHM_ADV_GET:
2759 PRE_MEM_WRITE("shmsys(shmadv, advice)", ARG4,
2760 sizeof(vki_uint_t));
2761 break;
2762 case VKI_SHM_ADV_SET:
2763 PRE_MEM_READ("shmsys(shmadv, advice)", ARG4,
2764 sizeof(vki_uint_t));
2765 break;
2766 default:
2767 VG_(unimplemented)("Syswrap of the shmsys(shmadv) call with "
2768 "cmd %lu.", ARG3);
2769 /*NOTREACHED*/
2770 break;
2772 break;
2774 case VKI_SHMGET_OSM:
2775 /* Libc: int shmget_osm(key_t key, size_t size, int shmflg,
2776 size_t granule_sz);
2778 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld, %lu )",
2779 SARG1, SARG2, ARG3, SARG4, ARG5);
2780 PRE_REG_READ5(long, SC2("shmsys", "shmget_osm"), int, opcode,
2781 vki_key_t, key, vki_size_t, size, int, shmflg,
2782 vki_size_t, granule_sz);
2783 break;
2784 #endif /* SOLARIS_SHM_NEW */
2786 default:
2787 VG_(unimplemented)("Syswrap of the shmsys call with opcode %ld.",
2788 SARG1);
2789 /*NOTREACHED*/
2790 break;
2794 POST(sys_shmsys)
2796 switch (ARG1 /*opcode*/) {
2797 case VKI_SHMAT:
2798 ML_(generic_POST_sys_shmat)(tid, RES, ARG2, ARG3, ARG4);
2799 break;
2801 case VKI_SHMCTL:
2802 switch (ARG3 /*cmd*/) {
2803 case VKI_SHM_LOCK:
2804 case VKI_SHM_UNLOCK:
2805 case VKI_IPC_RMID:
2806 case VKI_IPC_SET:
2807 break;
2808 case VKI_IPC_STAT:
2809 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds));
2810 break;
2811 case VKI_IPC_SET64:
2812 break;
2813 case VKI_IPC_STAT64:
2814 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds64));
2815 break;
2816 #if defined(SOLARIS_SHM_NEW)
2817 case VKI_IPC_XSTAT64:
2818 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_xds64));
2819 break;
2820 #endif /* SOLARIS_SHM_NEW */
2821 default:
2822 vg_assert(0);
2823 break;
2825 break;
2827 case VKI_SHMDT:
2828 ML_(generic_POST_sys_shmdt)(tid, RES, ARG2);
2829 break;
2831 case VKI_SHMGET:
2832 break;
2834 case VKI_SHMIDS:
2836 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2838 vki_uint_t *pnids = (vki_uint_t *) ARG4;
2839 if (*pnids <= ARG3)
2840 POST_MEM_WRITE(ARG2, *pnids * sizeof(int));
2842 break;
2844 #if defined(SOLARIS_SHM_NEW)
2845 case VKI_SHMADV:
2846 switch (ARG3 /*cmd*/) {
2847 case VKI_SHM_ADV_GET:
2848 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2849 break;
2850 case VKI_SHM_ADV_SET:
2851 break;
2852 default:
2853 vg_assert(0);
2854 break;
2856 break;
2858 case VKI_SHMGET_OSM:
2859 break;
2860 #endif /* SOLARIS_SHM_NEW */
2862 default:
2863 vg_assert(0);
2864 break;
2868 PRE(sys_semsys)
2870 /* Kernel: int semsys(int opcode, uintptr_t a1, uintptr_t a2, uintptr_t a3,
2871 uintptr_t a4);
2873 *flags |= SfMayBlock;
2875 switch (ARG1 /*opcode*/) {
2876 case VKI_SEMCTL:
2877 /* Libc: int semctl(int semid, int semnum, int cmd...); */
2878 switch (ARG4) {
2879 case VKI_IPC_STAT:
2880 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2881 SARG1, SARG2, SARG3, SARG4, ARG5);
2882 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat"), int, opcode,
2883 int, semid, int, semnum, int, cmd,
2884 struct vki_semid_ds *, arg);
2885 break;
2886 case VKI_IPC_SET:
2887 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2888 SARG1, SARG2, SARG3, SARG4, ARG5);
2889 PRE_REG_READ5(long, SC3("semsys", "semctl", "set"), int, opcode,
2890 int, semid, int, semnum, int, cmd,
2891 struct vki_semid_ds *, arg);
2892 break;
2893 case VKI_IPC_STAT64:
2894 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2895 SARG1, SARG2, SARG3, SARG4, ARG5);
2896 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat64"), int, opcode,
2897 int, semid, int, semnum, int, cmd,
2898 struct vki_semid64_ds *, arg);
2899 break;
2900 case VKI_IPC_SET64:
2901 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2902 SARG1, SARG2, SARG3, SARG4, ARG5);
2903 PRE_REG_READ5(long, SC3("semsys", "semctl", "set64"), int, opcode,
2904 int, semid, int, semnum, int, cmd,
2905 struct vki_semid64_ds *, arg);
2906 break;
2907 case VKI_IPC_RMID:
2908 PRINT("sys_semsys ( %ld, %ld, %ld )", SARG1, SARG3, SARG4);
2909 PRE_REG_READ3(long, SC3("semsys", "semctl", "rmid"), int, opcode,
2910 int, semid, int, cmd);
2911 break;
2912 case VKI_GETALL:
2913 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2914 SARG1, SARG2, SARG4, ARG5);
2915 PRE_REG_READ4(long, SC3("semsys", "semctl", "getall"), int, opcode,
2916 int, semid, int, cmd, ushort_t *, arg);
2917 break;
2918 case VKI_SETALL:
2919 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2920 SARG1, SARG2, SARG4, ARG5);
2921 PRE_REG_READ4(long, SC3("semsys", "semctl", "setall"), int, opcode,
2922 int, semid, int, cmd, ushort_t *, arg);
2923 break;
2924 case VKI_GETVAL:
2925 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2926 SARG1, SARG2, SARG3, SARG4);
2927 PRE_REG_READ4(long, SC3("semsys", "semctl", "getval"), int, opcode,
2928 int, semid, int, semnum, int, cmd);
2929 break;
2930 case VKI_SETVAL:
2931 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2932 SARG1, SARG2, SARG3, SARG4, ARG5);
2933 PRE_REG_READ5(long, SC3("semsys", "semctl", "setval"), int, opcode,
2934 int, semid, int, semnum, int, cmd,
2935 union vki_semun *, arg);
2936 break;
2937 case VKI_GETPID:
2938 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2939 SARG1, SARG2, SARG3, SARG4);
2940 PRE_REG_READ4(long, SC3("semsys", "semctl", "getpid"), int, opcode,
2941 int, semid, int, semnum, int, cmd);
2942 break;
2943 case VKI_GETNCNT:
2944 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2945 SARG1, SARG2, SARG3, SARG4);
2946 PRE_REG_READ4(long, SC3("semsys", "semctl", "getncnt"),
2947 int, opcode, int, semid, int, semnum, int, cmd);
2948 break;
2949 case VKI_GETZCNT:
2950 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2951 SARG1, SARG2, SARG3, SARG4);
2952 PRE_REG_READ4(long, SC3("semsys", "semctl", "getzcnt"),
2953 int, opcode, int, semid, int, semnum, int, cmd);
2954 break;
2955 default:
2956 VG_(unimplemented)("Syswrap of the semsys(semctl) call "
2957 "with cmd %ld.", SARG4);
2958 /*NOTREACHED*/
2959 break;
2961 ML_(generic_PRE_sys_semctl)(tid, ARG2, ARG3, ARG4, ARG5);
2962 break;
2963 case VKI_SEMGET:
2964 /* Libc: int semget(key_t key, int nsems, int semflg); */
2965 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )", SARG1, SARG2, SARG3, SARG4);
2966 PRE_REG_READ4(long, SC2("semsys", "semget"), int, opcode,
2967 vki_key_t, key, int, nsems, int, semflg);
2968 break;
2969 case VKI_SEMOP:
2970 /* Libc: int semop(int semid, struct sembuf *sops, size_t nsops); */
2971 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
2972 PRE_REG_READ4(long, SC2("semsys", "semop"), int, opcode, int, semid,
2973 struct vki_sembuf *, sops, vki_size_t, nsops);
2974 ML_(generic_PRE_sys_semop)(tid, ARG2, ARG3, ARG4);
2975 break;
2976 case VKI_SEMIDS:
2977 /* Libc: int semids(int *buf, uint_t nids, uint_t *pnids); */
2978 PRINT("sys_semsys ( %ld, %#lx, %lu, %#lx )", SARG1, ARG2, ARG3, ARG4);
2979 PRE_REG_READ4(long, SC2("semsys", "semids"), int, opcode, int *, buf,
2980 vki_uint_t, nids, vki_uint_t *, pnids);
2982 PRE_MEM_WRITE("semsys(semids, buf)", ARG2, ARG3 * sizeof(int));
2983 PRE_MEM_WRITE("semsys(semids, pnids)", ARG4, sizeof(vki_uint_t));
2984 break;
2985 case VKI_SEMTIMEDOP:
2986 /* Libc: int semtimedop(int semid, struct sembuf *sops, size_t nsops,
2987 const struct timespec *timeout);
2989 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu, %#lx )", SARG1, SARG2, ARG3,
2990 ARG4, ARG5);
2991 PRE_REG_READ5(long, SC2("semsys", "semtimedop"), int, opcode,
2992 int, semid, struct vki_sembuf *, sops, vki_size_t, nsops,
2993 struct vki_timespec *, timeout);
2994 ML_(generic_PRE_sys_semtimedop)(tid, ARG2, ARG3, ARG4, ARG5);
2995 break;
2996 default:
2997 VG_(unimplemented)("Syswrap of the semsys call with opcode %ld.", SARG1);
2998 /*NOTREACHED*/
2999 break;
3003 POST(sys_semsys)
3005 switch (ARG1 /*opcode*/) {
3006 case VKI_SEMCTL:
3007 ML_(generic_POST_sys_semctl)(tid, RES, ARG2, ARG3, ARG4, ARG5);
3008 break;
3009 case VKI_SEMGET:
3010 case VKI_SEMOP:
3011 break;
3012 case VKI_SEMIDS:
3014 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
3016 vki_uint_t *pnids = (vki_uint_t *) ARG4;
3017 if (*pnids <= ARG3)
3018 POST_MEM_WRITE(ARG2, *pnids * sizeof(int));
3020 break;
3021 case VKI_SEMTIMEDOP:
3022 break;
3023 default:
3024 vg_assert(0);
3025 break;
3029 /* ---------------------------------------------------------------------
3030 ioctl wrappers
3031 ------------------------------------------------------------------ */
3033 PRE(sys_ioctl)
3035 /* int ioctl(int fildes, int request, ...); */
3036 *flags |= SfMayBlock;
3038 /* Prevent sign extending the switch case values to 64-bits on 64-bits
3039 architectures. */
3040 Int cmd = (Int) ARG2;
3042 switch (cmd /*request*/) {
3043 /* Handle 2-arg specially here (they do not use ARG3 at all). */
3044 case VKI_DINFOIDENT:
3045 case VKI_TIOCNOTTY:
3046 case VKI_TIOCSCTTY:
3047 PRINT("sys_ioctl ( %ld, %#lx )", SARG1, ARG2);
3048 PRE_REG_READ2(long, "ioctl", int, fd, int, request);
3049 break;
3050 /* And now come the 3-arg ones. */
3051 default:
3052 PRINT("sys_ioctl ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
3053 PRE_REG_READ3(long, "ioctl", int, fd, int, request, intptr_t, arg);
3054 break;
3057 switch (cmd /*request*/) {
3058 /* pools */
3059 case VKI_POOL_STATUSQ:
3060 PRE_MEM_WRITE("ioctl(POOL_STATUSQ)", ARG3, sizeof(vki_pool_status_t));
3061 break;
3063 /* mntio */
3064 case VKI_MNTIOC_GETEXTMNTENT:
3066 PRE_MEM_READ("ioctl(MNTIOC_GETEXTMNTENT)",
3067 ARG3, sizeof(struct vki_mntentbuf));
3069 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3070 if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
3071 PRE_MEM_WRITE("ioctl(MNTIOC_GETEXTMNTENT, embuf->mbuf_emp)",
3072 (Addr) embuf->mbuf_emp, sizeof(struct vki_extmnttab));
3073 PRE_MEM_WRITE("ioctl(MNTIOC_GETEXTMNTENT, embuf->mbuf_buf)",
3074 (Addr) embuf->mbuf_buf, embuf->mbuf_bufsize);
3077 break;
3079 case VKI_MNTIOC_GETMNTANY:
3081 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY)",
3082 ARG3, sizeof(struct vki_mntentbuf));
3084 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3085 if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
3086 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_emp)",
3087 (Addr) embuf->mbuf_emp, sizeof(struct vki_mnttab));
3088 PRE_MEM_WRITE("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_buf)",
3089 (Addr) embuf->mbuf_buf, embuf->mbuf_bufsize);
3091 struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
3092 if (ML_(safe_to_deref(mnt, sizeof(struct vki_mnttab)))) {
3093 if (mnt->mnt_special != NULL)
3094 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_special)",
3095 (Addr) mnt->mnt_special);
3096 if (mnt->mnt_mountp != NULL)
3097 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mountp)",
3098 (Addr) mnt->mnt_mountp);
3099 if (mnt->mnt_fstype != NULL)
3100 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_fstype)",
3101 (Addr) mnt->mnt_fstype);
3102 if (mnt->mnt_mntopts != NULL)
3103 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mntopts)",
3104 (Addr) mnt->mnt_mntopts);
3105 if (mnt->mnt_time != NULL)
3106 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_time)",
3107 (Addr) mnt->mnt_time);
3111 break;
3113 /* termio/termios */
3114 case VKI_TCGETA:
3115 PRE_MEM_WRITE("ioctl(TCGETA)", ARG3, sizeof(struct vki_termio));
3116 break;
3117 case VKI_TCGETS:
3118 PRE_MEM_WRITE("ioctl(TCGETS)", ARG3, sizeof(struct vki_termios));
3119 break;
3120 case VKI_TCSETS:
3121 PRE_MEM_READ("ioctl(TCSETS)", ARG3, sizeof(struct vki_termios));
3122 break;
3123 case VKI_TCSETSW:
3124 PRE_MEM_READ("ioctl(TCSETSW)", ARG3, sizeof(struct vki_termios));
3125 break;
3126 case VKI_TCSETSF:
3127 PRE_MEM_READ("ioctl(TCSETSF)", ARG3, sizeof(struct vki_termios));
3128 break;
3129 case VKI_TIOCGWINSZ:
3130 PRE_MEM_WRITE("ioctl(TIOCGWINSZ)", ARG3, sizeof(struct vki_winsize));
3131 break;
3132 case VKI_TIOCSWINSZ:
3133 PRE_MEM_READ("ioctl(TIOCSWINSZ)", ARG3, sizeof(struct vki_winsize));
3134 break;
3135 case VKI_TIOCGPGRP:
3136 PRE_MEM_WRITE("ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t));
3137 break;
3138 case VKI_TIOCSPGRP:
3139 PRE_MEM_READ("ioctl(TIOCSPGRP)", ARG3, sizeof(vki_pid_t));
3140 break;
3141 case VKI_TIOCGSID:
3142 PRE_MEM_WRITE("ioctl(TIOCGSID)", ARG3, sizeof(vki_pid_t));
3143 break;
3144 case VKI_TIOCNOTTY:
3145 case VKI_TIOCSCTTY:
3146 break;
3148 /* STREAMS */
3149 case VKI_I_PUSH:
3150 PRE_MEM_RASCIIZ("ioctl(I_PUSH)", ARG3);
3151 break;
3152 case VKI_I_FLUSH:
3153 break;
3154 case VKI_I_STR:
3156 PRE_MEM_READ("ioctl(I_STR)", ARG3, sizeof(struct vki_strioctl));
3158 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3159 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3160 if ((p->ic_dp != NULL) && (p->ic_len > 0)) {
3161 PRE_MEM_READ("ioctl(I_STR, strioctl->ic_dp)",
3162 (Addr) p->ic_dp, p->ic_len);
3166 break;
3167 case VKI_I_FIND:
3168 PRE_MEM_RASCIIZ("ioctl(I_FIND)", ARG3);
3169 break;
3170 case VKI_I_PEEK:
3172 /* Try hard not to mark strpeek->*buf.len members as being read. */
3173 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3175 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.maxlen)",
3176 p->ctlbuf.maxlen);
3177 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.len)",
3178 p->ctlbuf.len);
3179 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3180 p->ctlbuf.buf);
3181 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.maxlen)",
3182 p->databuf.maxlen);
3183 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->databuf.len)",
3184 p->databuf.len);
3185 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.buf)",
3186 p->databuf.buf);
3187 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->flags)", p->flags);
3188 /*PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->flags)", p->flags);*/
3190 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3191 if (p->ctlbuf.buf && p->ctlbuf.maxlen > 0)
3192 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3193 (Addr)p->ctlbuf.buf, p->ctlbuf.maxlen);
3194 if (p->databuf.buf && p->databuf.maxlen > 0)
3195 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->databuf.buf)",
3196 (Addr)p->databuf.buf, p->databuf.maxlen);
3199 break;
3200 case VKI_I_CANPUT:
3201 break;
3203 /* sockio */
3204 case VKI_SIOCGIFCONF:
3206 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3207 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_len)", p->ifc_len);
3208 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_buf)", p->ifc_buf);
3209 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3210 if ((p->ifc_buf != NULL) && (p->ifc_len > 0))
3211 PRE_MEM_WRITE("ioctl(SIOCGIFCONF, ifconf->ifc_buf)",
3212 (Addr) p->ifc_buf, p->ifc_len);
3214 /* ifc_len is also written to during the SIOCGIFCONF ioctl. */
3216 break;
3217 case VKI_SIOCGIFFLAGS:
3219 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3220 PRE_FIELD_READ("ioctl(SIOCGIFFLAGS, ifreq->ifr_name)", p->ifr_name);
3221 PRE_FIELD_WRITE("ioctl(SIOCGIFFLAGS, ifreq->ifr_flags)", p->ifr_flags);
3223 break;
3224 case VKI_SIOCGIFNETMASK:
3226 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3227 PRE_FIELD_READ("ioctl(SIOCGIFNETMASK, ifreq->ifr_name)", p->ifr_name);
3228 PRE_FIELD_WRITE("ioctl(SIOCGIFNETMASK, ifreq->ifr_addr)", p->ifr_addr);
3230 break;
3231 case VKI_SIOCGIFNUM:
3232 PRE_MEM_WRITE("ioctl(SIOCGIFNUM)", ARG3, sizeof(int));
3233 break;
3234 case VKI_SIOCGLIFBRDADDR:
3236 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3237 PRE_FIELD_READ("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_name)",
3238 p->lifr_name);
3239 PRE_FIELD_WRITE("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_addr)",
3240 p->lifr_addr);
3242 break;
3243 case VKI_SIOCGLIFCONF:
3245 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3246 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_len)", p->lifc_len);
3247 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)", p->lifc_buf);
3248 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_family)",
3249 p->lifc_family);
3250 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_flags)",
3251 p->lifc_flags);
3252 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3253 if ((p->lifc_buf != NULL) && (p->lifc_len > 0))
3254 PRE_MEM_WRITE("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)",
3255 (Addr) p->lifc_buf, p->lifc_len);
3257 /* lifc_len is also written to during the SIOCGLIFCONF ioctl. */
3259 break;
3260 case VKI_SIOCGLIFFLAGS:
3262 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3263 PRE_FIELD_READ("ioctl(SIOCGLIFFLAGS, lifreq->lifr_name)",
3264 p->lifr_name);
3265 PRE_FIELD_WRITE("ioctl(SIOCGLIFFLAGS, lifreq->lifr_flags)",
3266 p->lifr_flags);
3268 break;
3269 case VKI_SIOCGLIFNETMASK:
3271 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3272 PRE_FIELD_READ("ioctl(SIOCGLIFNETMASK, lifreq->lifr_name)",
3273 p->lifr_name);
3274 PRE_FIELD_WRITE("ioctl(SIOCGLIFNETMASK, lifreq->lifr_addr)",
3275 p->lifr_addr);
3277 break;
3278 case VKI_SIOCGLIFNUM:
3280 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3281 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_family)",
3282 p->lifn_family);
3283 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_flags)",
3284 p->lifn_flags);
3285 PRE_FIELD_WRITE("ioctl(SIOCGLIFNUM, lifn->lifn_count)",
3286 p->lifn_count);
3288 break;
3290 /* filio */
3291 case VKI_FIOSETOWN:
3292 PRE_MEM_READ("ioctl(FIOSETOWN)", ARG3, sizeof(vki_pid_t));
3293 break;
3294 case VKI_FIOGETOWN:
3295 PRE_MEM_WRITE("ioctl(FIOGETOWN)", ARG3, sizeof(vki_pid_t));
3296 break;
3298 /* CRYPTO */
3299 case VKI_CRYPTO_GET_PROVIDER_LIST:
3301 vki_crypto_get_provider_list_t *pl =
3302 (vki_crypto_get_provider_list_t *) ARG3;
3303 PRE_FIELD_READ("ioctl(CRYPTO_GET_PROVIDER_LIST, pl->pl_count)",
3304 pl->pl_count);
3306 if (ML_(safe_to_deref)(pl, sizeof(*pl))) {
3307 PRE_MEM_WRITE("ioctl(CRYPTO_GET_PROVIDER_LIST)", ARG3,
3308 MAX(1, pl->pl_count) *
3309 sizeof(vki_crypto_get_provider_list_t));
3311 /* Save the requested count to unused ARG4 below,
3312 when we know pre-handler succeeded.
3315 break;
3317 /* dtrace */
3318 case VKI_DTRACEHIOC_REMOVE:
3319 break;
3320 case VKI_DTRACEHIOC_ADDDOF:
3322 vki_dof_helper_t *dh = (vki_dof_helper_t *) ARG3;
3323 PRE_MEM_RASCIIZ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_mod)",
3324 (Addr) dh->dofhp_mod);
3325 PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_addr)",
3326 dh->dofhp_addr);
3327 PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_dof)",
3328 dh->dofhp_dof);
3330 break;
3332 /* devinfo */
3333 case VKI_DINFOUSRLD:
3334 /* We should do PRE_MEM_WRITE here, but the question is: for how many bytes? */
3335 break;
3336 case VKI_DINFOIDENT:
3337 break;
3339 default:
3340 ML_(PRE_unknown_ioctl)(tid, ARG2, ARG3);
3341 break;
3344 /* Be strict. */
3345 if (!ML_(fd_allowed)(ARG1, "ioctl", tid, False)) {
3346 SET_STATUS_Failure(VKI_EBADF);
3347 } else if (ARG2 == VKI_CRYPTO_GET_PROVIDER_LIST) {
3348 /* Save the requested count to unused ARG4 now. */
3349 ARG4 = ARG3;
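   /* ARG4 is not used by ioctl itself (at most three arguments matter here),
      so it can serve as a scratch slot that survives into the POST handler:
      a non-zero ARG4 there means this point was reached, i.e. the fd was
      allowed and the request really is CRYPTO_GET_PROVIDER_LIST. */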
3353 POST(sys_ioctl)
3355 /* Prevent sign extending the switch case values to 64-bits on 64-bits
3356 architectures. */
3357 Int cmd = (Int) ARG2;
3359 switch (cmd /*request*/) {
3360 /* pools */
3361 case VKI_POOL_STATUSQ:
3362 POST_MEM_WRITE(ARG3, sizeof(vki_pool_status_t));
3363 break;
3365 /* mntio */
3366 case VKI_MNTIOC_GETEXTMNTENT:
3368 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3369 struct vki_extmnttab *mnt = (struct vki_extmnttab *) embuf->mbuf_emp;
3371 POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_extmnttab));
3372 if (mnt != NULL) {
3373 if (mnt->mnt_special != NULL)
3374 POST_MEM_WRITE((Addr) mnt->mnt_special,
3375 VG_(strlen)(mnt->mnt_special) + 1);
3376 if (mnt->mnt_mountp != NULL)
3377 POST_MEM_WRITE((Addr) mnt->mnt_mountp,
3378 VG_(strlen)(mnt->mnt_mountp) + 1);
3379 if (mnt->mnt_fstype != NULL)
3380 POST_MEM_WRITE((Addr) mnt->mnt_fstype,
3381 VG_(strlen)(mnt->mnt_fstype) + 1);
3382 if (mnt->mnt_mntopts != NULL)
3383 POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
3384 VG_(strlen)(mnt->mnt_mntopts) + 1);
3385 if (mnt->mnt_time != NULL)
3386 POST_MEM_WRITE((Addr) mnt->mnt_time,
3387 VG_(strlen)(mnt->mnt_time) + 1);
3390 break;
3392 case VKI_MNTIOC_GETMNTANY:
3394 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3395 struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
3397 POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_mnttab));
3398 if (mnt != NULL) {
3399 if (mnt->mnt_special != NULL)
3400 POST_MEM_WRITE((Addr) mnt->mnt_special,
3401 VG_(strlen)(mnt->mnt_special) + 1);
3402 if (mnt->mnt_mountp != NULL)
3403 POST_MEM_WRITE((Addr) mnt->mnt_mountp,
3404 VG_(strlen)(mnt->mnt_mountp) + 1);
3405 if (mnt->mnt_fstype != NULL)
3406 POST_MEM_WRITE((Addr) mnt->mnt_fstype,
3407 VG_(strlen)(mnt->mnt_fstype) + 1);
3408 if (mnt->mnt_mntopts != NULL)
3409 POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
3410 VG_(strlen)(mnt->mnt_mntopts) + 1);
3411 if (mnt->mnt_time != NULL)
3412 POST_MEM_WRITE((Addr) mnt->mnt_time,
3413 VG_(strlen)(mnt->mnt_time) + 1);
3416 break;
3418 /* termio/termios */
3419 case VKI_TCGETA:
3420 POST_MEM_WRITE(ARG3, sizeof(struct vki_termio));
3421 break;
3422 case VKI_TCGETS:
3423 POST_MEM_WRITE(ARG3, sizeof(struct vki_termios));
3424 break;
3425 case VKI_TCSETS:
3426 break;
3427 case VKI_TCSETSW:
3428 break;
3429 case VKI_TCSETSF:
3430 break;
3431 case VKI_TIOCGWINSZ:
3432 POST_MEM_WRITE(ARG3, sizeof(struct vki_winsize));
3433 break;
3434 case VKI_TIOCSWINSZ:
3435 break;
3436 case VKI_TIOCGPGRP:
3437 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3438 break;
3439 case VKI_TIOCSPGRP:
3440 break;
3441 case VKI_TIOCGSID:
3442 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3443 break;
3444 case VKI_TIOCNOTTY:
3445 case VKI_TIOCSCTTY:
3446 break;
3448 /* STREAMS */
3449 case VKI_I_PUSH:
3450 break;
3451 case VKI_I_FLUSH:
3452 break;
3453 case VKI_I_STR:
3455 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3457 POST_FIELD_WRITE(p->ic_len);
3458 if ((p->ic_dp != NULL) && (p->ic_len > 0))
3459 POST_MEM_WRITE((Addr) p->ic_dp, p->ic_len);
3461 break;
3462 case VKI_I_FIND:
3463 break;
3464 case VKI_I_PEEK:
3466 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3468 POST_FIELD_WRITE(p->ctlbuf.len);
3469 POST_FIELD_WRITE(p->databuf.len);
3470 POST_FIELD_WRITE(p->flags);
3472 if (p->ctlbuf.buf && p->ctlbuf.len > 0)
3473 POST_MEM_WRITE((Addr)p->ctlbuf.buf, p->ctlbuf.len);
3474 if (p->databuf.buf && p->databuf.len > 0)
3475 POST_MEM_WRITE((Addr)p->databuf.buf, p->databuf.len);
3477 break;
3478 case VKI_I_CANPUT:
3479 break;
3481 /* sockio */
3482 case VKI_SIOCGIFCONF:
3484 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3485 POST_FIELD_WRITE(p->ifc_len);
3486 POST_FIELD_WRITE(p->ifc_req);
3487 if ((p->ifc_req != NULL) && (p->ifc_len > 0))
3488 POST_MEM_WRITE((Addr) p->ifc_req, p->ifc_len);
3490 break;
3491 case VKI_SIOCGIFFLAGS:
3493 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3494 POST_FIELD_WRITE(p->ifr_flags);
3496 break;
3497 case VKI_SIOCGIFNETMASK:
3499 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3500 POST_FIELD_WRITE(p->ifr_addr);
3502 break;
3503 case VKI_SIOCGIFNUM:
3504 POST_MEM_WRITE(ARG3, sizeof(int));
3505 break;
3506 case VKI_SIOCGLIFBRDADDR:
3508 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3509 POST_FIELD_WRITE(p->lifr_addr);
3511 break;
3512 case VKI_SIOCGLIFCONF:
3514 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3515 POST_FIELD_WRITE(p->lifc_len);
3516 POST_FIELD_WRITE(p->lifc_req);
3517 if ((p->lifc_req != NULL) && (p->lifc_len > 0))
3518 POST_MEM_WRITE((Addr) p->lifc_req, p->lifc_len);
3520 break;
3521 case VKI_SIOCGLIFFLAGS:
3523 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3524 POST_FIELD_WRITE(p->lifr_flags);
3526 break;
3527 case VKI_SIOCGLIFNETMASK:
3529 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3530 POST_FIELD_WRITE(p->lifr_addr);
3532 break;
3533 case VKI_SIOCGLIFNUM:
3535 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3536 POST_FIELD_WRITE(p->lifn_count);
3538 break;
3540 /* filio */
3541 case VKI_FIOSETOWN:
3542 break;
3543 case VKI_FIOGETOWN:
3544 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3545 break;
3547 /* CRYPTO */
3548 case VKI_CRYPTO_GET_PROVIDER_LIST:
3550 vki_crypto_get_provider_list_t *pl =
3551 (vki_crypto_get_provider_list_t *) ARG3;
3553 POST_FIELD_WRITE(pl->pl_count);
3554 POST_FIELD_WRITE(pl->pl_return_value);
3556 if ((ARG4 > 0) && (pl->pl_return_value == VKI_CRYPTO_SUCCESS))
3557 POST_MEM_WRITE((Addr) pl->pl_list, pl->pl_count *
3558 sizeof(vki_crypto_provider_entry_t));
3560 break;
3562 /* dtrace */
3563 case VKI_DTRACEHIOC_REMOVE:
3564 case VKI_DTRACEHIOC_ADDDOF:
3565 break;
3567 /* devinfo */
3568 case VKI_DINFOUSRLD:
3569 POST_MEM_WRITE(ARG3, RES);
3570 break;
3571 case VKI_DINFOIDENT:
3572 break;
3574 default:
3575 /* Not really anything to do since ioctl direction hints are hardly used
3576 on Solaris. */
3577 break;
3581 PRE(sys_fchownat)
3583 /* int fchownat(int fd, const char *path, uid_t owner, gid_t group,
3584 int flag); */
3586 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
3587 This is different from Linux, for example, where glibc sign-extends it. */
3588 Int fd = (Int) ARG1;
3590 PRINT("sys_fchownat ( %d, %#lx(%s), %ld, %ld, %ld )", fd,
3591 ARG2, (HChar *) ARG2, SARG3, SARG4, SARG5);
3592 PRE_REG_READ5(long, "fchownat", int, fd, const char *, path,
3593 vki_uid_t, owner, vki_gid_t, group, int, flag);
3595 if (ARG2)
3596 PRE_MEM_RASCIIZ("fchownat(path)", ARG2);
3598 /* Be strict but ignore fd for absolute path. */
3599 if (fd != VKI_AT_FDCWD
3600 && ML_(safe_to_deref)((void *) ARG2, 1)
3601 && ((HChar *) ARG2)[0] != '/'
3602 && !ML_(fd_allowed)(fd, "fchownat", tid, False))
3603 SET_STATUS_Failure(VKI_EBADF);
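/* For illustration: a minimal client-side sketch of the two cases the
   wrapper above distinguishes (path names and ownership values are made up;
   error handling elided):

      #include <fcntl.h>      // AT_FDCWD, AT_SYMLINK_NOFOLLOW, open()
      #include <unistd.h>     // fchownat()

      int dirfd = open("/var/tmp", O_RDONLY);

      // Relative path: resolved against dirfd, so the wrapper checks that
      // dirfd is a descriptor the client is allowed to use.
      if (dirfd != -1)
         (void) fchownat(dirfd, "scratch.txt", 100, 100, 0);

      // Absolute path: dirfd is ignored by the kernel, hence the wrapper
      // skips the fd_allowed() check in that case.
      (void) fchownat(AT_FDCWD, "/var/tmp/scratch.txt", 100, 100,
                      AT_SYMLINK_NOFOLLOW);
*/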
3606 PRE(sys_fdsync)
3608 /* int fdsync(int fd, int flag); */
3609 PRINT("sys_fdsync ( %ld, %ld )", SARG1, SARG2);
3610 PRE_REG_READ2(long, "fdsync", int, fd, int, flag);
3612 /* Be strict. */
3613 if (!ML_(fd_allowed)(ARG1, "fdsync", tid, False))
3614 SET_STATUS_Failure(VKI_EBADF);
3617 PRE(sys_execve)
3619 Int i, j;
3620 /* This is a Solaris specific version of the generic pre-execve wrapper. */
3622 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3623 /* int execve(uintptr_t file, const char **argv, const char **envp,
3624 int flags); */
3625 PRINT("sys_execve ( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
3626 PRE_REG_READ4(long, "execve", uintptr_t, file, const char **, argv,
3627 const char **, envp, int, flags);
3629 #else
3631 /* int execve(const char *fname, const char **argv, const char **envp); */
3632 PRINT("sys_execve ( %#lx(%s), %#lx, %#lx )",
3633 ARG1, (HChar *) ARG1, ARG2, ARG3);
3634 PRE_REG_READ3(long, "execve", const char *, file, const char **, argv,
3635 const char **, envp);
3636 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3638 Bool ARG1_is_fd = False;
3639 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3640 if (ARG4 & VKI_EXEC_DESCRIPTOR) {
3641 ARG1_is_fd = True;
3643 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3645 if (ARG1_is_fd == False)
3646 PRE_MEM_RASCIIZ("execve(filename)", ARG1);
3647 if (ARG2)
3648 ML_(pre_argv_envp)(ARG2, tid, "execve(argv)", "execve(argv[i])");
3649 if (ARG3)
3650 ML_(pre_argv_envp)(ARG3, tid, "execve(envp)", "execve(envp[i])");
3652 /* Erk. If the exec fails, then the following will have made a mess of
3653 things which makes it hard for us to continue. The right thing to do is
3654 piece everything together again in POST(execve), but that's close to
3655 impossible. Instead, we make an effort to check that the execve will
3656 work before actually doing it. */
3658 const HChar *fname = (const HChar *) ARG1;
3659 if (ARG1_is_fd) {
3660 if (!ML_(fd_allowed)(ARG1, "execve", tid, False)) {
3661 SET_STATUS_Failure(VKI_EBADF);
3662 return;
3665 if (VG_(resolve_filename)(ARG1, &fname) == False) {
3666 SET_STATUS_Failure(VKI_EBADF);
3667 return;
3670 struct vg_stat stats;
3671 if (VG_(fstat)(ARG1, &stats) != 0) {
3672 SET_STATUS_Failure(VKI_EBADF);
3673 return;
3676 if (stats.nlink > 1)
3677 VG_(unimplemented)("Syswrap of execve where fd points to a hardlink.");
3680 /* Check that the name at least begins in client-accessible storage. */
3681 if (ARG1_is_fd == False) {
3682 if ((fname == NULL) || !ML_(safe_to_deref)(fname, 1)) {
3683 SET_STATUS_Failure(VKI_EFAULT);
3684 return;
3688 /* Check that the args at least begin in client-accessible storage.
3689 Solaris does not allow an exec to be performed without any arguments.
3691 if (!ARG2 /* obviously bogus */ ||
3692 !VG_(am_is_valid_for_client)(ARG2, 1, VKI_PROT_READ)) {
3693 SET_STATUS_Failure(VKI_EFAULT);
3694 return;
3697 /* Debug-only printing. */
3698 if (0) {
3699 VG_(printf)("ARG1 = %#lx(%s)\n", ARG1, fname);
3700 if (ARG2) {
3701 Int q;
3702 HChar** vec = (HChar**)ARG2;
3704 VG_(printf)("ARG2 = ");
3705 for (q = 0; vec[q]; q++)
3706 VG_(printf)("%p(%s) ", vec[q], vec[q]);
3707 VG_(printf)("\n");
3709 else
3710 VG_(printf)("ARG2 = null\n");
3713 /* Decide whether or not we want to follow along. */
3714 /* Make 'child_argv' be a pointer to the child's arg vector (skipping the
3715 exe name) */
3716 const HChar **child_argv = (const HChar **) ARG2;
3717 if (child_argv[0] == NULL)
3718 child_argv = NULL;
3719 Bool trace_this_child = VG_(should_we_trace_this_child)(fname, child_argv);
3721 /* Do the important checks: it is a file, is executable, permissions are
3722 ok, etc. We allow setuid executables to run only when we are not
3723 simulating them, that is, when they are to be run natively. */
3724 Bool setuid_allowed = trace_this_child ? False : True;
3725 SysRes res = VG_(pre_exec_check)(fname, NULL, setuid_allowed);
3726 if (sr_isError(res)) {
3727 SET_STATUS_Failure(sr_Err(res));
3728 return;
3731 /* If we're tracing the child, and the launcher name looks bogus (possibly
3732 because launcher.c couldn't figure it out, see comments therein) then we
3733 have no option but to fail. */
3734 if (trace_this_child &&
3735 (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
3736 SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
3737 return;
3740 /* After this point, we can't recover if the execve fails. */
3741 VG_(debugLog)(1, "syswrap", "Exec of %s\n", fname);
3743 /* Terminate gdbserver if it is active. */
3744 if (VG_(clo_vgdb) != Vg_VgdbNo) {
3745 /* If the child will not be traced, we need to terminate gdbserver to
3746 clean up the gdbserver resources (e.g. the FIFO files). If the child will
3747 be traced, we also terminate gdbserver: the new Valgrind will start a
3748 fresh gdbserver after exec. */
3749 VG_(gdbserver)(0);
3752 /* Resistance is futile. Nuke all other threads. POSIX mandates this.
3753 (Really, nuke them all, since the new process will make its own new
3754 thread.) */
3755 VG_(nuke_all_threads_except)(tid, VgSrc_ExitThread);
3756 VG_(reap_threads)(tid);
3758 /* Set up the child's exe path. */
3759 const HChar *path = fname;
3760 const HChar *launcher_basename = NULL;
3761 if (trace_this_child) {
3762 /* We want to exec the launcher. Get its pre-remembered path. */
3763 path = VG_(name_of_launcher);
3764 /* VG_(name_of_launcher) should have been acquired by m_main at
3765 startup. */
3766 vg_assert(path);
3768 launcher_basename = VG_(strrchr)(path, '/');
3769 if (!launcher_basename || launcher_basename[1] == '\0')
3770 launcher_basename = path; /* hmm, very dubious */
3771 else
3772 launcher_basename++;
3775 /* Set up the child's environment.
3777 Remove the valgrind-specific stuff from the environment so the child
3778 doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc. This is done
3779 unconditionally, since if we are tracing the child, the child valgrind
3780 will set up the appropriate client environment. Nb: we make a copy of
3781 the environment before trying to mangle it as it might be in read-only
3782 memory (bug #101881).
3784 Then, if tracing the child, set VALGRIND_LIB for it. */
3785 HChar **envp = NULL;
3786 if (ARG3 != 0) {
3787 envp = VG_(env_clone)((HChar**)ARG3);
3788 vg_assert(envp != NULL);
3789 VG_(env_remove_valgrind_env_stuff)(envp, True /*ro_strings*/, NULL);
3792 if (trace_this_child) {
3793 /* Set VALGRIND_LIB in ARG3 (the environment). */
3794 VG_(env_setenv)( &envp, VALGRIND_LIB, VG_(libdir));
3797 /* Set up the child's args. If not tracing it, they are simply ARG2.
3798 Otherwise, they are:
3800 [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG2[1..],
3802 except that the first VG_(args_for_valgrind_noexecpass) args are
3803 omitted. */
3804 HChar **argv = NULL;
3805 if (!trace_this_child)
3806 argv = (HChar **) ARG2;
3807 else {
3808 Int tot_args;
3810 vg_assert(VG_(args_for_valgrind));
3811 vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
3812 vg_assert(VG_(args_for_valgrind_noexecpass)
3813 <= VG_(sizeXA)(VG_(args_for_valgrind)));
3815 /* How many args in total will there be? */
3816 /* launcher basename */
3817 tot_args = 1;
3818 /* V's args */
3819 tot_args += VG_(sizeXA)(VG_(args_for_valgrind));
3820 tot_args -= VG_(args_for_valgrind_noexecpass);
3821 /* name of client exe */
3822 tot_args++;
3823 /* args for client exe, skipping [0] */
3824 HChar **arg2copy = (HChar **) ARG2;
3825 if (arg2copy[0] != NULL)
3826 for (i = 1; arg2copy[i]; i++)
3827 tot_args++;
3828 /* allocate */
3829 argv = VG_(malloc)("syswrap.exec.5", (tot_args + 1) * sizeof(HChar*));
3830 /* copy */
3831 j = 0;
3832 argv[j++] = CONST_CAST(HChar *, launcher_basename);
3833 for (i = 0; i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
3834 if (i < VG_(args_for_valgrind_noexecpass))
3835 continue;
3836 argv[j++] = *(HChar**)VG_(indexXA)(VG_(args_for_valgrind), i);
3838 argv[j++] = CONST_CAST(HChar *, fname);
3839 if (arg2copy[0] != NULL)
3840 for (i = 1; arg2copy[i]; i++)
3841 argv[j++] = arg2copy[i];
3842 argv[j++] = NULL;
3843 /* check */
3844 vg_assert(j == tot_args + 1);
3847 /* Set the signal state up for exec.
3849 We need to set the real signal state to make sure the exec'd process
3850 gets SIG_IGN properly.
3852 Also set our real sigmask to match the client's sigmask so that the
3853 exec'd child will get the right mask. First we need to clear out any
3854 pending signals so they don't get delivered, which would confuse
3855 things.
3857 XXX This is a bug - the signals should remain pending, and be delivered
3858 to the new process after exec. There's also a race-condition, since if
3859 someone delivers us a signal between the sigprocmask and the execve,
3860 we'll still get the signal. Oh well.
3863 vki_sigset_t allsigs;
3864 vki_siginfo_t info;
3866 /* What this loop does: it queries SCSS (the signal state that the
3867 client _thinks_ the kernel is in) by calling VG_(do_sys_sigaction),
3868 and modifies the real kernel signal state accordingly. */
3869 for (i = 1; i < VG_(max_signal); i++) {
3870 vki_sigaction_fromK_t sa_f;
3871 vki_sigaction_toK_t sa_t;
3872 VG_(do_sys_sigaction)(i, NULL, &sa_f);
3873 VG_(convert_sigaction_fromK_to_toK)(&sa_f, &sa_t);
3874 VG_(sigaction)(i, &sa_t, NULL);
3877 VG_(sigfillset)(&allsigs);
3878 while (VG_(sigtimedwait_zero)(&allsigs, &info) > 0)
3881 ThreadState *tst = VG_(get_ThreadState)(tid);
3882 VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, NULL);
3885 /* Debug-only printing. */
3886 if (0) {
3887 HChar **cpp;
3888 VG_(printf)("exec: %s\n", path);
3889 for (cpp = argv; cpp && *cpp; cpp++)
3890 VG_(printf)("argv: %s\n", *cpp);
3891 if (0)
3892 for (cpp = envp; cpp && *cpp; cpp++)
3893 VG_(printf)("env: %s\n", *cpp);
3896 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3897 res = VG_(do_syscall4)(__NR_execve, (UWord) path, (UWord) argv,
3898 (UWord) envp, ARG4 & ~VKI_EXEC_DESCRIPTOR);
3899 #else
3900 res = VG_(do_syscall3)(__NR_execve, (UWord) path, (UWord) argv,
3901 (UWord) envp);
3902 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3903 SET_STATUS_from_SysRes(res);
3905 /* If we got here, then the execve failed. We've already made way too much
3906 of a mess to continue, so we have to abort. */
3907 vg_assert(FAILURE);
3908 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3909 if (ARG1_is_fd)
3910 VG_(message)(Vg_UserMsg, "execve(%ld, %#lx, %#lx, %lu) failed, "
3911 "errno %ld\n", SARG1, ARG2, ARG3, ARG4, ERR);
3912 else
3913 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx, %ld) failed, errno"
3914 " %lu\n", ARG1, (HChar *) ARG1, ARG2, ARG3, SARG4, ERR);
3915 #else
3916 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx) failed, errno %lu\n",
3917 ARG1, (HChar *) ARG1, ARG2, ARG3, ERR);
3918 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3919 VG_(message)(Vg_UserMsg, "EXEC FAILED: I can't recover from "
3920 "execve() failing, so I'm dying.\n");
3921 VG_(message)(Vg_UserMsg, "Add more stringent tests in PRE(sys_execve), "
3922 "or work out how to recover.\n");
3923 VG_(exit)(101);
3924 /*NOTREACHED*/
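/* A worked example of the argv assembly above, under assumed options.  If
   Valgrind was started as "valgrind --tool=memcheck -q ./client" and the
   client calls

      execve("/bin/ls", (char *[]){ "ls", "-l", "/tmp", NULL }, envp);

   then, with trace_this_child set and no options marked noexecpass, the
   wrapper re-execs the launcher with roughly

      argv = { "valgrind", "--tool=memcheck", "-q",
               "/bin/ls", "-l", "/tmp", NULL }

   i.e. the launcher basename, then VG_(args_for_valgrind) minus its first
   VG_(args_for_valgrind_noexecpass) entries, then the exec'd file name, then
   the client's argv[1..].  VALGRIND_LIB is also set in the child's
   environment so that the launcher can find the tool. */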
3927 static void pre_mem_read_flock(ThreadId tid, struct vki_flock *lock)
3929 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3930 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3931 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3932 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3935 #if defined(VGP_x86_solaris)
3936 static void pre_mem_read_flock64(ThreadId tid, struct vki_flock64 *lock)
3938 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3939 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3940 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3941 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3943 #endif /* VGP_x86_solaris */
3945 PRE(sys_fcntl)
3947 /* int fcntl(int fildes, int cmd, ...); */
3949 switch (ARG2 /*cmd*/) {
3950 /* These ones ignore ARG3. */
3951 case VKI_F_GETFD:
3952 case VKI_F_GETFL:
3953 case VKI_F_GETXFL:
3954 PRINT("sys_fcntl ( %ld, %ld )", SARG1, SARG2);
3955 PRE_REG_READ2(long, "fcntl", int, fildes, int, cmd);
3956 break;
3958 /* These ones use ARG3 as "arg". */
3959 case VKI_F_DUPFD:
3960 case VKI_F_DUPFD_CLOEXEC:
3961 case VKI_F_SETFD:
3962 case VKI_F_SETFL:
3963 case VKI_F_DUP2FD:
3964 case VKI_F_BADFD:
3965 PRINT("sys_fcntl ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
3966 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd, int, arg);
3967 /* Check that the client program isn't going to poison any of V's output
3968 fds. */
3969 if (ARG2 == VKI_F_DUP2FD &&
3970 !ML_(fd_allowed)(ARG3, "fcntl(F_DUP2FD)", tid, False)) {
3971 SET_STATUS_Failure(VKI_EBADF);
3972 return;
3974 break;
3976 /* These ones use ARG3 as "native lock" (input only). */
3977 case VKI_F_SETLK:
3978 case VKI_F_SETLKW:
3979 case VKI_F_ALLOCSP:
3980 case VKI_F_FREESP:
3981 case VKI_F_SETLK_NBMAND:
3982 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3983 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3984 struct flock *, lock);
3985 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
3986 break;
3988 /* This one uses ARG3 as "native lock" (input&output). */
3989 case VKI_F_GETLK:
3990 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3991 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3992 struct flock *, lock);
3993 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
3994 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock));
3995 break;
3997 #if defined(VGP_x86_solaris)
3998 /* These ones use ARG3 as "transitional 64b lock" (input only). */
3999 case VKI_F_SETLK64:
4000 case VKI_F_SETLKW64:
4001 case VKI_F_ALLOCSP64:
4002 case VKI_F_FREESP64:
4003 case VKI_F_SETLK64_NBMAND:
4004 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4005 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4006 struct flock64 *, lock);
4007 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
4008 break;
4010 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
4011 case VKI_F_GETLK64:
4012 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4013 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4014 struct flock64 *, lock);
4015 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
4016 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock64));
4017 break;
4018 #endif /* VGP_x86_solaris */
4020 /* These ones use ARG3 as "fshare". */
4021 case VKI_F_SHARE:
4022 case VKI_F_UNSHARE:
4023 case VKI_F_SHARE_NBMAND:
4024 PRINT("sys_fcntl[ARG3=='fshare'] ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4025 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4026 struct fshare *, sh);
4027 PRE_MEM_READ("fcntl(fshare)", ARG3, sizeof(struct vki_fshare));
4028 break;
4030 default:
4031 VG_(unimplemented)("Syswrap of the fcntl call with cmd %ld.", SARG2);
4032 /*NOTREACHED*/
4033 break;
4036 if (ARG2 == VKI_F_SETLKW
4037 #if defined(VGP_x86_solaris)
4038 || ARG2 == VKI_F_SETLKW64
4039 #endif /* VGP_x86_solaris */
4041 *flags |= SfMayBlock;
4043 /* Be strict. */
4044 if (!ML_(fd_allowed)(ARG1, "fcntl", tid, False))
4045 SET_STATUS_Failure(VKI_EBADF);
4048 POST(sys_fcntl)
4050 switch (ARG2 /*cmd*/) {
4051 case VKI_F_DUPFD:
4052 if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD)", tid, True)) {
4053 VG_(close)(RES);
4054 SET_STATUS_Failure(VKI_EMFILE);
4055 } else if (VG_(clo_track_fds))
4056 ML_(record_fd_open_named)(tid, RES);
4057 break;
4059 case VKI_F_DUPFD_CLOEXEC:
4060 if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD_CLOEXEC)", tid, True)) {
4061 VG_(close)(RES);
4062 SET_STATUS_Failure(VKI_EMFILE);
4063 } else if (VG_(clo_track_fds))
4064 ML_(record_fd_open_named)(tid, RES);
4065 break;
4067 case VKI_F_DUP2FD:
4068 if (!ML_(fd_allowed)(RES, "fcntl(F_DUP2FD)", tid, True)) {
4069 VG_(close)(RES);
4070 SET_STATUS_Failure(VKI_EMFILE);
4071 } else if (VG_(clo_track_fds))
4072 ML_(record_fd_open_named)(tid, RES);
4073 break;
4075 /* This one uses ARG3 as "native lock" (input&output). */
4076 case VKI_F_GETLK:
4077 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock));
4078 break;
4080 #if defined(VGP_x86_solaris)
4081 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
4082 case VKI_F_GETLK64:
4083 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock64));
4084 break;
4085 #endif /* VGP_x86_solaris */
4087 default:
4088 break;
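/* Illustrative client-side use of F_GETLK (a sketch; the descriptor is
   assumed to be open already):

      #include <fcntl.h>
      #include <string.h>

      static int is_write_locked(int fd)
      {
         struct flock fl;
         memset(&fl, 0, sizeof(fl));
         fl.l_type = F_WRLCK;      // would a write lock conflict?
         fl.l_whence = SEEK_SET;
         fl.l_start = 0;
         fl.l_len = 0;             // zero length = up to end of file
         if (fcntl(fd, F_GETLK, &fl) == -1)
            return -1;
         return fl.l_type != F_UNLCK;
      }

   PRE(sys_fcntl) above checks that l_type, l_whence, l_start and l_len are
   initialised and that the struct is addressable for writing;
   POST(sys_fcntl) then marks the whole struct flock as defined. */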
4092 PRE(sys_renameat)
4094 /* int renameat(int fromfd, const char *old, int tofd, const char *new); */
4096 /* Interpret the first and third arguments as 32-bit values even on 64-bit
4097 architecture. This is different from Linux, for example, where glibc
4098 sign-extends them. */
4099 Int fromfd = (Int) ARG1;
4100 Int tofd = (Int) ARG3;
4102 *flags |= SfMayBlock;
4103 PRINT("sys_renameat ( %d, %#lx(%s), %d, %#lx(%s) )", fromfd,
4104 ARG2, (HChar *) ARG2, tofd, ARG4, (HChar *) ARG4);
4105 PRE_REG_READ4(long, "renameat", int, fromfd, const char *, old,
4106 int, tofd, const char *, new);
4108 PRE_MEM_RASCIIZ("renameat(old)", ARG2);
4109 PRE_MEM_RASCIIZ("renameat(new)", ARG4);
4111 /* Be strict but ignore fromfd/tofd for absolute old/new. */
4112 if (fromfd != VKI_AT_FDCWD
4113 && ML_(safe_to_deref)((void *) ARG2, 1)
4114 && ((HChar *) ARG2)[0] != '/'
4115 && !ML_(fd_allowed)(fromfd, "renameat", tid, False)) {
4116 SET_STATUS_Failure(VKI_EBADF);
4118 if (tofd != VKI_AT_FDCWD
4119 && ML_(safe_to_deref)((void *) ARG4, 1)
4120 && ((HChar *) ARG4)[0] != '/'
4121 && !ML_(fd_allowed)(tofd, "renameat", tid, False)) {
4122 SET_STATUS_Failure(VKI_EBADF);
4126 PRE(sys_unlinkat)
4128 /* int unlinkat(int dirfd, const char *pathname, int flags); */
4130 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4131 This is different from Linux, for example, where glibc sign-extends it. */
4132 Int dfd = (Int) ARG1;
4134 *flags |= SfMayBlock;
4135 PRINT("sys_unlinkat ( %d, %#lx(%s), %ld )", dfd, ARG2, (HChar *) ARG2,
4136 SARG3);
4137 PRE_REG_READ3(long, "unlinkat", int, dirfd, const char *, pathname,
4138 int, flags);
4139 PRE_MEM_RASCIIZ("unlinkat(pathname)", ARG2);
4141 /* Be strict but ignore dfd for absolute pathname. */
4142 if (dfd != VKI_AT_FDCWD
4143 && ML_(safe_to_deref)((void *) ARG2, 1)
4144 && ((HChar *) ARG2)[0] != '/'
4145 && !ML_(fd_allowed)(dfd, "unlinkat", tid, False))
4146 SET_STATUS_Failure(VKI_EBADF);
4149 PRE(sys_fstatat)
4151 /* int fstatat(int fildes, const char *path, struct stat *buf,
4152 int flag); */
4154 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4155 This is different from Linux, for example, where glibc sign-extends it. */
4156 Int fd = (Int) ARG1;
4158 PRINT("sys_fstatat ( %d, %#lx(%s), %#lx, %ld )", fd, ARG2,
4159 (HChar *) ARG2, ARG3, SARG4);
4160 PRE_REG_READ4(long, "fstatat", int, fildes, const char *, path,
4161 struct stat *, buf, int, flag);
4162 if (ARG2) {
4163 /* Only test ARG2 if it isn't NULL. The kernel treats the NULL-case as
4164 fstat(fildes, buf). */
4165 PRE_MEM_RASCIIZ("fstatat(path)", ARG2);
4167 PRE_MEM_WRITE("fstatat(buf)", ARG3, sizeof(struct vki_stat));
4169 /* Be strict but ignore fildes for absolute path. */
4170 if (fd != VKI_AT_FDCWD
4171 && ML_(safe_to_deref)((void *) ARG2, 1)
4172 && ((HChar *) ARG2)[0] != '/'
4173 && !ML_(fd_allowed)(fd, "fstatat", tid, False))
4174 SET_STATUS_Failure(VKI_EBADF);
4177 POST(sys_fstatat)
4179 POST_MEM_WRITE(ARG3, sizeof(struct vki_stat));
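/* A small client-side sketch of the NULL-path special case noted above
   (dirfd is assumed to be an open directory descriptor; error handling
   elided):

      #include <sys/stat.h>
      #include <fcntl.h>

      struct stat st;

      // Path relative to dirfd.
      (void) fstatat(dirfd, "data.bin", &st, 0);

      // A NULL path behaves like fstat(dirfd, &st), which is why the wrapper
      // only checks the path string when ARG2 is non-NULL.
      (void) fstatat(dirfd, NULL, &st, 0);
*/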
4182 PRE(sys_openat)
4184 /* int openat(int fildes, const char *filename, int flags);
4185 int openat(int fildes, const char *filename, int flags, mode_t mode); */
4187 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4188 This is different from Linux, for example, where glibc sign-extends it. */
4189 Int fd = (Int) ARG1;
4191 if (ARG3 & VKI_O_CREAT) {
4192 /* 4-arg version */
4193 PRINT("sys_openat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2, (HChar *) ARG2,
4194 SARG3, SARG4);
4195 PRE_REG_READ4(long, "openat", int, fildes, const char *, filename,
4196 int, flags, vki_mode_t, mode);
4198 else {
4199 /* 3-arg version */
4200 PRINT("sys_openat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2,
4201 SARG3);
4202 PRE_REG_READ3(long, "openat", int, fildes, const char *, filename,
4203 int, flags);
4206 PRE_MEM_RASCIIZ("openat(filename)", ARG2);
4208 /* Be strict but ignore fildes for absolute pathname. */
4209 if (fd != VKI_AT_FDCWD
4210 && ML_(safe_to_deref)((void *) ARG2, 1)
4211 && ((HChar *) ARG2)[0] != '/'
4212 && !ML_(fd_allowed)(fd, "openat", tid, False)) {
4213 SET_STATUS_Failure(VKI_EBADF);
4214 return;
4217 if (ML_(handle_auxv_open)(status, (const HChar *) ARG2, ARG3))
4218 return;
4220 if (handle_psinfo_open(status, True /*use_openat*/, (const HChar *) ARG2,
4221 fd, ARG3, ARG4))
4222 return;
4224 #if defined(SOLARIS_PROC_CMDLINE)
4225 if (handle_cmdline_open(status, (const HChar *) ARG2))
4226 return;
4227 #endif /* SOLARIS_PROC_CMDLINE */
4229 *flags |= SfMayBlock;
4232 POST(sys_openat)
4234 if (!ML_(fd_allowed)(RES, "openat", tid, True)) {
4235 VG_(close)(RES);
4236 SET_STATUS_Failure(VKI_EMFILE);
4238 else if (VG_(clo_track_fds))
4239 ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
4242 PRE(sys_tasksys)
4244 /* Kernel: long tasksys(int code, projid_t projid, uint_t flags,
4245 void *projidbuf, size_t pbufsz);
4247 switch (ARG1 /*code*/) {
4248 case 0:
4249 /* Libc: taskid_t settaskid(projid_t project, uint_t flags); */
4250 PRINT("sys_tasksys ( %ld, %ld, %lu )", SARG1, SARG2, ARG3);
4251 PRE_REG_READ3(long, SC2("tasksys", "settaskid"), int, code,
4252 vki_projid_t, projid, vki_uint_t, flags);
4253 break;
4254 case 1:
4255 /* Libc: taskid_t gettaskid(void); */
4256 PRINT("sys_tasksys ( %ld )", SARG1);
4257 PRE_REG_READ1(long, SC2("tasksys", "gettaskid"), int, code);
4258 break;
4259 case 2:
4260 /* Libc: projid_t getprojid(void); */
4261 PRINT("sys_tasksys ( %ld )", SARG1);
4262 PRE_REG_READ1(long, SC2("tasksys", "getprojid"), int, code);
4263 break;
4264 case 3:
4265 /* Libproject: size_t projlist(id_t *idbuf, size_t idbufsz); */
4266 PRINT("sys_tasksys ( %ld, %#lx, %lu )", SARG1, ARG4, ARG5);
4267 PRE_REG_READ3(long, SC2("tasksys", "projlist"), int, code,
4268 vki_id_t *, idbuf, vki_size_t, idbufsz);
4269 PRE_MEM_WRITE("tasksys(idbuf)", ARG4, ARG5);
4270 break;
4271 default:
4272 VG_(unimplemented)("Syswrap of the tasksys call with code %ld.", SARG1);
4273 /*NOTREACHED*/
4274 break;
4278 POST(sys_tasksys)
4280 switch (ARG1 /*code*/) {
4281 case 0:
4282 case 1:
4283 case 2:
4284 break;
4285 case 3:
4286 if ((ARG4 != 0) && (ARG5 != 0))
4287 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
4288 break;
4289 default:
4290 vg_assert(0);
4291 break;
4295 PRE(sys_lwp_park)
4297 /* Kernel: int lwp_park(int which, uintptr_t arg1, uintptr_t arg2);
4299 *flags |= SfMayBlock;
4300 switch (ARG1 /*which*/) {
4301 case 0:
4302 /* Libc: int lwp_park(timespec_t *timeout, id_t lwpid); */
4303 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4304 PRE_REG_READ3(long, SC2("lwp_park", "lwp_park"), int, which,
4305 timespec_t *, timeout, vki_id_t, lwpid);
4306 if (ARG2) {
4307 PRE_MEM_READ("lwp_park(timeout)", ARG2, sizeof(vki_timespec_t));
4308 /*PRE_MEM_WRITE("lwp_park(timeout)", ARG2,
4309 sizeof(vki_timespec_t));*/
4311 break;
4312 case 1:
4313 /* Libc: int lwp_unpark(id_t lwpid); */
4314 PRINT("sys_lwp_park ( %ld, %ld )", SARG1, SARG2);
4315 PRE_REG_READ2(long, SC2("lwp_park", "lwp_unpark"), int, which,
4316 vki_id_t, lwpid);
4317 break;
4318 case 2:
4319 /* Libc: int lwp_unpark_all(id_t *lwpid, int nids); */
4320 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4321 PRE_REG_READ3(long, SC2("lwp_park", "lwp_unpark_all"), int, which,
4322 id_t *, lwpid, int, nids);
4323 PRE_MEM_READ("lwp_park(lwpid)", ARG2, ARG3 * sizeof(vki_id_t));
4324 break;
4325 default:
4326 VG_(unimplemented)("Syswrap of the lwp_park call with which %ld.", SARG1);
4327 /*NOTREACHED*/
4328 break;
4332 POST(sys_lwp_park)
4334 switch (ARG1 /*which*/) {
4335 case 0:
4336 if (ARG2)
4337 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
4338 break;
4339 case 1:
4340 case 2:
4341 break;
4342 default:
4343 vg_assert(0);
4344 break;
4348 PRE(sys_sendfilev)
4350 /* Kernel: ssize_t sendfilev(int opcode, int fd,
4351 const struct sendfilevec *vec,
4352 int sfvcnt, size_t *xferred);
4354 PRINT("sys_sendfilev ( %ld, %ld, %#lx, %ld, %#lx )",
4355 SARG1, SARG2, ARG3, SARG4, ARG5);
4357 switch (ARG1 /*opcode*/) {
4358 case VKI_SENDFILEV:
4360 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4361 const struct vki_sendfilevec *, vec,
4362 int, sfvcnt, vki_size_t *, xferred);
4364 PRE_MEM_READ("sendfilev(vec)", ARG3,
4365 ARG4 * sizeof(struct vki_sendfilevec));
4366 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
4368 struct vki_sendfilevec *vec = (struct vki_sendfilevec *) ARG3;
4369 if (ML_(safe_to_deref)(vec, ARG4 *
4370 sizeof(struct vki_sendfilevec))) {
4371 UInt i;
4372 for (i = 0; i < ARG4; i++) {
4373 HChar desc[35]; // large enough
4374 if (vec[i].sfv_fd == VKI_SFV_FD_SELF) {
4375 VG_(snprintf)(desc, sizeof(desc),
4376 "sendfilev(vec[%u].sfv_off", i);
4377 PRE_MEM_READ(desc, vec[i].sfv_off, vec[i].sfv_len);
4378 } else {
4379 VG_(snprintf)(desc, sizeof(desc),
4380 "sendfilev(vec[%u].sfv_fd)", i);
4381 if (!ML_(fd_allowed)(vec[i].sfv_fd, desc, tid, False))
4382 SET_STATUS_Failure(VKI_EBADF);
4387 break;
4388 case VKI_SENDFILEV64:
4390 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4391 const struct vki_sendfilevec64 *, vec,
4392 int, sfvcnt, vki_size_t *, xferred);
4394 PRE_MEM_READ("sendfilev(vec)", ARG3,
4395 ARG4 * sizeof(struct vki_sendfilevec64));
4396 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
4398 struct vki_sendfilevec64 *vec64 =
4399 (struct vki_sendfilevec64 *) ARG3;
4400 if (ML_(safe_to_deref)(vec64, ARG4 *
4401 sizeof(struct vki_sendfilevec64))) {
4402 UInt i;
4403 for (i = 0; i < ARG4; i++) {
4404 HChar desc[35]; // large enough
4405 if (vec64[i].sfv_fd == VKI_SFV_FD_SELF) {
4406 VG_(snprintf)(desc, sizeof(desc),
4407 "sendfilev(vec[%u].sfv_off", i);
4408 PRE_MEM_READ(desc, vec64[i].sfv_off, vec64[i].sfv_len);
4409 } else {
4410 VG_(snprintf)(desc, sizeof(desc),
4411 "sendfilev(vec[%u].sfv_fd)", i);
4412 if (!ML_(fd_allowed)(vec64[i].sfv_fd, desc,
4413 tid, False))
4414 SET_STATUS_Failure(VKI_EBADF);
4419 break;
4420 default:
4421 VG_(unimplemented)("Syswrap of the sendfilev call with "
4422 "opcode %ld.", SARG1);
4423 /*NOTREACHED*/
4424 break;
4427 /* Be strict. */
4428 if (!ML_(fd_allowed)(ARG2, "sendfilev(fd)", tid, False))
4429 SET_STATUS_Failure(VKI_EBADF);
4431 *flags |= SfMayBlock;
4434 POST(sys_sendfilev)
4436 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
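/* Illustrative client-side use of sendfilev() (a sketch; sock_fd, file_fd
   and file_len are assumed to exist and error handling is elided):

      #include <sys/sendfile.h>
      #include <string.h>

      static const char hdr[] = "HTTP/1.0 200 OK\r\n\r\n";
      struct sendfilevec vec[2];
      size_t xferred = 0;

      vec[0].sfv_fd   = SFV_FD_SELF;       // data taken from memory ...
      vec[0].sfv_flag = 0;
      vec[0].sfv_off  = (off_t) hdr;       // ... at this address
      vec[0].sfv_len  = strlen(hdr);

      vec[1].sfv_fd   = file_fd;           // data taken from a file
      vec[1].sfv_flag = 0;
      vec[1].sfv_off  = 0;
      vec[1].sfv_len  = file_len;

      ssize_t sent = sendfilev(sock_fd, vec, 2, &xferred);

   This matches the checks above: for SFV_FD_SELF entries the wrapper treats
   sfv_off as an address and checks sfv_len bytes there; for other entries it
   checks that sfv_fd is a descriptor the client may use. */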
4439 #if defined(SOLARIS_LWP_NAME_SYSCALL)
4440 PRE(sys_lwp_name)
4442 /* int lwp_name(int opcode, id_t lwpid, char *name, size_t len); */
4443 PRINT("sys_lwp_name ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
4445 switch (ARG1 /*opcode*/) {
4446 case 0:
4447 /* lwp_setname */
4448 PRE_REG_READ3(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4449 char *, name);
4450 PRE_MEM_RASCIIZ("lwp_name(name)", ARG3);
4451 break;
4452 case 1:
4453 /* lwp_getname */
4454 PRE_REG_READ4(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4455 char *, name, vki_size_t, len);
4456 PRE_MEM_WRITE("lwp_name(name)", ARG3, ARG4);
4457 break;
4458 default:
4459 VG_(unimplemented)("Syswrap of the lwp_name call with opcode %ld.", SARG1);
4460 /*NOTREACHED*/
4461 break;
4465 POST(sys_lwp_name)
4467 switch (ARG1 /*opcode*/) {
4468 case 0:
4469 if (ARG3) { // Paranoia
4470 const HChar *new_name = (const HChar *) ARG3;
4471 ThreadState *tst = VG_(get_ThreadState)(tid);
4472 SizeT new_len = VG_(strlen)(new_name);
4474 /* Don't bother reusing the memory. This is a rare event. */
4475 tst->thread_name = VG_(realloc)("syswrap.lwp_name", tst->thread_name,
4476 new_len + 1);
4477 VG_(strcpy)(tst->thread_name, new_name);
4479 break;
4480 case 1:
4481 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
4482 break;
4483 default:
4484 vg_assert(0);
4485 break;
4488 #endif /* SOLARIS_LWP_NAME_SYSCALL */
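/* A minimal client-side sketch, assuming this Solaris release provides the
   libc wrappers thr_setname()/thr_getname(), which reach the lwp_name()
   wrappers above:

      #include <thread.h>
      #include <stdio.h>

      char name[32];
      (void) thr_setname(thr_self(), "worker-1");            // lwp_name(0, ...)
      if (thr_getname(thr_self(), name, sizeof(name)) == 0)  // lwp_name(1, ...)
         printf("thread name: %s\n", name);

   POST(sys_lwp_name) also mirrors the new name into tst->thread_name so that
   Valgrind's own messages can report it. */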
4490 PRE(sys_privsys)
4492 /* Kernel: int privsys(int code, priv_op_t op, priv_ptype_t type,
4493 void *buf, size_t bufsize, int itype);
4495 switch (ARG1 /*code*/) {
4496 case VKI_PRIVSYS_SETPPRIV:
4497 /* Libc: int setppriv(priv_op_t op, priv_ptype_t type,
4498 const priv_set_t *pset);
4500 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4501 ARG4, ARG5);
4502 PRE_REG_READ5(long, SC2("privsys", "setppriv"), int, code,
4503 vki_priv_op_t, op, vki_priv_ptype_t, type,
4504 const priv_set_t *, pset, vki_size_t, bufsize);
4505 PRE_MEM_READ("privsys(pset)", ARG4, ARG5);
4506 break;
4507 case VKI_PRIVSYS_GETPPRIV:
4508 /* Libc: int getppriv(priv_ptype_t type, priv_set_t *pset);
4509 priv_set_t *pset -> void *buf
4511 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4512 ARG4, ARG5);
4513 PRE_REG_READ5(long, SC2("privsys", "getppriv"), int, code,
4514 vki_priv_op_t, op, vki_priv_ptype_t, type, priv_set_t *, pset,
4515 vki_size_t, bufsize);
4516 PRE_MEM_WRITE("privsys(pset)", ARG4, ARG5);
4517 break;
4518 case VKI_PRIVSYS_GETIMPLINFO:
4519 /* Libc: int getprivinfo(priv_impl_info_t *buf, size_t bufsize);
4520 priv_impl_info_t *buf -> void *buf
4522 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4523 ARG4, ARG5);
4524 PRE_REG_READ5(long, SC2("privsys", "getprivinfo"), int, code,
4525 vki_priv_op_t, op, vki_priv_ptype_t, type,
4526 priv_impl_info_t *, buf, vki_size_t, bufsize);
4527 PRE_MEM_WRITE("privsys(buf)", ARG4, ARG5);
4528 break;
4529 case VKI_PRIVSYS_SETPFLAGS:
4530 /* Libc: int setpflags(uint_t flag, uint_t val);
4531 uint_t flag -> priv_op_t op
4532 uint_t val -> priv_ptype_t type
4534 PRINT("sys_privsys ( %ld, %lu, %lu )", SARG1, ARG2, ARG3);
4535 PRE_REG_READ3(long, SC2("privsys", "setpflags"), int, code,
4536 vki_uint_t, flag, vki_uint_t, val);
4537 break;
4538 case VKI_PRIVSYS_GETPFLAGS:
4539 /* Libc: uint_t getpflags(uint_t flag);
4540 uint_t flag -> priv_op_t op
4542 PRINT("sys_privsys ( %ld, %lu )", SARG1, ARG2);
4543 PRE_REG_READ2(long, SC2("privsys", "getpflags"), int, code,
4544 vki_uint_t, flag);
4545 break;
4546 case VKI_PRIVSYS_ISSETUGID:
4547 /* Libc: int issetugid(void); */
4548 PRINT("sys_privsys ( %ld )", SARG1);
4549 PRE_REG_READ1(long, SC2("privsys", "issetugid"), int, code);
4550 break;
4551 case VKI_PRIVSYS_PFEXEC_REG:
4552 /* Libc: int register_pfexec(int did);
4553 int did -> priv_op_t op
4555 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4556 PRE_REG_READ2(long, SC2("privsys", "register_pfexec"), int, code,
4557 int, did);
4558 break;
4559 case VKI_PRIVSYS_PFEXEC_UNREG:
4560 /* Libc: int unregister_pfexec(int did); */
4561 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4562 PRE_REG_READ2(long, SC2("privsys", "unregister_pfexec"), int, code,
4563 int, did);
4564 break;
4565 default:
4566 VG_(unimplemented)("Syswrap of the privsys call with code %ld.", SARG1);
4567 /*NOTREACHED*/
4568 break;
4571 /* Be strict. */
4572 if ((ARG1 == VKI_PRIVSYS_PFEXEC_REG ||
4573 ARG1 == VKI_PRIVSYS_PFEXEC_UNREG) &&
4574 !ML_(fd_allowed)(ARG2, "privsys", tid, False))
4575 SET_STATUS_Failure(VKI_EBADF);
4578 POST(sys_privsys)
4580 switch (ARG1 /*code*/) {
4581 case VKI_PRIVSYS_SETPPRIV:
4582 break;
4583 case VKI_PRIVSYS_GETPPRIV:
4584 POST_MEM_WRITE(ARG4, sizeof(vki_priv_set_t));
4585 break;
4586 case VKI_PRIVSYS_GETIMPLINFO:
4587 /* The kernel copies out data of size min(bufsize, privinfosize).
4588 Unfortunately, it does not seem to be possible to easily obtain the
4589 privinfosize value. The code below optimistically marks all ARG5
4590 bytes (aka bufsize) as written by the kernel. */
4591 POST_MEM_WRITE(ARG4, ARG5);
4592 break;
4593 case VKI_PRIVSYS_SETPFLAGS:
4594 case VKI_PRIVSYS_GETPFLAGS:
4595 case VKI_PRIVSYS_ISSETUGID:
4596 case VKI_PRIVSYS_PFEXEC_REG:
4597 case VKI_PRIVSYS_PFEXEC_UNREG:
4598 break;
4599 default:
4600 vg_assert(0);
4601 break;
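/* A client-side sketch, assuming the usual Solaris privilege interfaces
   (getppriv(), setppriv(), priv_allocset() and friends) are available;
   error handling elided:

      #include <priv.h>

      priv_set_t *ps = priv_allocset();
      if (ps != NULL && getppriv(PRIV_EFFECTIVE, ps) == 0) {  // PRIVSYS_GETPPRIV
         (void) priv_delset(ps, PRIV_PROC_FORK);
         (void) setppriv(PRIV_SET, PRIV_EFFECTIVE, ps);       // PRIVSYS_SETPPRIV
      }
      priv_freeset(ps);

   getppriv() supplies the buffer that POST(sys_privsys) marks as written
   with sizeof(priv_set_t) bytes. */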
4605 PRE(sys_ucredsys)
4607 /* Kernel: int ucredsys(int code, int obj, void *buf); */
4608 PRINT("sys_ucredsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4610 switch (ARG1 /*code*/) {
4611 case VKI_UCREDSYS_UCREDGET:
4612 /* Libc: ucred_t *ucred_get(pid_t pid); */
4613 PRE_REG_READ3(long, SC2("ucredsys", "ucredget"), int, code,
4614 vki_pid_t, pid, vki_ucred_t *, buf);
4615 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4616 break;
4618 case VKI_UCREDSYS_GETPEERUCRED:
4619 /* Libc: int getpeerucred(int fd, ucred_t **ucred); */
4620 PRE_REG_READ3(long, SC2("ucredsys", "getpeerucred"), int, code,
4621 int, fd, vki_ucred_t *, buf);
4622 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4624 /* Be strict. */
4625 if (!ML_(fd_allowed)(ARG2, "ucredsys", tid, False))
4626 SET_STATUS_Failure(VKI_EBADF);
4627 break;
4629 default:
4630 VG_(unimplemented)("Syswrap of the ucredsys call with code %ld.", SARG1);
4631 /*NOTREACHED*/
4632 break;
4636 POST(sys_ucredsys)
4638 switch (ARG1 /*code*/) {
4639 case VKI_UCREDSYS_UCREDGET:
4640 case VKI_UCREDSYS_GETPEERUCRED:
4641 vg_assert(ARG3 != 0);
4642 POST_MEM_WRITE(ARG3, ((vki_ucred_t *) ARG3)->uc_size);
4643 break;
4645 default:
4646 vg_assert(0);
4647 break;
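/* A client-side sketch of getpeerucred(3C), which enters the wrapper above
   as ucredsys(UCREDSYS_GETPEERUCRED); sock_fd is assumed to be a connected
   socket or door descriptor, error handling elided:

      #include <ucred.h>

      ucred_t *uc = NULL;
      if (getpeerucred(sock_fd, &uc) == 0) {
         uid_t peer_uid = ucred_geteuid(uc);
         (void) peer_uid;
         ucred_free(uc);
      }

   POST(sys_ucredsys) marks the returned ucred_t as written, using its
   uc_size field for the length. */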
4651 PRE(sys_sysfs)
4653 /* Kernel: int sysfs(int opcode, long a1, long a2); */
4654 PRINT("sys_sysfs ( %ld, %ld, %ld )", SARG1, SARG2, ARG3);
4656 switch (ARG1 /*opcode*/) {
4657 case VKI_GETFSIND:
4658 /* Libc: int sysfs(int opcode, const char *fsname); */
4659 PRE_REG_READ2(long, SC2("sysfs", "getfsind"), int, opcode,
4660 const char *, fsname);
4661 PRE_MEM_RASCIIZ("sysfs(fsname)", ARG2);
4662 break;
4663 case VKI_GETFSTYP:
4664 /* Libc: int sysfs(int opcode, int fs_index, char *buf); */
4665 PRE_REG_READ3(long, SC2("sysfs", "getfstyp"), int, opcode,
4666 int, fs_index, char *, buf);
4667 PRE_MEM_WRITE("sysfs(buf)", ARG3, VKI_FSTYPSZ + 1);
4668 break;
4669 case VKI_GETNFSTYP:
4670 /* Libc: int sysfs(int opcode); */
4671 PRE_REG_READ1(long, SC2("sysfs", "getnfstyp"), int, opcode);
4672 break;
4673 default:
4674 VG_(unimplemented)("Syswrap of the sysfs call with opcode %ld.", SARG1);
4675 /*NOTREACHED*/
4676 break;
4680 POST(sys_sysfs)
4682 switch (ARG1 /*opcode*/) {
4683 case VKI_GETFSIND:
4684 case VKI_GETNFSTYP:
4685 break;
4686 case VKI_GETFSTYP:
4687 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
4688 break;
4689 default:
4690 vg_assert(0);
4691 break;
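/* Illustrative client-side use of sysfs(2), matching the three opcodes
   handled above (header names taken from the man page and to be treated as
   an assumption):

      #include <sys/fstyp.h>
      #include <sys/fsid.h>
      #include <stdio.h>

      char buf[FSTYPSZ + 1];
      int nfstyp = sysfs(GETNFSTYP);           // number of configured fs types
      int index  = sysfs(GETFSIND, "zfs");     // fs name -> index
      if (index > 0 && sysfs(GETFSTYP, index, buf) == 0)
         puts(buf);                            // prints the fs name back
      (void) nfstyp;

   For GETFSTYP the wrapper above marks FSTYPSZ + 1 bytes of buf as writable
   beforehand and the returned NUL-terminated name as defined afterwards. */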
4696 PRE(sys_getmsg)
4698 /* int getmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4699 int *flagsp); */
4700 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4701 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4702 *flags |= SfMayBlock;
4703 PRINT("sys_getmsg ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
4704 PRE_REG_READ4(long, "getmsg", int, fildes, struct vki_strbuf *, ctlptr,
4705 struct vki_strbuf *, dataptr, int *, flagsp);
4706 if (ctrlptr) {
4707 PRE_FIELD_READ("getmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
4708 PRE_FIELD_WRITE("getmsg(ctrlptr->len)", ctrlptr->len);
4709 PRE_FIELD_READ("getmsg(ctrlptr->buf)", ctrlptr->buf);
4710 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4711 && ctrlptr->maxlen > 0)
4712 PRE_MEM_WRITE("getmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4713 ctrlptr->maxlen);
4715 if (dataptr) {
4716 PRE_FIELD_READ("getmsg(dataptr->maxlen)", dataptr->maxlen);
4717 PRE_FIELD_WRITE("getmsg(dataptr->len)", dataptr->len);
4718 PRE_FIELD_READ("getmsg(dataptr->buf)", dataptr->buf);
4719 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4720 && dataptr->maxlen > 0)
4721 PRE_MEM_WRITE("getmsg(dataptr->buf)", (Addr)dataptr->buf,
4722 dataptr->maxlen);
4724 PRE_MEM_READ("getmsg(flagsp)", ARG4, sizeof(int));
4725 /*PRE_MEM_WRITE("getmsg(flagsp)", ARG4, sizeof(int));*/
4727 /* Be strict. */
4728 if (!ML_(fd_allowed)(ARG1, "getmsg", tid, False))
4729 SET_STATUS_Failure(VKI_EBADF);
4732 POST(sys_getmsg)
4734 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4735 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4737 if (ctrlptr && ctrlptr->len > 0)
4738 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
4739 if (dataptr && dataptr->len > 0)
4740 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
4741 POST_MEM_WRITE(ARG4, sizeof(int));
4744 PRE(sys_putmsg)
4746 /* int putmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4747 int flags); */
4748 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4749 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4750 *flags |= SfMayBlock;
4751 PRINT("sys_putmsg ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
4752 PRE_REG_READ4(long, "putmsg", int, fildes, struct vki_strbuf *, ctrlptr,
4753 struct vki_strbuf *, dataptr, int, flags);
4754 if (ctrlptr) {
4755 PRE_FIELD_READ("putmsg(ctrlptr->len)", ctrlptr->len);
4756 PRE_FIELD_READ("putmsg(ctrlptr->buf)", ctrlptr->buf);
4757 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4758 && ctrlptr->len > 0)
4759 PRE_MEM_READ("putmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4760 ctrlptr->len);
4762 if (dataptr) {
4763 PRE_FIELD_READ("putmsg(dataptr->len)", dataptr->len);
4764 PRE_FIELD_READ("putmsg(dataptr->buf)", dataptr->buf);
4765 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4766 && dataptr->len > 0)
4767 PRE_MEM_READ("putmsg(dataptr->buf)", (Addr)dataptr->buf,
4768 dataptr->len);
4771 /* Be strict. */
4772 if (!ML_(fd_allowed)(ARG1, "putmsg", tid, False))
4773 SET_STATUS_Failure(VKI_EBADF);
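/* A client-side sketch of the STREAMS calls wrapped above (stream_fd is
   assumed to be a descriptor on a STREAMS device; error handling elided):

      #include <stropts.h>

      char cbuf[64], dbuf[512];
      struct strbuf ctl = { sizeof(cbuf), 0, cbuf };   // maxlen, len, buf
      struct strbuf dat = { sizeof(dbuf), 0, dbuf };
      int msgflags = 0;

      if (getmsg(stream_fd, &ctl, &dat, &msgflags) >= 0 && dat.len > 0)
         (void) putmsg(stream_fd, NULL, &dat, 0);      // echo the data part

   On return from getmsg(), ctl.len and dat.len say how much landed in each
   buffer, which is exactly what POST(sys_getmsg) marks as defined. */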
4776 PRE(sys_lstat)
4778 /* int lstat(const char *path, struct stat *buf); */
4779 /* Note: We could use the generic sys_newlstat wrapper here, but the 'new'
4780 in its name is rather confusing in the Solaris context, thus we provide
4781 our own wrapper. */
4782 PRINT("sys_lstat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
4783 PRE_REG_READ2(long, "lstat", const char *, path, struct stat *, buf);
4785 PRE_MEM_RASCIIZ("lstat(path)", ARG1);
4786 PRE_MEM_WRITE("lstat(buf)", ARG2, sizeof(struct vki_stat));
4789 POST(sys_lstat)
4791 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
4794 PRE(sys_sigprocmask)
4796 /* int sigprocmask(int how, const sigset_t *set, sigset_t *oset); */
4797 PRINT("sys_sigprocmask ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4798 PRE_REG_READ3(long, "sigprocmask",
4799 int, how, vki_sigset_t *, set, vki_sigset_t *, oset);
4800 if (ARG2)
4801 PRE_MEM_READ("sigprocmask(set)", ARG2, sizeof(vki_sigset_t));
4802 if (ARG3)
4803 PRE_MEM_WRITE("sigprocmask(oset)", ARG3, sizeof(vki_sigset_t));
4805 /* Be safe. */
4806 if (ARG2 && !ML_(safe_to_deref((void*)ARG2, sizeof(vki_sigset_t)))) {
4807 SET_STATUS_Failure(VKI_EFAULT);
4809 if (ARG3 && !ML_(safe_to_deref((void*)ARG3, sizeof(vki_sigset_t)))) {
4810 SET_STATUS_Failure(VKI_EFAULT);
4813 if (!FAILURE)
4814 SET_STATUS_from_SysRes(
4815 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, (vki_sigset_t*)ARG2,
4816 (vki_sigset_t*)ARG3)
4819 if (SUCCESS)
4820 *flags |= SfPollAfter;
4823 POST(sys_sigprocmask)
4825 if (ARG3)
4826 POST_MEM_WRITE(ARG3, sizeof(vki_sigset_t));
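/* Typical client-side usage, for reference; the wrapper above routes the
   call through VG_(do_sys_sigprocmask) so that Valgrind can track the
   client's signal mask instead of handing it straight to the kernel:

      #include <signal.h>

      sigset_t newset, oldset;
      sigemptyset(&newset);
      sigaddset(&newset, SIGINT);
      if (sigprocmask(SIG_BLOCK, &newset, &oldset) == 0) {
         // ... critical section with SIGINT blocked ...
         (void) sigprocmask(SIG_SETMASK, &oldset, NULL);
      }
*/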
4829 PRE(sys_sigsuspend)
4831 *flags |= SfMayBlock;
4833 /* int sigsuspend(const sigset_t *set); */
4834 PRINT("sys_sigsuspend ( %#lx )", ARG1);
4835 PRE_REG_READ1(long, "sigsuspend", vki_sigset_t *, set);
4836 PRE_MEM_READ("sigsuspend(set)", ARG1, sizeof(vki_sigset_t));
4838 /* Be safe. */
4839 if (ARG1 && ML_(safe_to_deref((void *) ARG1, sizeof(vki_sigset_t)))) {
4840 VG_(sigdelset)((vki_sigset_t *) ARG1, VG_SIGVGKILL);
4841 /* We cannot mask VG_SIGVGKILL, as otherwise this thread would not
4842 be killable by VG_(nuke_all_threads_except).
4843 We thus silently ignore the user request to mask this signal.
4844 Note that this is similar to what is done for e.g.
4845 sigprocmask (see m_signals.c calculate_SKSS_from_SCSS). */
4849 PRE(sys_sigaction)
4851 /* int sigaction(int signal, const struct sigaction *act,
4852 struct sigaction *oact); */
4853 PRINT("sys_sigaction ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4854 PRE_REG_READ3(long, "sigaction", int, signal,
4855 const struct sigaction *, act, struct sigaction *, oact);
4857 /* Note that on Solaris, vki_sigaction_toK_t and vki_sigaction_fromK_t are
4858 both typedefs of 'struct sigaction'. */
4860 if (ARG2) {
4861 vki_sigaction_toK_t *sa = (vki_sigaction_toK_t*)ARG2;
4862 PRE_FIELD_READ("sigaction(act->sa_flags)", sa->sa_flags);
4863 PRE_FIELD_READ("sigaction(act->sa_handler)", sa->ksa_handler);
4864 PRE_FIELD_READ("sigaction(act->sa_mask)", sa->sa_mask);
4866 if (ARG3)
4867 PRE_MEM_WRITE("sigaction(oact)", ARG3, sizeof(vki_sigaction_fromK_t));
4869 /* Be safe. */
4870 if (ARG2 && !ML_(safe_to_deref((void*)ARG2,
4871 sizeof(vki_sigaction_toK_t)))) {
4872 SET_STATUS_Failure(VKI_EFAULT);
4874 if (ARG3 && !ML_(safe_to_deref((void*)ARG3,
4875 sizeof(vki_sigaction_fromK_t)))) {
4876 SET_STATUS_Failure(VKI_EFAULT);
4879 if (!FAILURE)
4880 SET_STATUS_from_SysRes(
4881 VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t*)ARG2,
4882 (vki_sigaction_fromK_t*)ARG3));
4885 POST(sys_sigaction)
4887 if (ARG3)
4888 POST_MEM_WRITE(ARG3, sizeof(vki_sigaction_fromK_t));
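/* Typical client-side usage, for reference (the handler function is a
   hypothetical name):

      #include <signal.h>
      #include <string.h>

      extern void on_usr1(int);

      struct sigaction sa, old;
      memset(&sa, 0, sizeof(sa));
      sa.sa_handler = on_usr1;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART;
      (void) sigaction(SIGUSR1, &sa, &old);

   The PRE handler above reads exactly the three fields set here (sa_flags,
   sa_handler, sa_mask), marks oact writable when it is non-NULL, and lets
   VG_(do_sys_sigaction) keep Valgrind's view of the client's signal state in
   sync. */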
4891 PRE(sys_sigpending)
4893 /* int sigpending(int flag, sigset_t *setp); */
4894 PRINT("sys_sigpending ( %ld, %#lx )", SARG1, ARG2);
4895 PRE_REG_READ2(long, "sigpending", int, flag, sigset_t *, setp);
4896 PRE_MEM_WRITE("sigpending(setp)", ARG2, sizeof(vki_sigset_t));
4899 POST(sys_sigpending)
4901 POST_MEM_WRITE(ARG2, sizeof(vki_sigset_t));
4904 PRE(sys_getsetcontext)
4906 /* Kernel: int getsetcontext(int flag, void *arg) */
4907 ThreadState *tst = VG_(get_ThreadState)(tid);
4908 PRINT("sys_getsetcontext ( %ld, %#lx )", SARG1, ARG2);
4909 switch (ARG1 /*flag*/) {
4910 case VKI_GETCONTEXT:
4911 /* Libc: int getcontext(ucontext_t *ucp); */
4912 PRE_REG_READ2(long, SC2("getsetcontext", "getcontext"), int, flag,
4913 ucontext_t *, ucp);
4914 PRE_MEM_WRITE("getsetcontext(ucp)", ARG2, sizeof(vki_ucontext_t));
4916 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4917 SET_STATUS_Failure(VKI_EFAULT);
4918 return;
4920 VG_(save_context)(tid, (vki_ucontext_t*)ARG2, Vg_CoreSysCall);
4921 SET_STATUS_Success(0);
4922 break;
4923 case VKI_SETCONTEXT:
4924 /* Libc: int setcontext(const ucontext_t *ucp); */
4925 PRE_REG_READ2(long, SC2("getsetcontext", "setcontext"), int, flag,
4926 const ucontext_t *, ucp);
4928 if (!ARG2) {
4929 /* Setting NULL context causes thread exit. */
4930 tst->exitreason = VgSrc_ExitThread;
4931 tst->os_state.exitcode = 0;
4932 SET_STATUS_Success(0);
4933 return;
4936 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4937 SET_STATUS_Failure(VKI_EFAULT);
4938 return;
4941 VG_(restore_context)(tid, (vki_ucontext_t*)ARG2,
4942 Vg_CoreSysCall, False/*esp_is_thrptr*/);
4943 /* Tell the driver not to update the guest state with the "result". */
4944 *flags |= SfNoWriteResult;
4945 /* Check to see if any signals arose as a result of this. */
4946 *flags |= SfPollAfter;
4948 /* Check if this is a possible return from a signal handler. */
4949 VG_(sigframe_return)(tid, (vki_ucontext_t*)ARG2);
4951 SET_STATUS_Success(0);
4952 break;
4953 case VKI_GETUSTACK:
4954 /* Libc: int getustack(stack_t **spp); */
4955 PRE_REG_READ2(long, SC2("getsetcontext", "getustack"), int, flag,
4956 stack_t **, spp);
4957 PRE_MEM_WRITE("getsetcontext(spp)", ARG2, sizeof(vki_stack_t*));
4959 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t*)))) {
4960 SET_STATUS_Failure(VKI_EFAULT);
4961 return;
4964 *(vki_stack_t**)ARG2 = tst->os_state.ustack;
4965 POST_MEM_WRITE(ARG2, sizeof(vki_stack_t*));
4966 SET_STATUS_Success(0);
4967 break;
4968 case VKI_SETUSTACK:
4970 /* Libc: int setustack(stack_t *sp); */
4971 PRE_REG_READ2(long, SC2("getsetcontext", "setustack"), int, flag,
4972 stack_t *, sp);
4974 /* The kernel does not read the stack data immediately, but it can read
4975 them later, so it is better to make sure the data are defined. */
4976 PRE_MEM_READ("getsetcontext_setustack(sp)", ARG2, sizeof(vki_stack_t));
4978 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t)))) {
4979 SET_STATUS_Failure(VKI_EFAULT);
4980 return;
4983 vki_stack_t *old_stack = tst->os_state.ustack;
4984 tst->os_state.ustack = (vki_stack_t*)ARG2;
4986 /* The thread is setting the ustack pointer. It is a good time to get
4987 information about its stack. */
4988 if (tst->os_state.ustack->ss_flags == 0) {
4989 /* If the sanity check of ss_flags passed, set the stack. */
4990 set_stack(tid, tst->os_state.ustack);
4992 if ((old_stack == NULL) && (tid > 1)) {
4993 /* New thread creation is now completed. Inform the tool. */
4994 VG_TRACK(pre_thread_first_insn, tid);
4998 SET_STATUS_Success(0);
5000 break;
5001 default:
5002 VG_(unimplemented)("Syswrap of the context call with flag %ld.", SARG1);
5003 /*NOTREACHED*/
5004 break;
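/* A client-side sketch of the GETCONTEXT/SETCONTEXT pair, which the wrapper
   above services entirely inside Valgrind via VG_(save_context) and
   VG_(restore_context) instead of letting the kernel see the context:

      #include <ucontext.h>
      #include <stdio.h>

      ucontext_t uc;
      volatile int resumed = 0;

      if (getcontext(&uc) == 0 && !resumed) {
         resumed = 1;
         setcontext(&uc);     // control reappears just after getcontext()
      }
      printf("resumed = %d\n", resumed);   // prints 1
*/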
5008 PRE(sys_fchmodat)
5010 /* int fchmodat(int fd, const char *path, mode_t mode, int flag); */
5012 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5013 This is different from Linux, for example, where glibc sign-extends it. */
5014 Int fd = (Int) ARG1;
5016 PRINT("sys_fchmodat ( %d, %#lx(%s), %ld, %ld )",
5017 fd, ARG2, (HChar *) ARG2, SARG3, SARG4);
5018 PRE_REG_READ4(long, "fchmodat",
5019 int, fd, const char *, path, vki_mode_t, mode, int, flag);
5021 if (ARG2)
5022 PRE_MEM_RASCIIZ("fchmodat(path)", ARG2);
5024 /* Be strict but ignore fd for absolute path. */
5025 if (fd != VKI_AT_FDCWD
5026 && ML_(safe_to_deref)((void *) ARG2, 1)
5027 && ((HChar *) ARG2)[0] != '/'
5028 && !ML_(fd_allowed)(fd, "fchmodat", tid, False))
5029 SET_STATUS_Failure(VKI_EBADF);
5032 PRE(sys_mkdirat)
5034 /* int mkdirat(int fd, const char *path, mode_t mode); */
5036 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5037 This is different from Linux, for example, where glibc sign-extends it. */
5038 Int fd = (Int) ARG1;
5040 *flags |= SfMayBlock;
5041 PRINT("sys_mkdirat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2, SARG3);
5042 PRE_REG_READ3(long, "mkdirat", int, fd, const char *, path,
5043 vki_mode_t, mode);
5044 PRE_MEM_RASCIIZ("mkdirat(path)", ARG2);
5046 /* Be strict but ignore fd for absolute path. */
5047 if (fd != VKI_AT_FDCWD
5048 && ML_(safe_to_deref)((void *) ARG2, 1)
5049 && ((HChar *) ARG2)[0] != '/'
5050 && !ML_(fd_allowed)(fd, "mkdirat", tid, False))
5051 SET_STATUS_Failure(VKI_EBADF);
5054 static void do_statvfs_post(struct vki_statvfs *stats, ThreadId tid)
5056 POST_FIELD_WRITE(stats->f_bsize);
5057 POST_FIELD_WRITE(stats->f_frsize);
5058 POST_FIELD_WRITE(stats->f_blocks);
5059 POST_FIELD_WRITE(stats->f_bfree);
5060 POST_FIELD_WRITE(stats->f_bavail);
5061 POST_FIELD_WRITE(stats->f_files);
5062 POST_FIELD_WRITE(stats->f_ffree);
5063 POST_FIELD_WRITE(stats->f_favail);
5064 POST_FIELD_WRITE(stats->f_fsid);
5065 POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
5066 POST_FIELD_WRITE(stats->f_flag);
5067 POST_FIELD_WRITE(stats->f_namemax);
5068 POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
5071 PRE(sys_statvfs)
5073 /* int statvfs(const char *path, struct statvfs *buf); */
5074 *flags |= SfMayBlock;
5075 PRINT("sys_statvfs ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
5076 PRE_REG_READ2(long, "statvfs", const char *, path,
5077 struct vki_statvfs *, buf);
5078 PRE_MEM_RASCIIZ("statvfs(path)", ARG1);
5079 PRE_MEM_WRITE("statvfs(buf)", ARG2, sizeof(struct vki_statvfs));
5082 POST(sys_statvfs)
5084 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
5087 PRE(sys_fstatvfs)
5089 /* int fstatvfs(int fd, struct statvfs *buf); */
5090 *flags |= SfMayBlock;
5091 PRINT("sys_fstatvfs ( %ld, %#lx )", SARG1, ARG2);
5092 PRE_REG_READ2(long, "fstatvfs", int, fd, struct vki_statvfs *, buf);
5093 PRE_MEM_WRITE("fstatvfs(buf)", ARG2, sizeof(struct vki_statvfs));
5095 /* Be strict. */
5096 if (!ML_(fd_allowed)(ARG1, "fstatvfs", tid, False))
5097 SET_STATUS_Failure(VKI_EBADF);
5100 POST(sys_fstatvfs)
5102 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
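/* Illustrative client-side use of statvfs() (the path is just an example):

      #include <sys/statvfs.h>
      #include <stdio.h>

      struct statvfs sv;
      if (statvfs("/tmp", &sv) == 0) {
         unsigned long long avail =
            (unsigned long long) sv.f_bavail * sv.f_frsize;
         printf("/tmp: %llu bytes available on a %s filesystem\n",
                avail, sv.f_basetype);
      }

   do_statvfs_post() above marks each field the kernel fills in, including
   the embedded f_basetype and f_fstr strings, rather than blindly marking
   the whole structure as defined. */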
5105 PRE(sys_nfssys)
5107 /* int nfssys(enum nfssys_op opcode, void *arg); */
5108 *flags |= SfMayBlock;
5109 PRINT("sys_nfssys ( %ld, %#lx )", SARG1, ARG2);
5111 switch (ARG1 /*opcode*/) {
5112 case VKI_NFS_REVAUTH:
5113 PRE_REG_READ2(long, SC2("nfssys", "nfs_revauth"), int, opcode,
5114 struct vki_nfs_revauth_args *, args);
5115 PRE_MEM_READ("nfssys(arg)", ARG2,
5116 sizeof(struct vki_nfs_revauth_args));
5117 break;
5118 default:
5119 VG_(unimplemented)("Syswrap of the nfssys call with opcode %ld.", SARG1);
5120 /*NOTREACHED*/
5121 break;
5125 POST(sys_nfssys)
5127 switch (ARG1 /*opcode*/) {
5128 case VKI_NFS_REVAUTH:
5129 break;
5130 default:
5131 vg_assert(0);
5132 break;
5136 PRE(sys_waitid)
5138 /* int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options); */
5139 *flags |= SfMayBlock;
5140 PRINT("sys_waitid( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
5141 PRE_REG_READ4(long, "waitid", vki_idtype_t, idtype, vki_id_t, id,
5142 siginfo_t *, infop, int, options);
5143 PRE_MEM_WRITE("waitid(infop)", ARG3, sizeof(vki_siginfo_t));
5146 POST(sys_waitid)
5148 POST_MEM_WRITE(ARG3, sizeof(vki_siginfo_t));
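/* Typical client-side usage, for reference (child_pid is assumed to be a
   previously forked child):

      #include <sys/wait.h>

      siginfo_t si;
      if (waitid(P_PID, child_pid, &si, WEXITED) == 0 &&
          si.si_code == CLD_EXITED) {
         int status = si.si_status;      // the child's exit code
         (void) status;
      }

   The whole siginfo_t is marked writable before the call and defined
   afterwards, matching the wrappers above. */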
5151 PRE(sys_sigsendsys)
5153 /* int sigsendsys(procset_t *psp, int sig); */
5154 PRINT("sys_sigsendsys( %#lx, %ld )", ARG1, SARG2);
5155 PRE_REG_READ2(long, "sigsendsys", vki_procset_t *, psp, int, signal);
5156 PRE_MEM_READ("sigsendsys(psp)", ARG1, sizeof(vki_procset_t));
5158 if (!ML_(client_signal_OK)(ARG1)) {
5159 SET_STATUS_Failure(VKI_EINVAL);
5161 if (!ML_(safe_to_deref)((void *) ARG1, sizeof(vki_procset_t))) {
5162 SET_STATUS_Failure(VKI_EFAULT);
5165 /* Exit early if there are problems. */
5166 if (FAILURE)
5167 return;
5169 vki_procset_t *psp = (vki_procset_t *) ARG1;
5170 switch (psp->p_op) {
5171 case VKI_POP_AND:
5172 break;
5173 default:
5174 VG_(unimplemented)("Syswrap of the sigsendsys call with op %u.",
5175 psp->p_op);
5178 UInt pid;
5179 if ((psp->p_lidtype == VKI_P_PID) && (psp->p_ridtype == VKI_P_ALL)) {
5180 pid = psp->p_lid;
5181 } else if ((psp->p_lidtype == VKI_P_ALL) && (psp->p_ridtype == VKI_P_PID)) {
5182 pid = psp->p_rid;
5183 } else {
5184 VG_(unimplemented)("Syswrap of the sigsendsys call with lidtype %u and "
5185 "ridtype %u.", psp->p_lidtype, psp->p_ridtype);
5188 if (VG_(clo_trace_signals))
5189 VG_(message)(Vg_DebugMsg, "sigsendsys: sending signal to process %d\n",
5190 pid);
5192 /* Handle SIGKILL specially. */
5193 if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(pid, -1)) {
5194 SET_STATUS_Success(0);
5195 return;
5198 /* Check to see if this gave us a pending signal. */
5199 *flags |= SfPollAfter;
5202 #if defined(SOLARIS_UTIMESYS_SYSCALL)
5203 PRE(sys_utimesys)
5205 /* Kernel: int utimesys(int code, uintptr_t arg1, uintptr_t arg2,
5206 uintptr_t arg3, uintptr_t arg4);
5209 switch (ARG1 /*code*/) {
5210 case 0:
5211 /* Libc: int futimens(int fd, const timespec_t times[2]); */
5212 PRINT("sys_utimesys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
5213 PRE_REG_READ3(long, "utimesys", int, code, int, fd,
5214 const vki_timespec_t *, times);
5215 if (ARG3)
5216 PRE_MEM_READ("utimesys(times)", ARG3, 2 * sizeof(vki_timespec_t));
5218 /* Be strict. */
5219 if (!ML_(fd_allowed)(ARG2, "utimesys", tid, False))
5220 SET_STATUS_Failure(VKI_EBADF);
5221 break;
5222 case 1:
5224 /* Libc: int utimensat(int fd, const char *path,
5225 const timespec_t times[2], int flag);
5228 /* Interpret the second argument as 32-bit value even on 64-bit
5229 architecture. This is different from Linux, for example, where glibc
5230 sign-extends it. */
5231 Int fd = (Int) ARG2;
5233 PRINT("sys_utimesys ( %ld, %d, %#lx(%s), %#lx, %ld )",
5234 SARG1, fd, ARG3, (HChar *) ARG3, ARG4, SARG5);
5235 PRE_REG_READ5(long, "utimesys", int, code, int, fd, const char *, path,
5236 const vki_timespec_t *, times, int, flag);
5237 if (ARG3)
5238 PRE_MEM_RASCIIZ("utimesys(path)", ARG3);
5239 if (ARG4)
5240 PRE_MEM_READ("utimesys(times)", ARG4, 2 * sizeof(vki_timespec_t));
5242 /* Be strict but ignore fd for absolute path. */
5243 if (fd != VKI_AT_FDCWD
5244 && ML_(safe_to_deref)((void *) ARG3, 1)
5245 && ((HChar *) ARG3)[0] != '/'
5246 && !ML_(fd_allowed)(fd, "utimesys", tid, False))
5247 SET_STATUS_Failure(VKI_EBADF);
5248 break;
5250 default:
5251 VG_(unimplemented)("Syswrap of the utimesys call with code %ld.", SARG1);
5252 /*NOTREACHED*/
5253 break;
5256 #endif /* SOLARIS_UTIMESYS_SYSCALL */
5258 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
5259 PRE(sys_utimensat)
5261 /* int utimensat(int fd, const char *path, const timespec_t times[2],
5262 int flag);
5265 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5266 This is different from Linux, for example, where glibc sign-extends it. */
5267 Int fd = (Int) ARG1;
5269 PRINT("sys_utimensat ( %d, %#lx(%s), %#lx, %ld )",
5270 fd, ARG2, (HChar *) ARG2, ARG3, SARG4);
5271 PRE_REG_READ4(long, "utimensat", int, fd, const char *, path,
5272 const vki_timespec_t *, times, int, flag);
5273 if (ARG2)
5274 PRE_MEM_RASCIIZ("utimensat(path)", ARG2);
5275 if (ARG3)
5276 PRE_MEM_READ("utimensat(times)", ARG3, 2 * sizeof(vki_timespec_t));
5278 /* Be strict but ignore fd for absolute path. */
5279 if (fd != VKI_AT_FDCWD
5280 && ML_(safe_to_deref)((void *) ARG2, 1)
5281 && ((HChar *) ARG2)[0] != '/'
5282 && !ML_(fd_allowed)(fd, "utimensat", tid, False))
5283 SET_STATUS_Failure(VKI_EBADF);
5285 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
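/* For reference, the wrappers above are reached through the libc timestamp
   interfaces; a minimal sketch (the file name and the UTIME_NOW/UTIME_OMIT
   values are illustrative only):

      struct timespec ts[2] = { { 0, UTIME_NOW }, { 0, UTIME_OMIT } };
      futimens(fd, ts);                        - utimesys, code 0
      utimensat(AT_FDCWD, "some/file", ts, 0); - utimesys code 1, or the
                                                 dedicated utimensat syscall

   As the comments above note, the fd argument is interpreted as a 32-bit
   value before it is compared with AT_FDCWD. */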
5287 PRE(sys_sigresend)
5289 /* int sigresend(int signal, siginfo_t *siginfo, sigset_t *mask); */
5290 /* Sends a signal to the calling thread; the mask parameter specifies a new
5291 signal mask. */
5293 /* Static (const) mask accessible from outside of this function. */
5294 static vki_sigset_t block_all;
5296 PRINT("sys_sigresend( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
5297 PRE_REG_READ3(long, "sigresend", int, signal, vki_siginfo_t *, siginfo,
5298 vki_sigset_t *, mask);
5300 if (ARG2)
5301 PRE_MEM_READ("sigresend(siginfo)", ARG2, sizeof(vki_siginfo_t));
5302 PRE_MEM_WRITE("sigresend(mask)", ARG3, sizeof(vki_sigset_t));
5304 /* Check the signal and mask. */
5305 if (!ML_(client_signal_OK)(ARG1)) {
5306 SET_STATUS_Failure(VKI_EINVAL);
5308 if (!ML_(safe_to_deref)((void*)ARG3, sizeof(vki_sigset_t))) {
5309 SET_STATUS_Failure(VKI_EFAULT);
5312 /* Exit early if there are problems. */
5313 if (FAILURE)
5314 return;
5316 /* Save the requested mask to unused ARG4. */
5317 ARG4 = ARG3;
5319 /* Fake the requested sigmask with a block-all mask. If the syscall
5320 succeeds then we will block "all" signals for a few instructions (in
5321 syscall-x86-solaris.S) but the correct mask will be almost instantly set
5322 again by a call to sigprocmask (also in syscall-x86-solaris.S). If the
5323 syscall fails then the mask is not changed, so everything is ok too. */
5324 VG_(sigfillset)(&block_all);
5325 ARG3 = (UWord)&block_all;
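/* As a rough timeline of the trick above (sketch only):
      client:  sigresend(sig, &si, &mask)
      here:    ARG4 = &mask, ARG3 = &block_all
      kernel:  queues/delivers sig with all signals temporarily blocked
      POST:    VG_(do_sys_sigprocmask)(SIG_SETMASK, &mask) installs the
               mask the client actually asked for */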
5327 /* Check to see if this gave us a pending signal. */
5328 *flags |= SfPollAfter;
5330 if (VG_(clo_trace_signals))
5331 VG_(message)(Vg_DebugMsg, "sigresend: resending signal %lu\n", ARG1);
5333 /* Handle SIGKILL specially. */
5334 if (ARG1 == VKI_SIGKILL && ML_(do_sigkill)(tid, -1)) {
5335 SET_STATUS_Success(0);
5336 return;
5339 /* Ask to handle this syscall via the slow route, since that's the only one
5340 that sets tst->status to VgTs_WaitSys. If the result of doing the
5341 syscall is an immediate run of async_signalhandler() in m_signals.c,
5342 then we need the thread to be properly tidied away. */
5343 *flags |= SfMayBlock;
5346 POST(sys_sigresend)
5348 /* The syscall succeeded, set the requested mask. */
5349 VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, (vki_sigset_t*)ARG4, NULL);
5351 if (VG_(clo_trace_signals))
5352 VG_(message)(Vg_DebugMsg, "sigresend: resent signal %lu\n", ARG1);
5355 static void mem_priocntlsys_parm_ok(ThreadId tid, Bool pre, Bool reade,
5356 vki_pc_vaparm_t *parm)
5358 if (reade)
5359 return;
5361 if (pre)
5362 PRE_FIELD_WRITE("priocntlsys(parm)", parm->pc_parm);
5363 else
5364 POST_FIELD_WRITE(parm->pc_parm);
5367 static void mem_priocntlsys_parm(ThreadId tid, Bool pre, Bool reade,
5368 const HChar *clname,
5369 vki_pc_vaparm_t *parm)
5371 /* This function is used to handle the PC_SETXPARMS and PC_GETXPARMS
5372 parameters. In the case of PC_SETXPARMS, the code below merely checks
5373 whether all parameters are scalar; PRE_MEM_READ() for these parameters is
5374 already done by the PC_SETXPARMS handler in PRE(sys_priocntlsys).
5376 A caller of this function is responsible for checking that clname and
5377 &parm->key can be dereferenced. */
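/* As an illustration (a sketch only; the class, key and value are made-up
   examples of the scalar keys handled below), a client request that ends
   up here could be:

      pc_vaparms_t vp = { 0 };
      vp.pc_vaparmscnt       = 1;
      vp.pc_parms[0].pc_key  = RT_KY_PRI;   - scalar, stored in pc_parm
      vp.pc_parms[0].pc_parm = 10;
      priocntl(P_LWPID, P_MYID, PC_SETXPARMS, "RT", &vp);
*/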
5379 if (VG_STREQ(clname, "RT")) {
5380 switch (parm->pc_key) {
5381 case VKI_RT_KY_PRI:
5382 case VKI_RT_KY_TQSECS:
5383 case VKI_RT_KY_TQNSECS:
5384 case VKI_RT_KY_TQSIG:
5385 /* Scalar values that are stored directly in pc_parm. */
5386 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5387 return;
5390 else if (VG_STREQ(clname, "TS")) {
5391 switch (parm->pc_key) {
5392 case VKI_TS_KY_UPRILIM:
5393 case VKI_TS_KY_UPRI:
5394 /* Scalar values that are stored directly in pc_parm. */
5395 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5396 return;
5399 else if (VG_STREQ(clname, "IA")) {
5400 switch (parm->pc_key) {
5401 case VKI_IA_KY_UPRILIM:
5402 case VKI_IA_KY_UPRI:
5403 case VKI_IA_KY_MODE:
5404 /* Scalar values that are stored directly in pc_parm. */
5405 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5406 return;
5409 else if (VG_STREQ(clname, "FSS")) {
5410 switch (parm->pc_key) {
5411 case VKI_FSS_KY_UPRILIM:
5412 case VKI_FSS_KY_UPRI:
5413 /* Scalar values that are stored directly in pc_parm. */
5414 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5415 return;
5418 else if (VG_STREQ(clname, "FX")) {
5419 switch (parm->pc_key) {
5420 case VKI_FX_KY_UPRILIM:
5421 case VKI_FX_KY_UPRI:
5422 case VKI_FX_KY_TQSECS:
5423 case VKI_FX_KY_TQNSECS:
5424 /* Scalar values that are stored directly in pc_parm. */
5425 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5426 return;
5429 else {
5430 /* Unknown class. */
5431 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5432 clname);
5433 /*NOTREACHED*/
5436 /* The class is known but pc_key is unknown. */
5437 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s "
5438 "and pc_key=%d.", clname, parm->pc_key);
5439 /*NOTREACHED*/
5442 PRE(sys_priocntlsys)
5444 /* long priocntlsys(int pc_version, procset_t *psp, int cmd, caddr_t arg,
5445 caddr_t arg2); */
5447 if (ARG1 != 1) {
5448 /* Only the first version of priocntlsys is supported by the code below. */
5450 VG_(unimplemented)("Syswrap of the priocntlsys call where pc_version=%lu.",
5451 ARG1);
5452 /*NOTREACHED*/
5455 PRINT("sys_priocntlsys ( %ld, %#lx, %ld, %#lx, %#lx )", SARG1, ARG2, SARG3,
5456 ARG4, ARG5);
5457 PRE_REG_READ5(long, "priocntlsys", int, pc_version, procset_t *, psp,
5458 int, cmd, void *, arg, void *, arg2);
5460 switch (ARG3 /*cmd*/) {
5461 case VKI_PC_GETCID:
5462 if (ARG4) {
5463 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5464 PRE_MEM_RASCIIZ("priocntlsys(clname)", (Addr)info->pc_clname);
5465 /* The next line says that the complete pcinfo_t structure can be
5466 written, but this actually isn't true for pc_clname which is
5467 always only read. */
5468 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5470 break;
5471 case VKI_PC_GETCLINFO:
5472 if (ARG4) {
5473 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5474 PRE_FIELD_READ("priocntlsys(cid)", info->pc_cid);
5475 /* The next line says that the complete pcinfo_t structure can be
5476 written, but this actually isn't true for pc_cid which is
5477 always only read. */
5478 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5480 break;
5481 case VKI_PC_SETPARMS:
5482 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5483 /* The next line says that the complete pcparms_t structure is read
5484 which is never actually true (we are too pessimistic here).
5485 Unfortunately we can't do better because we don't know what
5486 process class is involved. */
5487 PRE_MEM_READ("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5488 break;
5489 case VKI_PC_GETPARMS:
5490 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5491 PRE_MEM_WRITE("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5492 break;
5493 case VKI_PC_GETPRIRANGE:
5495 vki_pcpri_t *pcpri = (vki_pcpri_t*)ARG4;
5496 PRE_FIELD_READ("priocntlsys(cid)", pcpri->pc_cid);
5498 PRE_MEM_WRITE("priocntlsys(pri)", ARG4, sizeof(vki_pcpri_t));
5499 break;
5500 case VKI_PC_DONICE:
5501 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5503 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5504 PRE_FIELD_READ("priocntlsys(op)", nicee->pc_op);
5505 if (ML_(safe_to_deref)(&nicee->pc_op, sizeof(nicee->pc_op))) {
5506 switch (nicee->pc_op) {
5507 case VKI_PC_GETNICE:
5508 PRE_FIELD_WRITE("priocntlsys(val)", nicee->pc_val);
5509 break;
5510 case VKI_PC_SETNICE:
5511 PRE_FIELD_READ("priocntlsys(val)", nicee->pc_val);
5512 break;
5513 default:
5514 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5515 "cmd=PC_DONICE and pc_op=%d", nicee->pc_op);
5516 /*NOTREACHED*/
5517 break;
5521 break;
5522 case VKI_PC_SETXPARMS:
5523 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5524 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5525 if (ARG5) {
5526 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5527 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5528 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5529 sizeof(parms->pc_vaparmscnt))) {
5530 vki_uint_t i;
5531 PRE_MEM_READ("priocntlsys(parms)", (Addr)parms->pc_parms,
5532 parms->pc_vaparmscnt * sizeof(parms->pc_parms[0]));
5533 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5534 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5535 if (ML_(safe_to_deref)(parm, sizeof(*parm)) &&
5536 ML_(safe_to_deref)((void*)ARG4, 1))
5537 mem_priocntlsys_parm(tid, True /*pre*/, True /*read*/,
5538 (HChar*)ARG4, parm);
5542 break;
5543 case VKI_PC_GETXPARMS:
5544 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5545 if (ARG4)
5546 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5547 if (ARG5) {
5548 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5549 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5550 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5551 sizeof(parms->pc_vaparmscnt))) {
5552 vki_uint_t i;
5553 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5554 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5555 PRE_MEM_READ("priocntlsys(parms)", (Addr)&parm->pc_key,
5556 parms->pc_vaparmscnt * sizeof(parm->pc_key));
5557 if (ML_(safe_to_deref)(&parm->pc_key,
5558 sizeof(parm->pc_key))) {
5559 /* First handle PC_KY_CLNAME, then class specific keys.
5560 Note that PC_KY_CLNAME can be used only with
5561 ARG4==NULL && parms->pc_vaparmscnt==1. We are not so
5562 strict here and handle this special case as a regular
5563 one which makes the code simpler. */
5564 if (parm->pc_key == VKI_PC_KY_CLNAME)
5565 PRE_MEM_WRITE("priocntlsys(clname)", parm->pc_parm,
5566 VKI_PC_CLNMSZ);
5567 else if (ARG4 && ML_(safe_to_deref)((void*)ARG4, 1))
5568 mem_priocntlsys_parm(tid, True /*pre*/,
5569 False /*read*/, (HChar*)ARG4,
5570 parm);
5575 break;
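/* For completeness, the PC_KY_CLNAME special case above corresponds to a
   client querying the scheduling class name of a process, roughly
   (sketch; error handling omitted):

      char clname[PC_CLNMSZ];
      pc_vaparms_t vp = { 0 };
      vp.pc_vaparmscnt       = 1;
      vp.pc_parms[0].pc_key  = PC_KY_CLNAME;
      vp.pc_parms[0].pc_parm = (uintptr_t) clname;
      priocntl(P_PID, getpid(), PC_GETXPARMS, NULL, &vp);
*/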
5576 case VKI_PC_SETDFLCL:
5577 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5578 break;
5579 case VKI_PC_GETDFLCL:
5580 if (ARG4) {
5581 /* GETDFLCL writes to the ARG4 buffer only if ARG4 isn't NULL. Also
5582 note that if ARG4 is NULL then the syscall succeeds. */
5583 PRE_MEM_WRITE("priocntlsys(clname)", ARG4, VKI_PC_CLNMSZ);
5585 break;
5586 case VKI_PC_DOPRIO:
5587 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5589 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5590 PRE_FIELD_READ("priocntlsys(op)", prio->pc_op);
5591 if (ML_(safe_to_deref)(&prio->pc_op, sizeof(prio->pc_op))) {
5592 switch (prio->pc_op) {
5593 case VKI_PC_GETPRIO:
5594 PRE_FIELD_WRITE("priocntlsys(cid)", prio->pc_cid);
5595 PRE_FIELD_WRITE("priocntlsys(val)", prio->pc_val);
5596 break;
5597 case VKI_PC_SETPRIO:
5598 PRE_FIELD_READ("priocntlsys(cid)", prio->pc_cid);
5599 PRE_FIELD_READ("priocntlsys(val)", prio->pc_val);
5600 break;
5601 default:
5602 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5603 "cmd=PC_DOPRIO and pc_op=%d", prio->pc_op);
5604 /*NOTREACHED*/
5605 break;
5609 break;
5610 case VKI_PC_ADMIN:
5611 default:
5612 VG_(unimplemented)("Syswrap of the priocntlsys call with cmd %ld.", SARG3);
5613 /*NOTREACHED*/
5614 break;
5618 static void post_mem_write_priocntlsys_clinfo(ThreadId tid,
5619 const HChar *clname, Addr clinfo)
5621 if (VG_STREQ(clname, "RT"))
5622 POST_MEM_WRITE(clinfo, sizeof(vki_rtinfo_t));
5623 else if (VG_STREQ(clname, "TS"))
5624 POST_MEM_WRITE(clinfo, sizeof(vki_tsinfo_t));
5625 else if (VG_STREQ(clname, "IA"))
5626 POST_MEM_WRITE(clinfo, sizeof(vki_iainfo_t));
5627 else if (VG_STREQ(clname, "FSS"))
5628 POST_MEM_WRITE(clinfo, sizeof(vki_fssinfo_t));
5629 else if (VG_STREQ(clname, "FX"))
5630 POST_MEM_WRITE(clinfo, sizeof(vki_fxinfo_t));
5631 else if (VG_STREQ(clname, "SDC")) {
5632 /* Relax. */
5634 else {
5635 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5636 clname);
5637 /*NOTREACHED*/
5641 POST(sys_priocntlsys)
5643 switch (ARG3 /*cmd*/) {
5644 case VKI_PC_GETCID:
5645 if (ARG4) {
5646 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5647 POST_FIELD_WRITE(info->pc_cid);
5648 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5649 (Addr)&info->pc_clinfo);
5651 break;
5652 case VKI_PC_GETCLINFO:
5653 if (ARG4) {
5654 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5655 POST_MEM_WRITE((Addr)info->pc_clname,
5656 VG_(strlen)((HChar*)info->pc_clname) + 1);
5657 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5658 (Addr)&info->pc_clinfo);
5660 break;
5661 case VKI_PC_SETPARMS:
5662 /* Relax. */
5663 break;
5664 case VKI_PC_GETPARMS:
5665 /* The next line says that the complete pcparms_t structure is
5666 written which is never actually true (we are too optimistic here).
5667 Unfortunately we can't do better because we don't know what
5668 process class is involved. */
5669 POST_MEM_WRITE(ARG4, sizeof(vki_pcparms_t));
5670 break;
5671 case VKI_PC_GETPRIRANGE:
5672 POST_MEM_WRITE(ARG4, sizeof(vki_pcpri_t));
5673 break;
5674 case VKI_PC_DONICE:
5676 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5677 if (nicee->pc_op == VKI_PC_GETNICE)
5678 POST_FIELD_WRITE(nicee->pc_val);
5680 break;
5681 case VKI_PC_SETXPARMS:
5682 /* Relax. */
5683 break;
5684 case VKI_PC_GETXPARMS:
5686 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5687 vki_uint_t i;
5688 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5689 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5690 if (parm->pc_key == VKI_PC_KY_CLNAME)
5691 POST_MEM_WRITE(parm->pc_parm,
5692 VG_(strlen)((HChar*)(Addr)parm->pc_parm) + 1);
5693 else if (ARG4)
5694 mem_priocntlsys_parm(tid, False /*pre*/, False /*read*/,
5695 (HChar*)ARG4, parm);
5698 break;
5699 case VKI_PC_SETDFLCL:
5700 /* Relax. */
5701 break;
5702 case VKI_PC_GETDFLCL:
5703 if (ARG4)
5704 POST_MEM_WRITE(ARG4, VG_(strlen)((HChar*)ARG4) + 1);
5705 break;
5706 case VKI_PC_DOPRIO:
5708 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5709 if (prio->pc_op == VKI_PC_GETPRIO) {
5710 POST_FIELD_WRITE(prio->pc_cid);
5711 POST_FIELD_WRITE(prio->pc_val);
5714 break;
5715 case VKI_PC_ADMIN:
5716 default:
5717 vg_assert(0);
5718 break;
5722 PRE(sys_pathconf)
5724 /* long pathconf(const char *path, int name); */
5725 PRINT("sys_pathconf ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
5726 PRE_REG_READ2(long, "pathconf", const char *, path, int, name);
5727 PRE_MEM_RASCIIZ("pathconf(path)", ARG1);
5730 PRE(sys_mmap)
5732 /* void *mmap(void *addr, size_t len, int prot, int flags,
5733 int fildes, off_t off); */
5734 SysRes r;
5735 OffT offset;
5737 /* Stay sane. */
5738 vg_assert(VKI_PAGE_SIZE == 4096);
5739 vg_assert(sizeof(offset) == sizeof(ARG6));
5741 PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx )",
5742 ARG1, ARG2, ARG3, ARG4, SARG5, ARG6);
5743 PRE_REG_READ6(long, "mmap", void *, start, vki_size_t, length,
5744 int, prot, int, flags, int, fd, vki_off_t, offset);
5746 /* Make sure that if off < 0 then it's passed correctly to the generic mmap
5747 wrapper. */
5748 offset = *(OffT*)&ARG6;
5750 r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
5751 SET_STATUS_from_SysRes(r);
5754 #if defined(SOLARIS_UUIDSYS_SYSCALL)
5755 PRE(sys_uuidsys)
5757 /* int uuidsys(struct uuid *uuid); */
5758 PRINT("sys_uuidsys ( %#lx )", ARG1);
5759 PRE_REG_READ1(long, "uuidsys", struct vki_uuid *, uuid);
5760 PRE_MEM_WRITE("uuidsys(uuid)", ARG1, sizeof(struct vki_uuid));
5763 POST(sys_uuidsys)
5765 POST_MEM_WRITE(ARG1, sizeof(struct vki_uuid));
5767 #endif /* SOLARIS_UUIDSYS_SYSCALL */
5769 /* Syscall mmapobj emulation. Processes ELF program headers
5770 and maps them into the correct place in memory. Not an easy task, though.
5771 ELF program header of PT_LOAD/PT_SUNWBSS type specifies:
5772 o p_vaddr - actually a memory offset
5773 o p_memsz - total segment size, including text, data and BSS
5774 o p_filesz - file-based segment size mapping (includes only text and data);
5775 p_memsz - p_filesz is the size of BSS
5776 o p_offset - offset into the ELF file where the file-based mapping starts
5778 Several problematic areas to cover here:
5779 1. p_offset can contain a value which is not page-aligned. In that case
5780 we mmap a part of the file prior to p_offset to make the start address
5781 page-aligned.
5782 2. Partially unused page after the file-based mapping must be zeroed.
5783 3. The first mapping is flagged with MR_HDR_ELF and needs to contain
5784 the ELF header. This information is used and verified by the dynamic
5785 linker (ld.so.1). */
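/* A worked example of the arithmetic done below, for a PT_LOAD segment
   other than the ELF-header-carrying first one (all numbers are made up;
   page size 4096):

      p_vaddr  = ...0x5834 (0x834 into its page)
      p_offset = 0x10834, p_filesz = 0x2000, p_memsz = 0x5000

   o page_offset = 0x834, so the mapping is pulled back to the page
     boundary: mr_addr -= 0x834, mr_msize becomes 0x5834, mr_offset = 0x834
     and the file mapping starts at file offset 0x10000;
   o file_size = p_filesz + mr_offset = 0x2834, i.e. three pages; the last
     0x7cc bytes of the third page are zeroed explicitly;
   o the remaining pages up to VG_PGROUNDUP(mr_addr + mr_msize), i.e.
     [mr_addr + 0x3000, mr_addr + 0x6000), are mapped anonymous and form
     the BSS. */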
5786 static SysRes mmapobj_process_phdrs(ThreadId tid, Int fd,
5787 vki_mmapobj_result_t *storage,
5788 vki_uint_t *elements,
5789 const VKI_ESZ(Ehdr) *ehdr,
5790 const VKI_ESZ(Phdr) *phdrs)
5792 #define ADVANCE_PHDR(ehdr, phdr) \
5793 (const VKI_ESZ(Phdr) *) ((const HChar *) (phdr) + (ehdr)->e_phentsize)
5795 SysRes res;
5796 Int i;
5797 Int first_segment_idx = -1;
5798 UInt idx;
5799 UInt segments = 0; /* loadable segments */
5800 Addr start_addr = 0;
5801 Addr end_addr = 0;
5802 Addr elfbrk = 0;
5803 SizeT max_align = VKI_PAGE_SIZE;
5805 /* 1. First pass over phdrs - determine number, span and max alignment. */
5806 const VKI_ESZ(Phdr) *phdr = phdrs;
5807 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5808 /* Skip this header if no memory is requested. */
5809 if (phdr->p_memsz == 0)
5810 continue;
5812 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5813 Off64T offset = 0;
5815 if (VG_(clo_trace_syscalls))
5816 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
5817 "program header #%u: addr=%#lx type=%#lx "
5818 "prot=%#lx memsz=%#lx filesz=%#lx file "
5819 "offset=%#lx\n", idx, phdr->p_vaddr,
5820 (UWord) phdr->p_type, (UWord) phdr->p_flags,
5821 phdr->p_memsz, phdr->p_filesz, phdr->p_offset);
5823 if (segments == 0) {
5824 first_segment_idx = idx;
5826 if (phdr->p_filesz == 0) {
5827 VG_(unimplemented)("Syswrap of the mmapobj call with the first "
5828 "loadable ELF program header specifying "
5829 "p_filesz == 0");
5830 /*NOTREACHED*/
5831 return res;
5834 /* Address of the first segment must be either NULL or within the
5835 first page. */
5836 if ((ehdr->e_type == VKI_ET_DYN) &&
5837 ((phdr->p_vaddr & VKI_PAGEMASK) != 0)) {
5838 if (VG_(clo_trace_syscalls))
5839 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5840 "ELF program header #%u does not land on "
5841 "the first page (vaddr=%#lx)\n", idx,
5842 phdr->p_vaddr);
5843 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5846 start_addr = phdr->p_vaddr;
5847 /* The first segment is mapped from the beginning of the file (to
5848 include also the ELF header), so include this memory as well.
5849 Later on we flag this mapping with MR_HDR_ELF. */
5850 offset = phdr->p_offset;
5853 if (phdr->p_align > 1) {
5854 if ((phdr->p_vaddr % phdr->p_align) !=
5855 (phdr->p_offset % phdr->p_align)) {
5856 if (VG_(clo_trace_syscalls))
5857 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5858 "ELF program header #%u does not have "
5859 "congruent offset and vaddr (vaddr=%#lx "
5860 "file offset=%#lx align=%#lx)\n", idx,
5861 phdr->p_vaddr, phdr->p_offset,
5862 phdr->p_align);
5863 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5867 if (phdr->p_vaddr < end_addr) {
5868 if (VG_(clo_trace_syscalls))
5869 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5870 "ELF program header #%u specifies overlapping "
5871 "address (vaddr=%#lx end_addr=%#lx)\n",
5872 idx, phdr->p_vaddr, end_addr);
5873 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5876 end_addr = elfbrk = phdr->p_vaddr + phdr->p_memsz + offset;
5877 end_addr = VG_PGROUNDUP(end_addr);
5878 if (phdr->p_align > max_align) {
5879 max_align = phdr->p_align;
5882 segments += 1;
5886 /* Alignment check - it should be power of two. */
5887 if ((max_align & (max_align - 1)) != 0) {
5888 if (VG_(clo_trace_syscalls))
5889 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5890 "is not a power of 2 (%#lx)\n", max_align);
5891 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5893 vg_assert(max_align >= VKI_PAGE_SIZE);
5895 #if defined(VGP_x86_solaris)
5896 if (max_align > VKI_UINT_MAX) {
5897 if (VG_(clo_trace_syscalls))
5898 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5899 "for 32-bit ELF is >32-bits (%#lx)\n", max_align);
5900 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5902 #endif /* VGP_x86_solaris */
5904 if (segments == 0) {
5905 if (VG_(clo_trace_syscalls))
5906 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5907 "to map (0 loadable segments)\n");
5908 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5911 vg_assert(end_addr >= start_addr);
5912 SizeT span = end_addr - start_addr;
5913 if (span == 0) {
5914 if (VG_(clo_trace_syscalls))
5915 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5916 "to map (%u loadable segments spanning 0 bytes)\n",
5917 segments);
5918 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5920 vg_assert(first_segment_idx >= 0);
5922 if (segments > *elements) {
5923 if (VG_(clo_trace_syscalls))
5924 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: too many "
5925 "segments (%u)\n", segments);
5926 return VG_(mk_SysRes_Error)(VKI_E2BIG);
5929 if (VG_(clo_trace_syscalls))
5930 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: there "
5931 "are %u loadable segments spanning %#lx bytes; max "
5932 "align is %#lx\n", segments, span, max_align);
5934 /* Now get the aspacemgr oraculum advisory.
5935 Later on we mmap file-based and BSS mappings into this address space area
5936 as required and leave the holes unmapped. */
5937 if (ehdr->e_type == VKI_ET_DYN) {
5938 MapRequest mreq = {MAlign, max_align, span};
5939 Bool ok;
5940 start_addr = VG_(am_get_advisory)(&mreq, True /* forClient */, &ok);
5941 if (!ok) {
5942 if (VG_(clo_trace_syscalls))
5943 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5944 "failed to reserve address space of %#lx bytes "
5945 "with alignment %#lx\n", span, max_align);
5946 return VG_(mk_SysRes_Error)(VKI_ENOMEM);
5948 vg_assert(VG_ROUNDUP(start_addr, max_align) == start_addr);
5950 if (VG_(clo_trace_syscalls))
5951 VG_(debugLog)(2, "syswrap-solaris", "PRE(sys_mmapobj): address space "
5952 "reserved at: vaddr=%#lx size=%#lx\n",
5953 start_addr, span);
5954 } else {
5955 vg_assert(ehdr->e_type == VKI_ET_EXEC);
5956 /* ET_EXEC uses fixed mappings. Will be checked when processing phdrs. */
5959 /* This is an utterly ugly hack: the aspacemgr assumes that only one
5960 segment is added at a time, but here we add multiple segments, so
5961 AM_SANITY_CHECK inside the aspacemgr can easily fail. To prevent that
5962 we temporarily disable these checks. The scheduler will check the
5963 aspacemgr sanity after the syscall. */
5964 UInt sanity_level = VG_(clo_sanity_level);
5965 VG_(clo_sanity_level) = 1;
5967 /* 2. Second pass over phdrs - map the program headers and fill in
5968 the mmapobj_result_t array. */
5969 phdr = phdrs;
5970 *elements = 0;
5971 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5972 /* Skip this header if no memory is requested. */
5973 if (phdr->p_memsz == 0)
5974 continue;
5976 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5977 UInt prot = 0;
5978 if (phdr->p_flags & VKI_PF_R)
5979 prot |= VKI_PROT_READ;
5980 if (phdr->p_flags & VKI_PF_W)
5981 prot |= VKI_PROT_WRITE;
5982 if (phdr->p_flags & VKI_PF_X)
5983 prot |= VKI_PROT_EXEC;
5985 vki_mmapobj_result_t *mrp = &storage[*elements];
5986 mrp->mr_msize = phdr->p_memsz;
5987 mrp->mr_fsize = phdr->p_filesz;
5988 mrp->mr_offset = 0;
5989 mrp->mr_prot = prot;
5990 mrp->mr_flags = 0;
5991 Off64T file_offset = phdr->p_offset;
5992 if (idx == first_segment_idx) {
5993 mrp->mr_flags = VKI_MR_HDR_ELF;
5994 if (ehdr->e_type == VKI_ET_DYN) {
5995 if (phdr->p_offset > 0) {
5996 /* Include the ELF header into the first segment.
5997 This means we ignore p_offset from the program header
5998 and map from file offset 0. */
5999 mrp->mr_msize += phdr->p_offset;
6000 mrp->mr_fsize += phdr->p_offset;
6001 file_offset = 0;
6003 } else {
6004 vg_assert(ehdr->e_type == VKI_ET_EXEC);
6005 start_addr = phdr->p_vaddr;
6009 /* p_vaddr is absolute for ET_EXEC, and relative for ET_DYN. */
6010 mrp->mr_addr = (vki_caddr_t) phdr->p_vaddr;
6011 if (ehdr->e_type == VKI_ET_DYN) {
6012 mrp->mr_addr += start_addr;
6015 SizeT page_offset = (Addr) mrp->mr_addr & VKI_PAGEOFFSET;
6016 if (page_offset > 0) {
6017 vg_assert(file_offset >= page_offset);
6018 /* Mapping address does not start at the beginning of a page.
6019 Therefore include some bytes before to make it page aligned. */
6020 mrp->mr_addr -= page_offset;
6021 mrp->mr_msize += page_offset;
6022 mrp->mr_offset = page_offset;
6023 file_offset -= page_offset;
6025 SizeT file_size = mrp->mr_fsize + mrp->mr_offset;
6026 if (VG_(clo_trace_syscalls))
6027 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
6028 "mmapobj result #%u: addr=%#lx msize=%#lx "
6029 "fsize=%#lx mr_offset=%#lx prot=%#x flags=%#x\n",
6030 *elements, (Addr) mrp->mr_addr,
6031 (UWord) mrp->mr_msize, (UWord) mrp->mr_fsize,
6032 (UWord) mrp->mr_offset, mrp->mr_prot,
6033 mrp->mr_flags);
6035 UInt flags = VKI_MAP_PRIVATE | VKI_MAP_FIXED;
6036 if ((mrp->mr_prot & (VKI_PROT_WRITE | VKI_PROT_EXEC)) ==
6037 VKI_PROT_EXEC) {
6038 flags |= VKI_MAP_TEXT;
6039 } else {
6040 flags |= VKI_MAP_INITDATA;
6043 /* Determine if there will be a partially unused page after the file-based
6044 mapping. If so, we need to zero it explicitly afterwards. */
6045 Addr mapping_end = (Addr) mrp->mr_addr + file_size;
6046 SizeT zeroed_size = VG_PGROUNDUP(mapping_end) - mapping_end;
6047 Bool mprotect_needed = False;
6048 if ((zeroed_size > 0) && ((prot & VKI_PROT_WRITE) == 0)) {
6049 prot |= VKI_PROT_WRITE;
6050 mprotect_needed = True;
6053 if (ehdr->e_type == VKI_ET_EXEC) {
6054 /* Now check if the requested address space is available. */
6055 if (!VG_(am_is_free_or_resvn)((Addr) mrp->mr_addr, mrp->mr_msize)) {
6056 if (VG_(clo_trace_syscalls))
6057 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6058 "requested segment at %#lx with size of "
6059 "%#lx bytes is not available\n",
6060 (Addr) mrp->mr_addr, (UWord) mrp->mr_msize);
6061 res = VG_(mk_SysRes_Error)(VKI_EADDRINUSE);
6062 goto mmap_error;
6066 if (file_size > 0) {
6067 res = VG_(am_mmap_file_fixed_client_flags)((Addr) mrp->mr_addr,
6068 file_size, prot, flags, fd, file_offset);
6069 if (sr_isError(res)) {
6070 if (VG_(clo_trace_syscalls))
6071 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6072 "mmap failed: addr=%#lx size=%#lx prot=%#x "
6073 "flags=%#x fd=%d file offset=%#llx\n",
6074 (Addr) mrp->mr_addr, file_size,
6075 prot, flags, fd, file_offset);
6076 goto mmap_error;
6079 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
6080 "segment: vaddr=%#lx size=%#lx prot=%#x "
6081 "flags=%#x fd=%d file offset=%#llx\n",
6082 (Addr) mrp->mr_addr, file_size, mrp->mr_prot,
6083 flags, fd, file_offset);
6086 if (zeroed_size > 0) {
6087 /* Now zero out the end of partially used page. */
6088 VG_(memset)((void *) mapping_end, 0, zeroed_size);
6089 if (mprotect_needed) {
6090 prot &= ~VKI_PROT_WRITE;
6091 res = VG_(do_syscall3)(SYS_mprotect, (Addr) mrp->mr_addr,
6092 file_size, prot);
6093 if (sr_isError(res)) {
6094 if (VG_(clo_trace_syscalls))
6095 VG_(debugLog)(3, "syswrap-solaris",
6096 "mmapobj_process_phdrs: mprotect failed: "
6097 "addr=%#lx size=%#lx prot=%#x\n",
6098 (Addr) mrp->mr_addr, file_size, prot);
6099 /* Mapping for this segment was already established. */
6100 idx += 1;
6101 goto mmap_error;
6106 if (file_size > 0) {
6107 ML_(notify_core_and_tool_of_mmap)((Addr) mrp->mr_addr, file_size,
6108 prot, flags, fd, file_offset);
6111 /* Page(s) after the mapping backed by the file are part of the BSS.
6112 They need to be mmap'ed over with the correct flags and will be
6113 implicitly zeroed. */
6114 mapping_end = VG_PGROUNDUP(mrp->mr_addr + mrp->mr_msize);
6115 Addr page_end = VG_PGROUNDUP(mrp->mr_addr + file_size);
6116 vg_assert(mapping_end >= page_end);
6117 zeroed_size = mapping_end - page_end;
6118 if (zeroed_size > 0) {
6119 flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS;
6120 res = VG_(am_mmap_anon_fixed_client)(page_end, zeroed_size, prot);
6121 if (sr_isError(res)) {
6122 if (VG_(clo_trace_syscalls))
6123 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6124 "mmap_anon failed: addr=%#lx size=%#lx "
6125 "prot=%#x\n", page_end, zeroed_size, prot);
6126 idx += 1; /* mapping for this segment was already established */
6127 goto mmap_error;
6130 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
6131 "anonymous segment (BSS): vaddr=%#lx size=%#lx "
6132 "prot=%#x\n", page_end, zeroed_size, prot);
6133 ML_(notify_core_and_tool_of_mmap)(page_end, zeroed_size,
6134 prot, flags, -1, 0);
6137 VG_(di_notify_mmap)((Addr) mrp->mr_addr, False /*allow_SkFileV*/, fd);
6139 *elements += 1;
6140 vg_assert(*elements <= segments);
6144 if ((ehdr->e_type == VKI_ET_EXEC) && (!brk_segment_established)) {
6145 vg_assert(VG_(brk_base) == VG_(brk_limit));
6146 vg_assert(VG_(brk_base) == -1);
6147 VG_(brk_base) = VG_(brk_limit) = elfbrk;
6149 if (!VG_(setup_client_dataseg)()) {
6150 VG_(umsg)("Cannot map memory to initialize brk segment in thread #%d "
6151 "at %#lx\n", tid, VG_(brk_base));
6152 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
6153 goto mmap_error;
6156 VG_(track_client_dataseg)(tid);
6159 /* Restore VG_(clo_sanity_level). The scheduler will perform the aspacemgr
6160 sanity check after the syscall. */
6161 VG_(clo_sanity_level) = sanity_level;
6163 return VG_(mk_SysRes_Success)(0);
6165 mmap_error:
6166 for (i = idx - 1; i > 0; i--) {
6167 Bool discard_translations;
6168 Addr addr = (Addr) storage[i].mr_addr;
6170 VG_(am_munmap_client)(&discard_translations, addr, storage[i].mr_msize);
6171 ML_(notify_core_and_tool_of_munmap)(addr, storage[i].mr_msize);
6173 *elements = 0;
6174 return res;
6176 #undef ADVANCE_PHDR
6179 static SysRes mmapobj_interpret(ThreadId tid, Int fd,
6180 vki_mmapobj_result_t *storage,
6181 vki_uint_t *elements)
6183 SysRes res;
6185 struct vg_stat stats;
6186 if (VG_(fstat)(fd, &stats) != 0) {
6187 return VG_(mk_SysRes_Error)(VKI_EBADF);
6190 if (stats.size < sizeof(VKI_ESZ(Ehdr))) {
6191 if (VG_(clo_trace_syscalls))
6192 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: insufficient "
6193 "file size (%lld)\n", stats.size);
6194 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6197 /* Align the header buffer appropriately. */
6198 vki_ulong_t lheader[sizeof(VKI_ESZ(Ehdr)) / sizeof(vki_ulong_t) + 1];
6199 HChar *header = (HChar *) &lheader;
6201 res = VG_(pread)(fd, header, sizeof(VKI_ESZ(Ehdr)), 0);
6202 if (sr_isError(res)) {
6203 if (VG_(clo_trace_syscalls))
6204 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6205 "header failed\n");
6206 return res;
6207 } else if (sr_Res(res) != sizeof(VKI_ESZ(Ehdr))) {
6208 if (VG_(clo_trace_syscalls))
6209 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6210 "header failed - only %lu bytes out of %lu\n",
6211 sr_Res(res), (UWord) sizeof(VKI_ESZ(Ehdr)));
6212 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6215 /* Verify file type is ELF. */
6216 if ((header[VKI_EI_MAG0] != VKI_ELFMAG0) ||
6217 (header[VKI_EI_MAG1] != VKI_ELFMAG1) ||
6218 (header[VKI_EI_MAG2] != VKI_ELFMAG2) ||
6219 (header[VKI_EI_MAG3] != VKI_ELFMAG3)) {
6220 if (VG_(clo_trace_syscalls))
6221 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6222 "missing magic\n");
6223 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6226 if (header[VKI_EI_CLASS] != VG_ELF_CLASS) {
6227 if (VG_(clo_trace_syscalls))
6228 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF class "
6229 "mismatch (%u vs %u)\n", header[VKI_EI_CLASS],
6230 VG_ELF_CLASS);
6231 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6234 VKI_ESZ(Ehdr) *ehdr = (VKI_ESZ(Ehdr) *) header;
6235 if ((ehdr->e_type != VKI_ET_EXEC) && (ehdr->e_type != VKI_ET_DYN)) {
6236 VG_(unimplemented)("Syswrap of the mmapobj call with ELF type %u.",
6237 ehdr->e_type);
6238 /*NOTREACHED*/
6239 return res;
6242 if (ehdr->e_phnum == VKI_PN_XNUM) {
6243 VG_(unimplemented)("Syswrap of the mmapobj call with number of ELF "
6244 "program headers == PN_XNUM");
6245 /*NOTREACHED*/
6246 return res;
6249 /* Check alignment. */
6250 #if defined(VGP_x86_solaris)
6251 if (!VG_IS_4_ALIGNED(ehdr->e_phentsize)) {
6252 #elif defined(VGP_amd64_solaris)
6253 if (!VG_IS_8_ALIGNED(ehdr->e_phentsize)) {
6254 #else
6255 # error "Unknown platform"
6256 #endif
6257 if (VG_(clo_trace_syscalls))
6258 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6259 "phentsize not aligned properly (%u)\n",
6260 ehdr->e_phentsize);
6261 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6264 SizeT phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
6265 if (phdrs_size == 0) {
6266 if (VG_(clo_trace_syscalls))
6267 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: no ELF "
6268 "program headers\n");
6269 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6272 VKI_ESZ(Phdr) *phdrs = VG_(malloc)("syswrap.mi.1", phdrs_size);
6273 res = VG_(pread)(fd, phdrs, phdrs_size, ehdr->e_phoff);
6274 if (sr_isError(res)) {
6275 if (VG_(clo_trace_syscalls))
6276 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6277 "program headers failed\n");
6278 VG_(free)(phdrs);
6279 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6280 } else if (sr_Res(res) != phdrs_size) {
6281 if (VG_(clo_trace_syscalls))
6282 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6283 "program headers failed - only %lu bytes out of %lu\n",
6284 sr_Res(res), phdrs_size);
6285 VG_(free)(phdrs);
6286 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6289 if (VG_(clo_trace_syscalls))
6290 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_interpret: %u ELF "
6291 "program headers with total size of %lu bytes\n",
6292 ehdr->e_phnum, phdrs_size);
6294 /* Now process the program headers. */
6295 res = mmapobj_process_phdrs(tid, fd, storage, elements, ehdr, phdrs);
6296 VG_(free)(phdrs);
6297 return res;
6300 PRE(sys_mmapobj)
6302 /* int mmapobj(int fd, uint_t flags, mmapobj_result_t *storage,
6303 uint_t *elements, void *arg); */
6304 PRINT("sys_mmapobj ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6305 ARG4, ARG5);
6306 PRE_REG_READ5(long, "mmapobj", int, fd, vki_uint_t, flags,
6307 mmapobj_result_t *, storage, uint_t *, elements,
6308 void *, arg);
6310 PRE_MEM_READ("mmapobj(elements)", ARG4, sizeof(vki_uint_t));
6311 /*PRE_MEM_WRITE("mmapobj(elements)", ARG4, sizeof(vki_uint_t));*/
6312 if (ML_(safe_to_deref)((void*)ARG4, sizeof(vki_uint_t))) {
6313 vki_uint_t *u = (vki_uint_t*)ARG4;
6314 PRE_MEM_WRITE("mmapobj(storage)", ARG3,
6315 *u * sizeof(vki_mmapobj_result_t));
6318 if (ARG2 & VKI_MMOBJ_PADDING)
6319 PRE_MEM_READ("mmapobj(arg)", ARG5, sizeof(vki_size_t));
6321 /* Be strict. */
6322 if (!ML_(fd_allowed)(ARG1, "mmapobj", tid, False)) {
6323 SET_STATUS_Failure(VKI_EBADF);
6324 return;
6327 /* We cannot advise mmapobj about the desired address(es). Unfortunately
6328 the kernel places mappings from mmapobj at the end of the process address
6329 space, defeating memcheck's optimized fast 2-level array algorithm.
6330 So we need to emulate what mmapobj does in the kernel. */
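/* For reference, the client call being emulated typically looks like this
   (sketch; the buffer size and the file name are arbitrary):

      mmapobj_result_t storage[32];
      uint_t elements = 32;
      int fd = open("/usr/lib/libfoo.so.1", O_RDONLY);
      if (mmapobj(fd, MMOBJ_INTERPRET, storage, &elements, NULL) == 0)
         ... elements now holds the number of mappings established ...
*/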
6332 /* Sanity check on parameters. */
6333 if ((ARG2 & ~VKI_MMOBJ_ALL_FLAGS) != 0) {
6334 SET_STATUS_Failure(VKI_EINVAL);
6335 return;
6338 if (!ML_(safe_to_deref)((void *) ARG4, sizeof(vki_uint_t))) {
6339 SET_STATUS_Failure(VKI_EFAULT);
6340 return;
6342 vki_uint_t *elements = (vki_uint_t *) ARG4;
6344 if (*elements > 0) {
6345 if (!ML_(safe_to_deref)((void *) ARG3,
6346 *elements * sizeof(vki_mmapobj_result_t))) {
6347 SET_STATUS_Failure(VKI_EFAULT);
6348 return;
6352 /* For now, only MMOBJ_INTERPRET without MMOBJ_PADDING is supported. */
6353 if (ARG2 != VKI_MMOBJ_INTERPRET) {
6354 VG_(unimplemented)("Syswrap of the mmapobj call with flags %lu.", ARG2);
6355 /*NOTREACHED*/
6356 return;
6359 SysRes res = mmapobj_interpret(tid, (Int) ARG1,
6360 (vki_mmapobj_result_t *) ARG3, elements);
6361 SET_STATUS_from_SysRes(res);
6363 if (!sr_isError(res)) {
6364 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
6366 UInt idx;
6367 for (idx = 0; idx < *(vki_uint_t *) ARG4; idx++) {
6368 vki_mmapobj_result_t *mrp = &((vki_mmapobj_result_t *) ARG3)[idx];
6369 POST_FIELD_WRITE(mrp->mr_addr);
6370 POST_FIELD_WRITE(mrp->mr_msize);
6371 POST_FIELD_WRITE(mrp->mr_fsize);
6372 POST_FIELD_WRITE(mrp->mr_prot);
6373 POST_FIELD_WRITE(mrp->mr_flags);
6374 POST_FIELD_WRITE(mrp->mr_offset);
6379 PRE(sys_memcntl)
6381 /* int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
6382 int attr, int mask); */
6383 PRINT("sys_memcntl ( %#lx, %#lx, %ld, %#lx, %#lx, %#lx )", ARG1, ARG2,
6384 SARG3, ARG4, ARG5, ARG6);
6385 PRE_REG_READ6(long, "memcntl", void *, addr, vki_size_t, len, int, cmd,
6386 void *, arg, int, attr, int, mask);
6388 if (ARG3 != VKI_MC_LOCKAS && ARG3 != VKI_MC_UNLOCKAS &&
6389 !ML_(valid_client_addr)(ARG1, ARG2, tid, "memcntl")) {
6390 /* MC_LOCKAS and MC_UNLOCKAS work on the complete address space thus we
6391 don't check the address range validity if these commands are
6392 requested. */
6393 SET_STATUS_Failure(VKI_ENOMEM);
6394 return;
6397 if (ARG3 == VKI_MC_HAT_ADVISE)
6398 PRE_MEM_READ("memcntl(arg)", ARG4, sizeof(struct vki_memcntl_mha));
6401 PRE(sys_getpmsg)
6403 /* int getpmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
6404 int *bandp, int *flagsp); */
6405 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6406 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6407 *flags |= SfMayBlock;
6408 PRINT("sys_getpmsg ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6409 ARG4, ARG5);
6410 PRE_REG_READ5(long, "getpmsg", int, fildes, struct vki_strbuf *, ctlptr,
6411 struct vki_strbuf *, dataptr, int *, bandp, int *, flagsp);
6412 if (ctrlptr) {
6413 PRE_FIELD_READ("getpmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
6414 PRE_FIELD_WRITE("getpmsg(ctrlptr->len)", ctrlptr->len);
6415 PRE_FIELD_READ("getpmsg(ctrlptr->buf)", ctrlptr->buf);
6416 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6417 && ctrlptr->maxlen > 0)
6418 PRE_MEM_WRITE("getpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6419 ctrlptr->maxlen);
6421 if (dataptr) {
6422 PRE_FIELD_READ("getpmsg(dataptr->maxlen)", dataptr->maxlen);
6423 PRE_FIELD_WRITE("getpmsg(dataptr->len)", dataptr->len);
6424 PRE_FIELD_READ("getpmsg(dataptr->buf)", dataptr->buf);
6425 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6426 && dataptr->maxlen > 0)
6427 PRE_MEM_WRITE("getpmsg(dataptr->buf)", (Addr)dataptr->buf,
6428 dataptr->maxlen);
6430 PRE_MEM_READ("getpmsg(bandp)", ARG4, sizeof(int));
6431 /*PRE_MEM_WRITE("getpmsg(bandp)", ARG4, sizeof(int));*/
6432 PRE_MEM_READ("getpmsg(flagsp)", ARG5, sizeof(int));
6433 /*PRE_MEM_WRITE("getpmsg(flagsp)", ARG5, sizeof(int));*/
6435 /* Be strict. */
6436 if (!ML_(fd_allowed)(ARG1, "getpmsg", tid, False))
6437 SET_STATUS_Failure(VKI_EBADF);
6440 POST(sys_getpmsg)
6442 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6443 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6445 if (ctrlptr && ctrlptr->len > 0)
6446 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
6447 if (dataptr && dataptr->len > 0)
6448 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
6449 POST_MEM_WRITE(ARG4, sizeof(int));
6450 POST_MEM_WRITE(ARG5, sizeof(int));
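/* For reference, a typical client call that exercises the getpmsg wrappers
   above (sketch; the buffer sizes are arbitrary):

      char cbuf[64], dbuf[512];
      struct strbuf ctl  = { sizeof(cbuf), 0, cbuf };
      struct strbuf data = { sizeof(dbuf), 0, dbuf };
      int band = 0, flg = 0;
      if (getpmsg(fd, &ctl, &data, &band, &flg) >= 0)
         ... ctl.len and data.len give the sizes actually received ...
*/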
6453 PRE(sys_putpmsg)
6455 /* int putpmsg(int fildes, const struct strbuf *ctlptr,
6456 const struct strbuf *dataptr, int band, int flags); */
6457 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6458 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6459 *flags |= SfMayBlock;
6460 PRINT("sys_putpmsg ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
6461 SARG5);
6462 PRE_REG_READ5(long, "putpmsg", int, fildes, struct vki_strbuf *, ctrlptr,
6463 struct vki_strbuf *, dataptr, int, band, int, flags);
6464 if (ctrlptr) {
6465 PRE_FIELD_READ("putpmsg(ctrlptr->len)", ctrlptr->len);
6466 PRE_FIELD_READ("putpmsg(ctrlptr->buf)", ctrlptr->buf);
6467 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6468 && ctrlptr->len > 0)
6469 PRE_MEM_READ("putpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6470 ctrlptr->len);
6472 if (dataptr) {
6473 PRE_FIELD_READ("putpmsg(dataptr->len)", dataptr->len);
6474 PRE_FIELD_READ("putpmsg(dataptr->buf)", dataptr->buf);
6475 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6476 && dataptr->len > 0)
6477 PRE_MEM_READ("putpmsg(dataptr->buf)", (Addr)dataptr->buf,
6478 dataptr->len);
6481 /* Be strict. */
6482 if (!ML_(fd_allowed)(ARG1, "putpmsg", tid, False))
6483 SET_STATUS_Failure(VKI_EBADF);
6486 #if defined(SOLARIS_OLD_SYSCALLS)
6487 PRE(sys_rename)
6489 /* int rename(const char *from, const char *to); */
6491 *flags |= SfMayBlock;
6492 PRINT("sys_rename ( %#lx(%s), %#lx(%s) )",
6493 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2);
6494 PRE_REG_READ2(long, "rename", const char *, from, const char *, to);
6496 PRE_MEM_RASCIIZ("rename(from)", ARG1);
6497 PRE_MEM_RASCIIZ("rename(to)", ARG2);
6499 #endif /* SOLARIS_OLD_SYSCALLS */
6501 PRE(sys_uname)
6503 /* int uname(struct utsname *name); */
6504 PRINT("sys_uname ( %#lx )", ARG1);
6505 PRE_REG_READ1(long, "uname", struct vki_utsname *, name);
6506 PRE_MEM_WRITE("uname(name)", ARG1, sizeof(struct vki_utsname));
6509 POST(sys_uname)
6511 struct vki_utsname *name = (struct vki_utsname *) ARG1;
6512 POST_MEM_WRITE((Addr) name->sysname, VG_(strlen)(name->sysname) + 1);
6513 POST_MEM_WRITE((Addr) name->nodename, VG_(strlen)(name->nodename) + 1);
6514 POST_MEM_WRITE((Addr) name->release, VG_(strlen)(name->release) + 1);
6515 POST_MEM_WRITE((Addr) name->version, VG_(strlen)(name->version) + 1);
6516 POST_MEM_WRITE((Addr) name->machine, VG_(strlen)(name->machine) + 1);
6519 PRE(sys_setegid)
6521 /* int setegid(gid_t egid); */
6522 PRINT("sys_setegid ( %ld )", SARG1);
6523 PRE_REG_READ1(long, "setegid", vki_gid_t, egid);
6526 PRE(sys_sysconfig)
6528 /* long sysconf(int name); */
6529 PRINT("sys_sysconfig ( %ld )", SARG1);
6530 PRE_REG_READ1(long, "sysconf", int, name);
6532 if (ARG1 == VKI_CONFIG_OPEN_FILES)
6533 SET_STATUS_Success(VG_(fd_soft_limit));
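/* The net effect is that sysconf(_SC_OPEN_MAX) in the client reports the
   file descriptor soft limit as seen by Valgrind rather than by the kernel
   (assuming libc maps _SC_OPEN_MAX onto _CONFIG_OPEN_FILES, as illumos
   libc does); sketch:

      long max_fds = sysconf(_SC_OPEN_MAX);   - == VG_(fd_soft_limit)
*/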
6536 PRE(sys_systeminfo)
6538 /* int sysinfo(int command, char *buf, long count); */
6539 PRINT("sys_systeminfo ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
6540 PRE_REG_READ3(long, "sysinfo", int, command, char *, buf, long, count);
6542 switch (ARG1 /*command*/) {
6543 case VKI_SI_SYSNAME:
6544 case VKI_SI_HOSTNAME:
6545 case VKI_SI_RELEASE:
6546 case VKI_SI_VERSION:
6547 case VKI_SI_MACHINE:
6548 case VKI_SI_ARCHITECTURE:
6549 case VKI_SI_HW_SERIAL:
6550 case VKI_SI_HW_PROVIDER:
6551 case VKI_SI_SRPC_DOMAIN:
6552 case VKI_SI_PLATFORM:
6553 case VKI_SI_ISALIST:
6554 case VKI_SI_DHCP_CACHE:
6555 case VKI_SI_ARCHITECTURE_32:
6556 case VKI_SI_ARCHITECTURE_64:
6557 case VKI_SI_ARCHITECTURE_K:
6558 case VKI_SI_ARCHITECTURE_NATIVE:
6559 PRE_MEM_WRITE("sysinfo(buf)", ARG2, ARG3);
6560 break;
6562 case VKI_SI_SET_HOSTNAME:
6563 case VKI_SI_SET_SRCP_DOMAIN:
6564 PRE_MEM_RASCIIZ("sysinfo(buf)", ARG2);
6565 break;
6567 default:
6568 VG_(unimplemented)("Syswrap of the sysinfo call with command %ld.", SARG1);
6569 /*NOTREACHED*/
6570 break;
6574 POST(sys_systeminfo)
6576 if (ARG1 != VKI_SI_SET_HOSTNAME && ARG1 != VKI_SI_SET_SRCP_DOMAIN)
6577 POST_MEM_WRITE(ARG2, MIN(RES, ARG3));
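/* A note on the MIN() above: sysinfo(2) copies at most 'count' bytes but
   returns the buffer size needed for the complete value (including the
   terminating NUL), so RES may exceed ARG3. Client-side sketch:

      char buf[257];
      long need = sysinfo(SI_HOSTNAME, buf, sizeof(buf));
      if (need > (long) sizeof(buf))
         ... the value was truncated, retry with a bigger buffer ...
*/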
6580 PRE(sys_seteuid)
6582 /* int seteuid(uid_t euid); */
6583 PRINT("sys_seteuid ( %ld )", SARG1);
6584 PRE_REG_READ1(long, "seteuid", vki_uid_t, euid);
6587 PRE(sys_forksys)
6589 /* int64_t forksys(int subcode, int flags); */
6590 Int fds[2];
6591 Int res;
6592 PRINT("sys_forksys ( %ld, %ld )", SARG1, SARG2);
6593 PRE_REG_READ2(long, "forksys", int, subcode, int, flags);
6595 if (ARG1 == 1) {
6596 /* Support for forkall() requires changes to the big lock processing
6597 which are not yet implemented. */
6598 VG_(unimplemented)("Support for forkall().");
6599 /*NOTREACHED*/
6600 return;
6603 if (ARG1 != 0 && ARG1 != 2) {
6604 VG_(unimplemented)("Syswrap of the forksys call where subcode=%ld.",
6605 SARG1);
6606 /*NOTREACHED*/
6609 if (ARG1 == 2) {
6610 /* vfork() is requested. Translate it to a normal fork() but work around
6611 a problem with posix_spawn() which relies on the real vfork()
6612 behaviour. See a description in vg_preloaded.c for details. */
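/* The resulting handshake is roughly the following (a sketch; the
   failed-exec part is implemented in vg_preloaded.c):
      parent (this wrapper): pipe -> fork -> close(fds[0])
                             -> blocking read on fds[1]
      child:                 inherits fds[0] via *VG_(vfork_fildes_addr);
                             on a successful exec or on exit the fd is
                             closed (close-on-exec) and the parent's read
                             sees EOF; on a failed exec one errno byte is
                             written, which the parent turns into the
                             failure status of this syscall below. */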
6613 res = VG_(pipe)(fds);
6614 vg_assert(res == 0);
6616 vg_assert(fds[0] != fds[1]);
6618 /* Move to Valgrind fds and set close-on-exec flag on both of them (done
6619 by VG_(safe_fd)). */
6620 fds[0] = VG_(safe_fd)(fds[0]);
6621 fds[1] = VG_(safe_fd)(fds[1]);
6622 vg_assert(fds[0] != fds[1]);
6624 vg_assert(VG_(vfork_fildes_addr) != NULL);
6625 vg_assert(*VG_(vfork_fildes_addr) == -1);
6626 *VG_(vfork_fildes_addr) = fds[0];
6629 VG_(do_atfork_pre)(tid);
6630 SET_STATUS_from_SysRes(VG_(do_syscall2)(__NR_forksys, 0, ARG2));
6632 if (!SUCCESS) {
6633 /* vfork */
6634 if (ARG1 == 2) {
6635 VG_(close)(fds[0]);
6636 VG_(close)(fds[1]);
6639 return;
6642 if (RESHI) {
6643 VG_(do_atfork_child)(tid);
6645 /* vfork */
6646 if (ARG1 == 2)
6647 VG_(close)(fds[1]);
6649 # if defined(SOLARIS_PT_SUNDWTRACE_THRP)
6650 /* The kernel can map a new page as a scratch space for the DTrace fasttrap
6651 provider. There is no way to get its address directly - it's all
6652 private to the kernel. Fish it out the slow way. */
6653 Addr addr;
6654 SizeT size;
6655 UInt prot;
6656 Bool found = VG_(am_search_for_new_segment)(&addr, &size, &prot);
6657 if (found) {
6658 VG_(debugLog)(1, "syswrap-solaris", "PRE(forksys), new segment: "
6659 "vaddr=%#lx, size=%#lx, prot=%#x\n", addr, size, prot);
6660 vg_assert(prot == (VKI_PROT_READ | VKI_PROT_EXEC));
6661 vg_assert(size == VKI_PAGE_SIZE);
6662 ML_(notify_core_and_tool_of_mmap)(addr, size, prot, VKI_MAP_ANONYMOUS,
6663 -1, 0);
6665 /* Note: We don't notify the debuginfo reader about this mapping
6666 because there is no debug information stored in this segment. */
6668 # endif /* SOLARIS_PT_SUNDWTRACE_THRP */
6670 else {
6671 VG_(do_atfork_parent)(tid);
6673 /* Print information about the fork. */
6674 PRINT(" fork: process %d created child %d\n", VG_(getpid)(),
6675 (Int)RES);
6677 /* vfork */
6678 if (ARG1 == 2) {
6679 /* Wait for the child to finish (exec or exit). */
6680 UChar w;
6682 VG_(close)(fds[0]);
6684 res = VG_(read)(fds[1], &w, 1);
6685 if (res == 1)
6686 SET_STATUS_Failure(w);
6687 VG_(close)(fds[1]);
6689 *VG_(vfork_fildes_addr) = -1;
6694 #if defined(SOLARIS_GETRANDOM_SYSCALL)
6695 PRE(sys_getrandom)
6697 /* int getrandom(void *buf, size_t buflen, uint_t flags); */
6698 PRINT("sys_getrandom ( %#lx, %lu, %lu )", ARG1, ARG2, ARG3);
6699 PRE_REG_READ3(long, "getrandom", void *, buf, vki_size_t, buflen,
6700 vki_uint_t, flags);
6701 PRE_MEM_WRITE("getrandom(buf)", ARG1, ARG2);
6704 POST(sys_getrandom)
6706 POST_MEM_WRITE(ARG1, RES);
6708 #endif /* SOLARIS_GETRANDOM_SYSCALL */
6710 PRE(sys_sigtimedwait)
6712 /* int sigtimedwait(const sigset_t *set, siginfo_t *info,
6713 const timespec_t *timeout); */
6714 *flags |= SfMayBlock;
6715 PRINT("sys_sigtimedwait ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
6716 PRE_REG_READ3(long, "sigtimedwait", vki_sigset_t *, set,
6717 vki_siginfo_t *, info, vki_timespec_t *, timeout);
6718 PRE_MEM_READ("sigtimedwait(set)", ARG1, sizeof(vki_sigset_t));
6719 if (ARG2)
6720 PRE_MEM_WRITE("sigtimedwait(info)", ARG2, sizeof(vki_siginfo_t));
6721 if (ARG3)
6722 PRE_MEM_READ("sigtimedwait(timeout)", ARG3, sizeof(vki_timespec_t));
6725 POST(sys_sigtimedwait)
6727 if (ARG2)
6728 POST_MEM_WRITE(ARG2, sizeof(vki_siginfo_t));
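/* For reference, a typical client call (sketch):

      sigset_t set;
      siginfo_t si;
      struct timespec timeout = { 5, 0 };
      sigemptyset(&set);
      sigaddset(&set, SIGUSR1);
      int sig = sigtimedwait(&set, &si, &timeout);   - -1/EAGAIN on timeout
*/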
6731 PRE(sys_yield)
6733 /* void yield(void); */
6734 *flags |= SfMayBlock;
6735 PRINT("sys_yield ( )");
6736 PRE_REG_READ0(long, "yield");
6739 PRE(sys_lwp_sema_post)
6741 /* int lwp_sema_post(lwp_sema_t *sema); */
6742 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6743 *flags |= SfMayBlock;
6744 PRINT("sys_lwp_sema_post ( %#lx )", ARG1);
6745 PRE_REG_READ1(long, "lwp_sema_post", lwp_sema_t *, sema);
6747 PRE_FIELD_READ("lwp_sema_post(sema->type)", sema->vki_sema_type);
6748 PRE_FIELD_READ("lwp_sema_post(sema->count)", sema->vki_sema_count);
6749 /*PRE_FIELD_WRITE("lwp_sema_post(sema->count)", sema->vki_sema_count);*/
6750 PRE_FIELD_READ("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);
6751 /*PRE_FIELD_WRITE("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);*/
6754 POST(sys_lwp_sema_post)
6756 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6757 POST_FIELD_WRITE(sema->vki_sema_count);
6758 POST_FIELD_WRITE(sema->vki_sema_waiters);
6761 PRE(sys_lwp_sema_trywait)
6763 /* int lwp_sema_trywait(lwp_sema_t *sema); */
6764 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6765 PRINT("sys_lwp_sema_trywait ( %#lx )", ARG1);
6766 PRE_REG_READ1(long, "lwp_sema_trywait", lwp_sema_t *, sema);
6768 PRE_FIELD_READ("lwp_sema_trywait(sema->type)", sema->vki_sema_type);
6769 PRE_FIELD_READ("lwp_sema_trywait(sema->count)", sema->vki_sema_count);
6770 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->count)", sema->vki_sema_count);*/
6771 PRE_FIELD_READ("lwp_sema_trywait(sema->waiters)", sema->vki_sema_waiters);
6772 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->waiters)",
6773 sema->vki_sema_waiters);*/
6776 POST(sys_lwp_sema_trywait)
6778 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6779 POST_FIELD_WRITE(sema->vki_sema_count);
6780 POST_FIELD_WRITE(sema->vki_sema_waiters);
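/* For reference, these lwp_sema_* wrappers sit underneath the libc
   counting-semaphore API (a sketch; assumes the usual libc implementation
   where sema_post/sema_trywait end up in these syscalls):

      sema_t sem;
      sema_init(&sem, 1, USYNC_THREAD, NULL);
      if (sema_trywait(&sem) == 0) {
         ... critical section ...
         sema_post(&sem);
      }
*/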
6783 PRE(sys_lwp_detach)
6785 /* int lwp_detach(id_t lwpid); */
6786 PRINT("sys_lwp_detach ( %ld )", SARG1);
6787 PRE_REG_READ1(long, "lwp_detach", vki_id_t, lwpid);
6790 PRE(sys_modctl)
6792 /* int modctl(int cmd, uintptr_t a1, uintptr_t a2, uintptr_t a3,
6793 uintptr_t a4, uintptr_t a5); */
6794 *flags |= SfMayBlock;
6796 switch (ARG1 /*cmd*/) {
6797 case VKI_MODLOAD:
6798 /* int modctl_modload(int use_path, char *filename, int *rvp); */
6799 PRINT("sys_modctl ( %ld, %ld, %#lx(%s), %#lx )",
6800 SARG1, ARG2, ARG3, (HChar *) ARG3, ARG4);
6801 PRE_REG_READ4(long, SC2("modctl", "modload"),
6802 int, cmd, int, use_path, char *, filename, int *, rvp);
6803 PRE_MEM_RASCIIZ("modctl(filename)", ARG3);
6804 if (ARG4 != 0) {
6805 PRE_MEM_WRITE("modctl(rvp)", ARG4, sizeof(int *));
6807 break;
6808 case VKI_MODUNLOAD:
6809 /* int modctl_modunload(modid_t id); */
6810 PRINT("sys_modctl ( %ld, %ld )", SARG1, SARG2);
6811 PRE_REG_READ2(long, SC2("modctl", "modunload"),
6812 int, cmd, vki_modid_t, id);
6813 break;
6814 case VKI_MODINFO: {
6815 /* int modctl_modinfo(modid_t id, struct modinfo *umodi); */
6816 PRINT("sys_modctl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
6817 PRE_REG_READ3(long, SC2("modctl", "modinfo"),
6818 int, cmd, vki_modid_t, id, struct modinfo *, umodi);
6820 struct vki_modinfo *umodi = (struct vki_modinfo *) ARG3;
6821 PRE_FIELD_READ("modctl(umodi->mi_info)", umodi->mi_info);
6822 PRE_FIELD_READ("modctl(umodi->mi_id)", umodi->mi_id);
6823 PRE_FIELD_READ("modctl(umodi->mi_nextid)", umodi->mi_nextid);
6824 PRE_MEM_WRITE("modctl(umodi)", ARG3, sizeof(struct vki_modinfo));
6825 break;
6828 # if defined(SOLARIS_MODCTL_MODNVL)
6829 case VKI_MODNVL_DEVLINKSYNC:
6830 /* int modnvl_devlinksync(sysnvl_op_t a1, uintptr_t a2, uintptr_t a3,
6831 uintptr_t a4); */
6832 switch (ARG2 /*op*/) {
6834 # if defined(HAVE_SYS_SYSNVL_H)
6835 case VKI_SYSNVL_OP_GET:
6836 PRE_REG_READ5(long, SC3("modctl", "modnvl_devlinksync", "get"),
6837 int, cmd, sysnvl_op_t, a1, char *, bufp,
6838 uint64_t *, buflenp, uint64_t *, genp);
6839 # else
6840 case VKI_MODCTL_NVL_OP_GET:
6841 PRE_REG_READ5(long, SC3("modctl", "modnvl_devlinksync", "get"),
6842 int, cmd, modctl_nvl_op_t, a1, char *, bufp,
6843 uint64_t *, buflenp, uint64_t *, genp);
6844 # endif /* HAVE_SYS_SYSNVL_H */
6846 PRINT("sys_modctl ( %ld, %lu, %#lx, %#lx, %#lx )",
6847 SARG1, ARG2, ARG3, ARG4, ARG5);
6848 PRE_MEM_WRITE("modctl(buflenp)", ARG4, sizeof(vki_uint64_t));
6849 if (ML_(safe_to_deref)((vki_uint64_t *) ARG4, sizeof(vki_uint64_t))) {
6850 if (ARG3 != 0) {
6851 PRE_MEM_WRITE("modctl(bufp)", ARG3, *(vki_uint64_t *) ARG4);
6854 if (ARG5 != 0) {
6855 PRE_MEM_WRITE("modctl(genp)", ARG5, sizeof(vki_uint64_t));
6857 break;
6859 # if defined(HAVE_SYS_SYSNVL_H)
6860 case VKI_SYSNVL_OP_UPDATE:
6861 PRE_REG_READ4(long, SC3("modctl", "modnvl_devlinksync", "update"),
6862 int, cmd, sysnvl_op_t, a1, char *, bufp,
6863 uint64_t *, buflenp);
6864 # else
6865 case VKI_MODCTL_NVL_OP_UPDATE:
6866 PRE_REG_READ4(long, SC3("modctl", "modnvl_devlinksync", "update"),
6867 int, cmd, modctl_nvl_op_t, a1, char *, bufp,
6868 uint64_t *, buflenp);
6869 # endif /* HAVE_SYS_SYSNVL_H */
6871 PRINT("sys_modctl ( %ld, %lu, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
6872 PRE_MEM_READ("modctl(buflenp)", ARG4, sizeof(vki_uint64_t));
6873 if (ML_(safe_to_deref)((vki_uint64_t *) ARG4, sizeof(vki_uint64_t))) {
6874 PRE_MEM_READ("modctl(bufp)", ARG3, *(vki_uint64_t *) ARG4);
6876 break;
6878 default:
6879 VG_(unimplemented)("Syswrap of the modctl call with command "
6880 "MODNVL_DEVLINKSYNC and op %ld.", ARG2);
6881 /*NOTREACHED*/
6882 break;
6884 break;
6886 case VKI_MODDEVINFO_CACHE_TS:
6887 /* int modctl_devinfo_cache_ts(uint64_t *utsp); */
6888 PRINT("sys_modctl ( %ld, %#lx )", SARG1, ARG2);
6889 PRE_REG_READ2(long, SC2("modctl", "moddevinfo_cache_ts"),
6890 int, cmd, uint64_t *, utsp);
6891 PRE_MEM_WRITE("modctl(utsp)", ARG2, sizeof(vki_uint64_t));
6892 break;
6893 # endif /* SOLARIS_MODCTL_MODNVL */
6895 default:
6896 VG_(unimplemented)("Syswrap of the modctl call with command %ld.", SARG1);
6897 /*NOTREACHED*/
6898 break;
6902 POST(sys_modctl)
6904 switch (ARG1 /*cmd*/) {
6905 case VKI_MODLOAD:
6906 if (ARG4 != 0) {
6907 POST_MEM_WRITE(ARG4, sizeof(int *));
6909 break;
6910 case VKI_MODUNLOAD:
6911 break;
6912 case VKI_MODINFO:
6913 POST_MEM_WRITE(ARG3, sizeof(struct vki_modinfo));
6914 break;
6915 # if defined(SOLARIS_MODCTL_MODNVL)
6916 case VKI_MODNVL_DEVLINKSYNC:
6917 switch (ARG2 /*op*/) {
6919 # if defined(HAVE_SYS_SYSNVL_H)
6920 case VKI_SYSNVL_OP_GET:
6921 # else
6922 case VKI_MODCTL_NVL_OP_GET:
6923 # endif /* HAVE_SYS_SYSNVL_H */
6925 POST_MEM_WRITE(ARG4, sizeof(vki_uint64_t));
6926 if (ARG3 != 0) {
6927 POST_MEM_WRITE(ARG3, *(vki_uint64_t *) ARG4);
6929 if (ARG5 != 0) {
6930 POST_MEM_WRITE(ARG5, sizeof(vki_uint64_t));
6932 break;
6934 # if defined(HAVE_SYS_SYSNVL_H)
6935 case VKI_SYSNVL_OP_UPDATE:
6936 # else
6937 case VKI_MODCTL_NVL_OP_UPDATE:
6938 # endif /* HAVE_SYS_SYSNVL_H */
6939 break;
6941 default:
6942 vg_assert(0);
6943 break;
6945 break;
6946 case VKI_MODDEVINFO_CACHE_TS:
6947 POST_MEM_WRITE(ARG2, sizeof(vki_uint64_t));
6948 break;
6949 # endif /* SOLARIS_MODCTL_MODNVL */
6951 default:
6952 vg_assert(0);
6953 break;
6957 PRE(sys_fchroot)
6959 /* int fchroot(int fd); */
6960 PRINT("sys_fchroot ( %ld )", SARG1);
6961 PRE_REG_READ1(long, "fchroot", int, fd);
6963 /* Be strict. */
6964 if (!ML_(fd_allowed)(ARG1, "fchroot", tid, False))
6965 SET_STATUS_Failure(VKI_EBADF);
6968 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
6969 PRE(sys_system_stats)
6971 /* void system_stats(int flag); */
6972 PRINT("sys_system_stats ( %ld )", SARG1);
6973 PRE_REG_READ1(void, "system_stats", int, flag);
6975 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
6977 PRE(sys_gettimeofday)
6979 /* Kernel: int gettimeofday(struct timeval *tp); */
6980 PRINT("sys_gettimeofday ( %#lx )", ARG1);
6981 PRE_REG_READ1(long, "gettimeofday", struct timeval *, tp);
6982 if (ARG1)
6983 PRE_timeval_WRITE("gettimeofday(tp)", ARG1);
6986 POST(sys_gettimeofday)
6988 if (ARG1)
6989 POST_timeval_WRITE(ARG1);
6992 PRE(sys_lwp_create)
6994 /* int lwp_create(ucontext_t *ucp, int flags, id_t *new_lwp) */
6996 ThreadId ctid;
6997 ThreadState *ptst;
6998 ThreadState *ctst;
6999 Addr stack;
7000 SysRes res;
7001 vki_ucontext_t uc;
7002 Bool tool_informed = False;
7004 PRINT("sys_lwp_create ( %#lx, %ld, %#lx )", ARG1, ARG2, ARG3);
7005 PRE_REG_READ3(long, "lwp_create", ucontext_t *, ucp, int, flags,
7006 id_t *, new_lwp);
7008 if (ARG3 != 0)
7009 PRE_MEM_WRITE("lwp_create(new_lwp)", ARG3, sizeof(vki_id_t));
7011 /* If we can't deref ucontext_t then we can't do anything. */
7012 if (!ML_(safe_to_deref)((void*)ARG1, sizeof(vki_ucontext_t))) {
7013 SET_STATUS_Failure(VKI_EINVAL);
7014 return;
7017 ctid = VG_(alloc_ThreadState)();
7018 ptst = VG_(get_ThreadState)(tid);
7019 ctst = VG_(get_ThreadState)(ctid);
7021 /* Stay sane. */
7022 vg_assert(VG_(is_running_thread)(tid));
7023 vg_assert(VG_(is_valid_tid)(ctid));
7025 stack = ML_(allocstack)(ctid);
7026 if (!stack) {
7027 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
7028 goto out;
7031 /* First inherit parent's guest state */
7032 ctst->arch.vex = ptst->arch.vex;
7033 ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
7034 ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;
7036 /* Set up some values. */
7037 ctst->os_state.parent = tid;
7038 ctst->os_state.threadgroup = ptst->os_state.threadgroup;
7039 ctst->sig_mask = ptst->sig_mask;
7040 ctst->tmp_sig_mask = ptst->sig_mask;
7042    /* No stack definition should currently be present. The stack will be set
7043       up later by libc via a setustack() call (the getsetcontext syscall). */
7044 ctst->client_stack_highest_byte = 0;
7045 ctst->client_stack_szB = 0;
7046 vg_assert(ctst->os_state.stk_id == NULL_STK_ID);
7048 /* Inform a tool that a new thread is created. This has to be done before
7049 any other core->tool event is sent. */
7050 vg_assert(VG_(owns_BigLock_LL)(tid));
7051 VG_TRACK(pre_thread_ll_create, tid, ctid);
7052 tool_informed = True;
7054 #if defined(VGP_x86_solaris)
7055    /* Set up the GDT (this has to be done before calling
7056       VG_(restore_context)()). */
7057 ML_(setup_gdt)(&ctst->arch.vex);
7058 #elif defined(VGP_amd64_solaris)
7059 /* Nothing to do. */
7060 #else
7061 # error "Unknown platform"
7062 #endif
7064 /* Now set up the new thread according to ucontext_t. */
7065 VG_(restore_context)(ctid, (vki_ucontext_t*)ARG1, Vg_CoreSysCall,
7066 True/*esp_is_thrptr*/);
7068 /* Set up V thread (this also tells the kernel to block all signals in the
7069 thread). */
7070 ML_(setup_start_thread_context)(ctid, &uc);
7072 /* Actually create the new thread. */
7073 res = VG_(do_syscall3)(__NR_lwp_create, (UWord)&uc, ARG2, ARG3);
7075 if (!sr_isError(res)) {
7076 if (ARG3 != 0)
7077 POST_MEM_WRITE(ARG3, sizeof(vki_id_t));
7078 if (ARG2 & VKI_LWP_DAEMON)
7079 ctst->os_state.daemon_thread = True;
7082 out:
7083 if (sr_isError(res)) {
7084 if (tool_informed) {
7085 /* Tell a tool the thread exited in a hurry. */
7086 VG_TRACK(pre_thread_ll_exit, ctid);
7089 /* lwp_create failed. */
7090 VG_(cleanup_thread)(&ctst->arch);
7091 ctst->status = VgTs_Empty;
7094 SET_STATUS_from_SysRes(res);
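   /* In summary: the new LWP gets its own Valgrind thread slot and a copy of
      the parent's guest state, the client-supplied ucontext_t is installed
      into that slot by VG_(restore_context)(), and the real lwp_create() is
      then issued with a Valgrind-built context (from
      ML_(setup_start_thread_context)()) so that the child begins execution in
      Valgrind's own thread start-up code rather than directly in client
      code. */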
7097 PRE(sys_lwp_exit)
7099 /* void syslwp_exit(); */
7100 ThreadState *tst = VG_(get_ThreadState)(tid);
7101 PRINT("sys_lwp_exit ( )");
7102 PRE_REG_READ0(long, "lwp_exit");
7104 /* Set the thread's status to be exiting, then claim that the syscall
7105 succeeded. */
7106 tst->exitreason = VgSrc_ExitThread;
7107 tst->os_state.exitcode = 0;
7108 SET_STATUS_Success(0);
7111 PRE(sys_lwp_suspend)
7113 /* int lwp_suspend(id_t lwpid); */
7114 ThreadState *tst = VG_(get_ThreadState)(tid);
7115 PRINT("sys_lwp_suspend ( %ld )", SARG1);
7116 PRE_REG_READ1(long, "lwp_suspend", vki_id_t, lwpid);
7118 if (ARG1 == tst->os_state.lwpid) {
7119       /* Set the SfMayBlock flag only if the currently running thread is the
7120          one being suspended. If this flag were also set when suspending
7121          other threads, a thread holding the_BigLock could end up suspended
7122          and Valgrind would hang. */
7123 *flags |= SfMayBlock;
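      /* (SfMayBlock makes the scheduler release the_BigLock around the
         syscall. If it were also used when suspending another thread, the
         target could acquire the_BigLock in that window and then be suspended
         while holding it, leaving every remaining thread unable to run.) */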
7127 PRE(sys_lwp_continue)
7129 /* int lwp_continue(id_t target_lwp); */
7130 PRINT("sys_lwp_continue ( %ld )", SARG1);
7131 PRE_REG_READ1(long, "lwp_continue", vki_id_t, target_lwp);
7134 static void
7135 do_lwp_sigqueue(const HChar *syscall_name, UWord target_lwp, UWord signo,
7136 SyscallStatus *status, UWord *flags)
7138 if (!ML_(client_signal_OK)(signo)) {
7139 SET_STATUS_Failure(VKI_EINVAL);
7140 return;
7143 /* Check to see if this gave us a pending signal. */
7144 *flags |= SfPollAfter;
7146 if (VG_(clo_trace_signals))
7147 VG_(message)(Vg_DebugMsg, "%s: sending signal %lu to thread %lu\n",
7148 syscall_name, signo, target_lwp);
7150 /* If we're sending SIGKILL, check to see if the target is one of our
7151 threads and handle it specially. */
7152 if (signo == VKI_SIGKILL && ML_(do_sigkill)(target_lwp, -1)) {
7153 SET_STATUS_Success(0);
7154 return;
7157 /* Ask to handle this syscall via the slow route, since that's the only one
7158 that sets tst->status to VgTs_WaitSys. If the result of doing the
7159 syscall is an immediate run of async_signalhandler() in m_signals.c,
7160 then we need the thread to be properly tidied away. */
7161 *flags |= SfMayBlock;
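/* The helper above is shared by the thread-directed signal wrappers that
   follow (lwp_sigqueue or lwp_kill, depending on the build): it rejects
   signals reserved for Valgrind's own use via ML_(client_signal_OK)(), handles
   SIGKILL aimed at one of our own threads internally through ML_(do_sigkill)(),
   and otherwise lets the real syscall run on the slow, blocking path described
   above. */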
7164 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
7165 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID)
7166 PRE(sys_lwp_sigqueue)
7168 /* int lwp_sigqueue(pid_t target_pid, id_t target_lwp, int signal,
7169 void *value, int si_code, timespec_t *timeout);
7171 PRINT("sys_lwp_sigqueue ( %ld, %ld, %ld, %#lx, %ld, %#lx )",
7172 SARG1, SARG2, SARG3, ARG4, SARG5, ARG6);
7173 PRE_REG_READ6(long, "lwp_sigqueue", vki_pid_t, target_pid,
7174 vki_id_t, target_lwp, int, signal, void *, value, int, si_code,
7175 vki_timespec_t *, timeout);
7177 if (ARG6)
7178 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG6, sizeof(vki_timespec_t));
7180 if ((ARG1 == 0) || (ARG1 == VG_(getpid)())) {
7181 do_lwp_sigqueue("lwp_sigqueue", ARG2, ARG3, status, flags);
7182 } else {
7183 /* Signal is sent to a different process. */
7184 if (VG_(clo_trace_signals))
7185 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sending signal %ld to "
7186 "process %ld, thread %ld\n", SARG3, SARG1, SARG2);
7187 *flags |= SfMayBlock;
7191 POST(sys_lwp_sigqueue)
7193 if (VG_(clo_trace_signals))
7194 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %ld to process %ld, "
7195 "thread %ld\n", SARG3, SARG1, SARG2);
7198 #else
7200 PRE(sys_lwp_sigqueue)
7202 /* int lwp_sigqueue(id_t target_lwp, int signal, void *value,
7203 int si_code, timespec_t *timeout);
7205 PRINT("sys_lwp_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
7206 SARG1, SARG2, ARG3, SARG4, ARG5);
7207 PRE_REG_READ5(long, "lwp_sigqueue", vki_id_t, target_lwp, int, signal,
7208 void *, value, int, si_code, vki_timespec_t *, timeout);
7210 if (ARG5)
7211 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
7213 do_lwp_sigqueue("lwp_sigqueue", ARG1, ARG2, status, flags);
7216 POST(sys_lwp_sigqueue)
7218 if (VG_(clo_trace_signals))
7219 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %lu to thread %lu\n",
7220 ARG2, ARG1);
7224 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID */
7226 #else
7228 PRE(sys_lwp_kill)
7230 /* int lwp_kill(id_t target_lwp, int signal); */
7231 PRINT("sys_lwp_kill ( %ld, %ld )", SARG1, SARG2);
7232 PRE_REG_READ2(long, "lwp_kill", vki_id_t, target_lwp, int, signal);
7234 do_lwp_sigqueue("lwp_kill", ARG1, ARG2, status, flags);
7237 POST(sys_lwp_kill)
7239 if (VG_(clo_trace_signals))
7240 VG_(message)(Vg_DebugMsg, "lwp_kill: sent signal %lu to thread %lu\n",
7241 ARG2, ARG1);
7243 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
7245 PRE(sys_lwp_self)
7247 /* id_t lwp_self(void); */
7248 PRINT("sys_lwp_self ( )");
7249 PRE_REG_READ0(long, "lwp_self");
7252 PRE(sys_lwp_sigmask)
7254 /* int64_t lwp_sigmask(int how, uint_t bits0, uint_t bits1, uint_t bits2,
7255 uint_t bits3); */
7256 vki_sigset_t sigset;
7257 PRINT("sys_lwp_sigmask ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
7258 ARG4, ARG5);
7259 PRE_REG_READ5(long, "lwp_sigmask", int, how, vki_uint_t, bits0,
7260 vki_uint_t, bits1, vki_uint_t, bits2, vki_uint_t, bits3);
7262 sigset.__sigbits[0] = ARG2;
7263 sigset.__sigbits[1] = ARG3;
7264 sigset.__sigbits[2] = ARG4;
7265 sigset.__sigbits[3] = ARG5;
7267 SET_STATUS_from_SysRes(
7268 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, &sigset, NULL)
7271 if (SUCCESS)
7272 *flags |= SfPollAfter;
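   /* The four uint_t words are reassembled into a vki_sigset_t and the whole
      request is handled by VG_(do_sys_sigprocmask)(), which updates the
      guest's recorded signal mask (Valgrind applies its own, sanitized mask to
      the real kernel state); since the status is set here, the kernel-level
      lwp_sigmask syscall itself is never performed. */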
7275 PRE(sys_lwp_private)
7277 /* int lwp_private(int cmd, int which, uintptr_t base); */
7278 ThreadState *tst = VG_(get_ThreadState)(tid);
7279 Int supported_base, supported_sel;
7280 PRINT("sys_lwp_private ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7281 PRE_REG_READ3(long, "lwp_private", int, cmd, int, which,
7282 uintptr_t, base);
7284 /* Note: Only the %gs base is currently supported on x86 and the %fs base
7285 on amd64. Support for the %fs base on x86 and for the %gs base on amd64
7286 should be added. Anything else is probably a client program error. */
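   /* For orientation: lwp_private is how libc installs an LWP's thread
      pointer, i.e. the segment base through which %gs- (x86) or %fs-relative
      (amd64) accesses to the per-thread ulwp_t structure go. A rough sketch of
      the call as libc presumably makes it -- the unprefixed constant and
      syscall names simply mirror the VKI_* ones used here and are an
      assumption, not something taken from this file:

         (void) syscall(SYS_lwp_private, _LWP_SETPRIVATE,
                        _LWP_GSBASE,          // _LWP_FSBASE on amd64
                        (uintptr_t) self);    // pointer to the thread's ulwp_t

      This is why VKI_LWP_SETPRIVATE below either updates the GDT entry backing
      %gs (x86) or guest_FS_CONST (amd64). */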
7287 #if defined(VGP_x86_solaris)
7288 supported_base = VKI_LWP_GSBASE;
7289 supported_sel = VKI_LWPGS_SEL;
7290 #elif defined(VGP_amd64_solaris)
7291 supported_base = VKI_LWP_FSBASE;
7292 supported_sel = 0;
7293 #else
7294 #error "Unknown platform"
7295 #endif
7296 if (ARG2 != supported_base) {
7297 VG_(unimplemented)("Syswrap of the lwp_private call where which=%ld.",
7298 SARG2);
7299 /*NOTREACHED*/
7302 switch (ARG1 /*cmd*/) {
7303 case VKI_LWP_SETPRIVATE:
7304 #if defined(VGP_x86_solaris)
7305 tst->os_state.thrptr = ARG3;
7306 ML_(update_gdt_lwpgs)(tid);
7307 #elif defined(VGP_amd64_solaris)
7308 tst->arch.vex.guest_FS_CONST = ARG3;
7309 #else
7310 #error "Unknown platform"
7311 #endif
7312 SET_STATUS_Success(supported_sel);
7313 break;
7314 case VKI_LWP_GETPRIVATE:
7316 int thrptr;
7317 #if defined(VGP_x86_solaris)
7318 thrptr = tst->os_state.thrptr;
7319 #elif defined(VGP_amd64_solaris)
7320 thrptr = tst->arch.vex.guest_FS_CONST;
7321 #else
7322 #error "Unknown platform"
7323 #endif
7325 if (thrptr == 0) {
7326 SET_STATUS_Failure(VKI_EINVAL);
7327 return;
7330 #if defined(VGP_x86_solaris)
7331 if (tst->arch.vex.guest_GS != supported_sel) {
7332 SET_STATUS_Failure(VKI_EINVAL);
7333 return;
7335 #elif defined(VGP_amd64_solaris)
7336       /* Valgrind on amd64 does not allow changing the %gs register, so
7337          a check that guest_GS equals supported_sel is not needed
7338          here. */
7339 #else
7340 #error "Unknown platform"
7341 #endif
7343 PRE_MEM_WRITE("lwp_private(base)", ARG3, sizeof(Addr));
7344 if (!ML_(safe_to_deref((void*)ARG3, sizeof(Addr)))) {
7345 SET_STATUS_Failure(VKI_EFAULT);
7346 return;
7348 *(Addr*)ARG3 = thrptr;
7349 POST_MEM_WRITE((Addr)ARG3, sizeof(Addr));
7350 SET_STATUS_Success(0);
7351 break;
7353 default:
7354 VG_(unimplemented)("Syswrap of the lwp_private call where cmd=%ld.",
7355 SARG1);
7356 /*NOTREACHED*/
7357 break;
7361 PRE(sys_lwp_wait)
7363 /* int lwp_wait(id_t lwpid, id_t *departed); */
7364 *flags |= SfMayBlock;
7365 PRINT("sys_lwp_wait ( %ld, %#lx )", SARG1, ARG2);
7366 PRE_REG_READ2(long, "lwp_wait", vki_id_t, lwpid, vki_id_t *, departed);
7367 if (ARG2)
7368 PRE_MEM_WRITE("lwp_wait(departed)", ARG2, sizeof(vki_id_t));
7371 POST(sys_lwp_wait)
7373 POST_MEM_WRITE(ARG2, sizeof(vki_id_t));
7376 PRE(sys_lwp_mutex_wakeup)
7378 /* int lwp_mutex_wakeup(lwp_mutex_t *lp, int release_all); */
7379 *flags |= SfMayBlock;
7380 PRINT("sys_lwp_mutex_wakeup ( %#lx, %ld )", ARG1, SARG2);
7381 PRE_REG_READ2(long, "lwp_mutex_wakeup", vki_lwp_mutex_t *, lp,
7382 int, release_all);
7383 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
7384 PRE_FIELD_READ("lwp_mutex_wakeup(lp->mutex_type)", lp->vki_mutex_type);
7385 PRE_FIELD_WRITE("lwp_mutex_wakeup(lp->mutex_waiters)",
7386 lp->vki_mutex_waiters);
7389 POST(sys_lwp_mutex_wakeup)
7391 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
7392 POST_FIELD_WRITE(lp->vki_mutex_waiters);
7395 PRE(sys_lwp_cond_wait)
7397 /* int lwp_cond_wait(lwp_cond_t *cvp, lwp_mutex_t *mp, timespec_t *tsp,
7398 int check_park); */
7399 *flags |= SfMayBlock;
7400 PRINT("sys_lwp_cond_wait( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
7401 PRE_REG_READ4(long, "lwp_cond_wait", vki_lwp_cond_t *, cvp,
7402                  vki_lwp_mutex_t *, mp, vki_timespec_t *, tsp, int, check_park);
7404 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7405 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7406 PRE_FIELD_READ("lwp_cond_wait(cvp->type)", cvp->vki_cond_type);
7407 PRE_FIELD_READ("lwp_cond_wait(cvp->waiters_kernel)",
7408 cvp->vki_cond_waiters_kernel);
7409 PRE_FIELD_READ("lwp_cond_wait(mp->mutex_type)", mp->vki_mutex_type);
7410 PRE_FIELD_WRITE("lwp_cond_wait(mp->mutex_waiters)", mp->vki_mutex_waiters);
7411 if (ARG3 != 0)
7412 PRE_MEM_READ("lwp_cond_wait(tsp)", ARG3, sizeof(vki_timespec_t));
7415 POST(sys_lwp_cond_wait)
7417 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7418 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7419 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7420 POST_FIELD_WRITE(mp->vki_mutex_waiters);
7421 if (ARG3 != 0)
7422 POST_MEM_WRITE(ARG3, sizeof(vki_timespec_t));
7425 PRE(sys_lwp_cond_signal)
7427 /* int lwp_cond_signal(lwp_cond_t *cvp); */
7428 *flags |= SfMayBlock;
7429 PRINT("sys_lwp_cond_signal( %#lx )", ARG1);
7430 PRE_REG_READ1(long, "lwp_cond_signal", vki_lwp_cond_t *, cvp);
7432 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7433 PRE_FIELD_READ("lwp_cond_signal(cvp->type)", cvp->vki_cond_type);
7434 PRE_FIELD_READ("lwp_cond_signal(cvp->waiters_kernel)",
7435 cvp->vki_cond_waiters_kernel);
7438 POST(sys_lwp_cond_signal)
7440 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7441 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7444 PRE(sys_lwp_cond_broadcast)
7446 /* int lwp_cond_broadcast(lwp_cond_t *cvp); */
7447 *flags |= SfMayBlock;
7448 PRINT("sys_lwp_cond_broadcast ( %#lx )", ARG1);
7449 PRE_REG_READ1(long, "lwp_cond_broadcast", vki_lwp_cond_t *, cvp);
7451 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7452 PRE_FIELD_READ("lwp_cond_broadcast(cvp->type)", cvp->vki_cond_type);
7453 PRE_FIELD_READ("lwp_cond_broadcast(cvp->waiters_kernel)",
7454 cvp->vki_cond_waiters_kernel);
7455 /*PRE_FIELD_WRITE("lwp_cond_broadcast(cvp->waiters_kernel)",
7456 cvp->vki_cond_waiters_kernel);*/
7459 POST(sys_lwp_cond_broadcast)
7461 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7462 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7465 PRE(sys_pread)
7467 /* ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset); */
7468 *flags |= SfMayBlock;
7469 PRINT("sys_pread ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7470 PRE_REG_READ4(long, "pread", int, fildes, void *, buf,
7471 vki_size_t, nbyte, vki_off_t, offset);
7472 PRE_MEM_WRITE("pread(buf)", ARG2, ARG3);
7474 /* Be strict. */
7475 if (!ML_(fd_allowed)(ARG1, "pread", tid, False))
7476 SET_STATUS_Failure(VKI_EBADF);
7479 POST(sys_pread)
7481 POST_MEM_WRITE(ARG2, RES);
7484 PRE(sys_pwrite)
7486 /* ssize_t pwrite(int fildes, const void *buf, size_t nbyte,
7487 off_t offset); */
7488 *flags |= SfMayBlock;
7489 PRINT("sys_pwrite ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7490 PRE_REG_READ4(long, "pwrite", int, fildes, const void *, buf,
7491 vki_size_t, nbyte, vki_off_t, offset);
7492 PRE_MEM_READ("pwrite(buf)", ARG2, ARG3);
7494 /* Be strict. */
7495 if (!ML_(fd_allowed)(ARG1, "pwrite", tid, False))
7496 SET_STATUS_Failure(VKI_EBADF);
7499 PRE(sys_getpagesizes)
7501 /* int getpagesizes(int legacy, size_t *buf, int nelem); */
7502 PRINT("sys_getpagesizes ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
7503 PRE_REG_READ3(long, "getpagesizes", int, legacy, size_t *, buf,
7504 int, nelem);
7505 if (ARG2)
7506 PRE_MEM_WRITE("getpagesizes(buf)", ARG2, ARG3 * sizeof(vki_size_t));
7509 POST(sys_getpagesizes)
7511 if (ARG2)
7512 POST_MEM_WRITE(ARG2, RES * sizeof(vki_size_t));
7515 PRE(sys_lgrpsys)
7517 /* Kernel: int lgrpsys(int subcode, long ia, void *ap); */
7518 switch (ARG1 /*subcode*/) {
7519 case VKI_LGRP_SYS_MEMINFO:
7520 PRINT("sys_lgrpsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7521 PRE_REG_READ3(long, SC2("lgrpsys", "meminfo"), int, subcode,
7522 int, addr_count, vki_meminfo_t *, minfo);
7523 PRE_MEM_READ("lgrpsys(minfo)", ARG3, sizeof(vki_meminfo_t));
7525 if (ML_(safe_to_deref)((vki_meminfo_t *) ARG3, sizeof(vki_meminfo_t))) {
7526 vki_meminfo_t *minfo = (vki_meminfo_t *) ARG3;
7527 PRE_MEM_READ("lgrpsys(minfo->mi_inaddr)",
7528 (Addr) minfo->mi_inaddr, SARG2 * sizeof(vki_uint64_t));
7529 PRE_MEM_READ("lgrpsys(minfo->mi_info_req)", (Addr) minfo->mi_info_req,
7530 minfo->mi_info_count * sizeof(vki_uint_t));
7531 PRE_MEM_WRITE("lgrpsys(minfo->mi_outdata)", (Addr) minfo->mi_outdata,
7532 SARG2 * minfo->mi_info_count * sizeof(vki_uint64_t));
7533 PRE_MEM_WRITE("lgrpsys(minfo->mi_validity)",
7534 (Addr) minfo->mi_validity, SARG2 * sizeof(vki_uint_t));
7536 break;
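      /* (Sizing note for the MEMINFO case above: ia/SARG2 is the number of
         input addresses. A vki_meminfo_t carries SARG2 addresses in mi_inaddr
         and mi_info_count request codes in mi_info_req; the kernel fills
         mi_outdata with SARG2 * mi_info_count 64-bit results and mi_validity
         with one word per input address -- hence the sizes checked here and
         marked defined in the POST hook below.) */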
7537 case VKI_LGRP_SYS_GENERATION:
7538 /* Liblgrp: lgrp_gen_t lgrp_generation(lgrp_view_t view); */
7539 PRINT("sys_lgrpsys ( %ld, %ld )", SARG1, SARG2);
7540 PRE_REG_READ2(long, SC2("lgrpsys", "generation"), int, subcode,
7541 vki_lgrp_view_t, view);
7542 break;
7543 case VKI_LGRP_SYS_VERSION:
7544 /* Liblgrp: int lgrp_version(int version); */
7545 PRINT("sys_lgrpsys ( %ld, %ld )", SARG1, SARG2);
7546 PRE_REG_READ2(long, SC2("lgrpsys", "version"), int, subcode,
7547 int, version);
7548 break;
7549 case VKI_LGRP_SYS_SNAPSHOT:
7550 /* Liblgrp: int lgrp_snapshot(void *buf, size_t bufsize); */
7551 PRINT("sys_lgrpsys ( %ld, %lu, %#lx )", SARG1, ARG2, ARG3);
7552 PRE_REG_READ3(long, SC2("lgrpsys", "snapshot"), int, subcode,
7553 vki_size_t, bufsize, void *, buf);
7554 PRE_MEM_WRITE("lgrpsys(buf)", ARG3, ARG2);
7555 break;
7556 default:
7557 VG_(unimplemented)("Syswrap of the lgrpsys call with subcode %ld.",
7558 SARG1);
7559 /*NOTREACHED*/
7560 break;
7564 POST(sys_lgrpsys)
7566 switch (ARG1 /*subcode*/) {
7567 case VKI_LGRP_SYS_MEMINFO:
7569 vki_meminfo_t *minfo = (vki_meminfo_t *) ARG3;
7570 POST_MEM_WRITE((Addr) minfo->mi_outdata,
7571 SARG2 * minfo->mi_info_count * sizeof(vki_uint64_t));
7572 POST_MEM_WRITE((Addr) minfo->mi_validity, SARG2 * sizeof(vki_uint_t));
7574 break;
7575 case VKI_LGRP_SYS_GENERATION:
7576 case VKI_LGRP_SYS_VERSION:
7577 break;
7578 case VKI_LGRP_SYS_SNAPSHOT:
7579 POST_MEM_WRITE(ARG3, RES);
7580 break;
7581 default:
7582 vg_assert(0);
7583 break;
7587 PRE(sys_rusagesys)
7589 /* Kernel: int rusagesys(int code, void *arg1, void *arg2,
7590 void *arg3, void *arg4); */
7591 switch (ARG1 /*code*/) {
7592 case VKI__RUSAGESYS_GETRUSAGE:
7593 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7594 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7595 /* Libc: int getrusage(int who, struct rusage *r_usage); */
7596 PRINT("sys_rusagesys ( %ld, %#lx )", SARG1, ARG2);
7597 PRE_REG_READ2(long, SC2("rusagesys", "getrusage"), int, code,
7598 struct vki_rusage *, r_usage);
7599 PRE_MEM_WRITE("rusagesys(r_usage)", ARG2, sizeof(struct vki_rusage));
7600 break;
7602 case VKI__RUSAGESYS_GETVMUSAGE:
7603 /* Libc: int getvmusage(uint_t flags, time_t age,
7604 vmusage_t *buf, size_t *nres); */
7605 PRINT("sys_rusagesys ( %ld, %lu, %ld, %#lx, %#lx )",
7606 SARG1, ARG2, SARG3, ARG4, ARG5);
7607 PRE_REG_READ5(long, SC2("rusagesys", "getvmusage"), int, code,
7608 vki_uint_t, flags, vki_time_t, age,
7609 vki_vmusage_t *, buf, vki_size_t *, nres);
7610 PRE_MEM_READ("rusagesys(nres)", ARG5, sizeof(vki_size_t));
7611 /* PRE_MEM_WRITE("rusagesys(nres)", ARG5, sizeof(vki_size_t)); */
7613 if (ML_(safe_to_deref)((void *) ARG5, sizeof(vki_size_t))) {
7614 vki_size_t *nres = (vki_size_t *) ARG5;
7615 PRE_MEM_WRITE("rusagesys(buf)", ARG4,
7616 *nres * sizeof(vki_vmusage_t));
7618 *flags |= SfMayBlock;
7619 break;
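      /* (For GETVMUSAGE, *nres is an in/out value: on input it bounds how many
         vmusage_t records fit in buf, which is why it is read here to size the
         writability check; the POST hook below marks both *nres and the
         returned records as defined.) */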
7621 default:
7622 VG_(unimplemented)("Syswrap of the rusagesys call with code %ld.", SARG1);
7623 /*NOTREACHED*/
7624 break;
7628 POST(sys_rusagesys)
7630 switch (ARG1 /*code*/) {
7631 case VKI__RUSAGESYS_GETRUSAGE:
7632 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7633 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7634 POST_MEM_WRITE(ARG2, sizeof(struct vki_rusage));
7635 break;
7636 case VKI__RUSAGESYS_GETVMUSAGE:
7638 vki_size_t *nres = (vki_size_t *) ARG5;
7639 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
7640 POST_MEM_WRITE(ARG4, *nres * sizeof(vki_vmusage_t));
7642 break;
7643 default:
7644 vg_assert(0);
7645 break;
7649 PRE(sys_port)
7651 /* Kernel: int64_t portfs(int opcode, uintptr_t a0, uintptr_t a1,
7652 uintptr_t a2, uintptr_t a3, uintptr_t a4); */
7653 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7654 *flags |= SfMayBlock;
7655 switch (opcode) {
7656 case VKI_PORT_CREATE:
7657 PRINT("sys_port ( %ld )", SARG1);
7658 PRE_REG_READ1(long, SC2("port", "create"), int, opcode);
7659 break;
7660 case VKI_PORT_ASSOCIATE:
7661 case VKI_PORT_DISSOCIATE:
7662 PRINT("sys_port ( %ld, %ld, %ld, %#lx, %ld, %#lx )", SARG1, SARG2, SARG3,
7663 ARG4, SARG5, ARG6);
7664 if (opcode == VKI_PORT_ASSOCIATE) {
7665 PRE_REG_READ6(long, SC2("port", "associate"), int, opcode, int, a0,
7666 int, a1, uintptr_t, a2, int, a3, void *, a4);
7668 else {
7669 PRE_REG_READ6(long, SC2("port", "dissociate"), int, opcode, int, a0,
7670 int, a1, uintptr_t, a2, int, a3, void *, a4);
7673 switch (ARG3 /*source*/) {
7674 case VKI_PORT_SOURCE_FD:
7675 if (!ML_(fd_allowed)(ARG4, "port", tid, False)) {
7676 SET_STATUS_Failure(VKI_EBADF);
7678 break;
7679 case VKI_PORT_SOURCE_FILE:
7681 struct vki_file_obj *fo = (struct vki_file_obj *)ARG4;
7682 PRE_MEM_READ("port(file_obj)", ARG4, sizeof(struct vki_file_obj));
7683 if (ML_(safe_to_deref)(&fo->fo_name, sizeof(fo->fo_name)))
7684 PRE_MEM_RASCIIZ("port(file_obj->fo_name)", (Addr)fo->fo_name);
7686 break;
7687 default:
7688 VG_(unimplemented)("Syswrap of the port_associate/dissociate call "
7689 "type %ld.", SARG3);
7690 /*NOTREACHED*/
7691 break;
7693 break;
7694 case VKI_PORT_SEND:
7695 PRINT("sys_port ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
7696 PRE_REG_READ4(long, SC2("port", "send"), int, opcode, int, a0, int, a1,
7697 void *, a2);
7698 break;
7699 case VKI_PORT_SENDN:
7700 PRINT("sys_port ( %ld, %#lx, %#lx, %lu, %lx, %#lx)", SARG1, ARG2, ARG3,
7701 ARG4, ARG5, ARG6);
7702 PRE_REG_READ6(long, SC2("port", "sendn"), int, opcode, int *, a0,
7703 int *, a1, vki_uint_t, a2, int, a3, void *, a4);
7704 PRE_MEM_READ("port(ports)", ARG2, ARG4 * sizeof(int));
7705 PRE_MEM_WRITE("port(errors)", ARG3, ARG4 * sizeof(int));
7706 break;
7707 case VKI_PORT_GET:
7708 PRINT("sys_port ( %ld, %ld, %#lx, %ld, %ld, %#lx )", SARG1, SARG2, ARG3,
7709 SARG4, SARG5, ARG6);
7710 PRE_REG_READ6(long, SC2("port", "get"), int, opcode, int, a0,
7711 port_event_t *, a1, vki_time_t, a2, long, a3,
7712 timespec_t *, a4);
7713 PRE_MEM_WRITE("port(uevp)", ARG3, sizeof(vki_port_event_t));
7714 break;
7715 case VKI_PORT_GETN:
7716 PRINT("sys_port ( %ld, %ld, %#lx, %lu, %lu, %#lx )", SARG1, SARG2, ARG3,
7717 ARG4, ARG5, ARG6);
7718 PRE_REG_READ6(long, SC2("port", "getn"), int, opcode, int, a0,
7719 port_event_t *, a1, vki_uint_t, a2, vki_uint_t, a3,
7720 timespec_t *, a4);
7721 if (ARG6)
7722 PRE_MEM_READ("port(timeout)", ARG6, sizeof(vki_timespec_t));
7723 PRE_MEM_WRITE("port(uevp)", ARG3, ARG4 * sizeof(vki_port_event_t));
7724 break;
7725 case VKI_PORT_ALERT:
7726 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, SARG4,
7727 ARG5);
7728 PRE_REG_READ5(long, SC2("port", "alert"), int, opcode, int, a0, int, a1,
7729 int, a2, void *, a3);
7730 break;
7731 case VKI_PORT_DISPATCH:
7732 // FIXME: check order: SARG2, SARG1 or SARG1, SARG2 ??
7733 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx, %#lx )", SARG2, SARG1, SARG3,
7734 SARG4, ARG5, ARG6);
7735 PRE_REG_READ6(long, SC2("port", "dispatch"), int, opcode, int, a0,
7736 int, a1, int, a2, uintptr_t, a3, void *, a4);
7737 break;
7738 default:
7739 VG_(unimplemented)("Syswrap of the port call with opcode %ld.", SARG1);
7740 /*NOTREACHED*/
7741 break;
7744 /* Be strict. */
7745 if ((opcode != VKI_PORT_CREATE && opcode != VKI_PORT_SENDN) &&
7746 !ML_(fd_allowed)(ARG2, "port", tid, False))
7747 SET_STATUS_Failure(VKI_EBADF);
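   /* The strict check above is skipped for PORT_CREATE (which takes no port
      descriptor) and PORT_SENDN (whose second argument is a list of port
      descriptors rather than a single fd); for every other opcode ARG2 is the
      event port file descriptor. */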
7750 POST(sys_port)
7752 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7753 switch (opcode) {
7754 case VKI_PORT_CREATE:
7755 if (!ML_(fd_allowed)(RES, "port", tid, True)) {
7756 VG_(close)(RES);
7757 SET_STATUS_Failure(VKI_EMFILE);
7759 else if (VG_(clo_track_fds))
7760 ML_(record_fd_open_named)(tid, RES);
7761 break;
7762 case VKI_PORT_ASSOCIATE:
7763 case VKI_PORT_DISSOCIATE:
7764 case VKI_PORT_SEND:
7765 break;
7766 case VKI_PORT_SENDN:
7767 if (RES != ARG4) {
7768 /* If there is any error then the whole errors area is written. */
7769 POST_MEM_WRITE(ARG3, ARG4 * sizeof(int));
7771 break;
7772 case VKI_PORT_GET:
7773 POST_MEM_WRITE(ARG3, sizeof(vki_port_event_t));
7774 break;
7775 case VKI_PORT_GETN:
7776 POST_MEM_WRITE(ARG3, RES * sizeof(vki_port_event_t));
7777 break;
7778 case VKI_PORT_ALERT:
7779 case VKI_PORT_DISPATCH:
7780 break;
7781 default:
7782 VG_(unimplemented)("Syswrap of the port call with opcode %lu.", ARG1);
7783 /*NOTREACHED*/
7784 break;
7788 PRE(sys_pollsys)
7790 /* int pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeout,
7791 sigset_t *set); */
7792 UWord i;
7793 struct vki_pollfd *ufds = (struct vki_pollfd *)ARG1;
7795 *flags |= SfMayBlock | SfPostOnFail;
7797 PRINT("sys_pollsys ( %#lx, %lu, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
7798 PRE_REG_READ4(long, "poll", pollfd_t *, fds, vki_nfds_t, nfds,
7799 timespec_t *, timeout, sigset_t *, set);
7801 for (i = 0; i < ARG2; i++) {
7802 vki_pollfd_t *u = &ufds[i];
7803 PRE_FIELD_READ("poll(ufds.fd)", u->fd);
7804 /* XXX Check if it's valid? */
7805 PRE_FIELD_READ("poll(ufds.events)", u->events);
7806 PRE_FIELD_WRITE("poll(ufds.revents)", u->revents);
7809 if (ARG3)
7810 PRE_MEM_READ("poll(timeout)", ARG3, sizeof(vki_timespec_t));
7812 if (ARG4) {
7813 PRE_MEM_READ("poll(set)", ARG4, sizeof(vki_sigset_t));
7815 const vki_sigset_t *guest_sigmask = (vki_sigset_t *) ARG4;
7816 if (!ML_(safe_to_deref)(guest_sigmask, sizeof(vki_sigset_t))) {
7817 ARG4 = 1; /* Something recognisable to POST() hook. */
7818 } else {
7819 vki_sigset_t *vg_sigmask =
7820 VG_(malloc)("syswrap.pollsys.1", sizeof(vki_sigset_t));
7821 ARG4 = (Addr) vg_sigmask;
7822 *vg_sigmask = *guest_sigmask;
7823 VG_(sanitize_client_sigmask)(vg_sigmask);
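   /* By the end of this PRE hook ARG4 no longer holds the client's pointer: it
      is either 0 (no mask supplied), 1 (the guest mask was unreadable -- a
      sentinel the POST hook recognises), or the address of a Valgrind-owned
      copy of the mask from which VG_(sanitize_client_sigmask)() has removed
      the signals that must never be blocked. POST(sys_pollsys) frees that
      copy. */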
7828 POST(sys_pollsys)
7830 vg_assert(SUCCESS || FAILURE);
7832 if (SUCCESS && (RES >= 0)) {
7833 UWord i;
7834 vki_pollfd_t *ufds = (vki_pollfd_t*)ARG1;
7835 for (i = 0; i < ARG2; i++)
7836 POST_FIELD_WRITE(ufds[i].revents);
7839 if ((ARG4 != 0) && (ARG4 != 1)) {
7840 VG_(free)((vki_sigset_t *) ARG4);
7844 PRE(sys_labelsys)
7846 /* Kernel: int labelsys(int op, void *a1, void *a2, void *a3,
7847 void *a4, void *a5); */
7849 switch (ARG1 /*op*/) {
7850 case VKI_TSOL_SYSLABELING:
7851 /* Libc: int is_system_labeled(void); */
7852 PRINT("sys_labelsys ( %ld )", SARG1);
7853 PRE_REG_READ1(long, SC2("labelsys", "syslabeling"), int, op);
7854 break;
7856 case VKI_TSOL_TNRH:
7857 /* Libtsnet: int tnrh(int cmd, tsol_rhent_t *buf); */
7858 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7859 PRE_REG_READ3(long, SC2("labelsys", "tnrh"), int, op, int, cmd,
7860 vki_tsol_rhent_t *, buf);
7861 if (ARG2 != VKI_TNDB_FLUSH)
7862 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_rhent_t));
7863 break;
7865 case VKI_TSOL_TNRHTP:
7866 /* Libtsnet: int tnrhtp(int cmd, tsol_tpent_t *buf); */
7867 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7868 PRE_REG_READ3(long, SC2("labelsys", "tnrhtp"), int, op, int, cmd,
7869 vki_tsol_tpent_t *, buf);
7870 if (ARG2 != VKI_TNDB_FLUSH)
7871 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_tpent_t));
7872 break;
7874 case VKI_TSOL_TNMLP:
7875 /* Libtsnet: int tnmlp(int cmd, tsol_mlpent_t *buf); */
7876 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7877 PRE_REG_READ3(long, SC2("labelsys", "tnmlp"), int, op, int, cmd,
7878 vki_tsol_mlpent_t *, buf);
7879 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_mlpent_t));
7880 break;
7882 case VKI_TSOL_GETLABEL:
7883 /* Libtsol: int getlabel(const char *path, bslabel_t *label); */
7884 PRINT("sys_labelsys ( %ld, %#lx(%s), %#lx )",
7885 SARG1, ARG2, (HChar *) ARG2, ARG3);
7886 PRE_REG_READ3(long, SC2("labelsys", "getlabel"), int, op,
7887 const char *, path, vki_bslabel_t *, label);
7888 PRE_MEM_RASCIIZ("labelsys(path)", ARG2);
7889 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7890 break;
7892 case VKI_TSOL_FGETLABEL:
7893 /* Libtsol: int fgetlabel(int fd, bslabel_t *label); */
7894 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7895 PRE_REG_READ3(long, SC2("labelsys", "fgetlabel"), int, op,
7896 int, fd, vki_bslabel_t *, label);
7897 /* Be strict. */
7898 if (!ML_(fd_allowed)(ARG2, "labelsys(fgetlabel)", tid, False))
7899 SET_STATUS_Failure(VKI_EBADF);
7900 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7901 break;
7903 #if defined(SOLARIS_TSOL_CLEARANCE)
7904 case VKI_TSOL_GETCLEARANCE:
7905 /* Libtsol: int getclearance(bslabel_t *clearance); */
7906 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7907 PRE_REG_READ2(long, SC2("labelsys", "getclearance"), int, op,
7908 vki_bslabel_t *, clearance);
7909 PRE_MEM_WRITE("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7910 break;
7912 case VKI_TSOL_SETCLEARANCE:
7913 /* Libtsol: int setclearance(bslabel_t *clearance); */
7914 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7915 PRE_REG_READ2(long, SC2("labelsys", "setclearance"), int, op,
7916 vki_bslabel_t *, clearance);
7917 PRE_MEM_READ("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7918 break;
7919 #endif /* SOLARIS_TSOL_CLEARANCE */
7921 default:
7922 VG_(unimplemented)("Syswrap of the labelsys call with op %ld.", SARG1);
7923 /*NOTREACHED*/
7924 break;
7928 POST(sys_labelsys)
7930 switch (ARG1 /*op*/) {
7931 case VKI_TSOL_SYSLABELING:
7932 break;
7934 case VKI_TSOL_TNRH:
7935 switch (ARG2 /*cmd*/) {
7936 case VKI_TNDB_LOAD:
7937 case VKI_TNDB_DELETE:
7938 case VKI_TNDB_FLUSH:
7939 break;
7940 #if defined(SOLARIS_TNDB_GET_TNIP)
7941 case TNDB_GET_TNIP:
7942 #endif /* SOLARIS_TNDB_GET_TNIP */
7943 case VKI_TNDB_GET:
7944 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_rhent_t));
7945 break;
7946 default:
7947 vg_assert(0);
7948 break;
7950 break;
7952 case VKI_TSOL_TNRHTP:
7953 switch (ARG2 /*cmd*/) {
7954 case VKI_TNDB_LOAD:
7955 case VKI_TNDB_DELETE:
7956 case VKI_TNDB_FLUSH:
7957 break;
7958 case VKI_TNDB_GET:
7959 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_tpent_t));
7960 break;
7961 default:
7962 vg_assert(0);
7963 break;
7965 break;
7967 case VKI_TSOL_TNMLP:
7968 switch (ARG2 /*cmd*/) {
7969 case VKI_TNDB_LOAD:
7970 case VKI_TNDB_DELETE:
7971 case VKI_TNDB_FLUSH:
7972 break;
7973 case VKI_TNDB_GET:
7974 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_mlpent_t));
7975 break;
7976 default:
7977 vg_assert(0);
7978 break;
7980 break;
7982 case VKI_TSOL_GETLABEL:
7983 case VKI_TSOL_FGETLABEL:
7984 POST_MEM_WRITE(ARG3, sizeof(vki_bslabel_t));
7985 break;
7987 #if defined(SOLARIS_TSOL_CLEARANCE)
7988 case VKI_TSOL_GETCLEARANCE:
7989 POST_MEM_WRITE(ARG2, sizeof(vki_bslabel_t));
7990 break;
7992 case VKI_TSOL_SETCLEARANCE:
7993 break;
7994 #endif /* SOLARIS_TSOL_CLEARANCE */
7996 default:
7997 vg_assert(0);
7998 break;
8002 PRE(sys_acl)
8004 /* int acl(char *pathp, int cmd, int nentries, void *aclbufp); */
8005 PRINT("sys_acl ( %#lx(%s), %ld, %ld, %#lx )", ARG1, (HChar *) ARG1, SARG2,
8006 SARG3, ARG4);
8008 PRE_REG_READ4(long, "acl", char *, pathp, int, cmd,
8009 int, nentries, void *, aclbufp);
8010 PRE_MEM_RASCIIZ("acl(pathp)", ARG1);
8012 switch (ARG2 /*cmd*/) {
8013 case VKI_SETACL:
8014 if (ARG4)
8015 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8016 break;
8017 case VKI_GETACL:
8018 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8019 break;
8020 case VKI_GETACLCNT:
8021 break;
8022 case VKI_ACE_SETACL:
8023 if (ARG4)
8024 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8025 break;
8026 case VKI_ACE_GETACL:
8027 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8028 break;
8029 case VKI_ACE_GETACLCNT:
8030 break;
8031 default:
8032 VG_(unimplemented)("Syswrap of the acl call with cmd %ld.", SARG2);
8033 /*NOTREACHED*/
8034 break;
8038 POST(sys_acl)
8040 switch (ARG2 /*cmd*/) {
8041 case VKI_SETACL:
8042 break;
8043 case VKI_GETACL:
8044 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
8045 break;
8046 case VKI_GETACLCNT:
8047 break;
8048 case VKI_ACE_SETACL:
8049 break;
8050 case VKI_ACE_GETACL:
8051 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
8052 break;
8053 case VKI_ACE_GETACLCNT:
8054 break;
8055 default:
8056 vg_assert(0);
8057 break;
8061 PRE(sys_auditsys)
8063 /* Kernel: int auditsys(long code, long a1, long a2, long a3, long a4); */
8064 switch (ARG1 /*code*/) {
8065 case VKI_BSM_GETAUID:
8066 /* Libbsm: int getauid(au_id_t *auid); */
8067 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8068 PRE_REG_READ2(long, SC2("auditsys", "getauid"), long, code,
8069 vki_au_id_t *, auid);
8070 PRE_MEM_WRITE("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
8071 break;
8072 case VKI_BSM_SETAUID:
8073 /* Libbsm: int setauid(au_id_t *auid); */
8074 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8075 PRE_REG_READ2(long, SC2("auditsys", "setauid"), long, code,
8076 vki_au_id_t *, auid);
8077 PRE_MEM_READ("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
8078 break;
8079 case VKI_BSM_GETAUDIT:
8080 /* Libbsm: int getaudit(auditinfo_t *ai); */
8081 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8082 PRE_REG_READ2(long, SC2("auditsys", "getaudit"), long, code,
8083 vki_auditinfo_t *, ai);
8084 PRE_MEM_WRITE("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
8085 break;
8086 case VKI_BSM_SETAUDIT:
8087 /* Libbsm: int setaudit(auditinfo_t *ai); */
8088 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8089 PRE_REG_READ2(long, SC2("auditsys", "setaudit"), long, code,
8090 vki_auditinfo_t *, ai);
8091 PRE_MEM_READ("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
8092 break;
8093 case VKI_BSM_AUDIT:
8094 /* Libbsm: int audit(void *record, int length); */
8095 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8096 PRE_REG_READ3(long, SC2("auditsys", "audit"), long, code,
8097 void *, record, int, length);
8098 PRE_MEM_READ("auditsys(record)", ARG2, ARG3);
8099 break;
8100 case VKI_BSM_AUDITCTL:
8101 /* Libbsm: int auditon(int cmd, caddr_t data, int length); */
8102 PRINT("sys_auditsys ( %ld, %ld, %#lx, %ld )",
8103 SARG1, SARG2, ARG3, SARG4);
8105 switch (ARG2 /*cmd*/) {
8106 case VKI_A_GETPOLICY:
8107 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpolicy"),
8108 long, code, int, cmd, vki_uint32_t *, policy);
8109 PRE_MEM_WRITE("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
8110 break;
8111 case VKI_A_SETPOLICY:
8112 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpolicy"),
8113 long, code, int, cmd, vki_uint32_t *, policy);
8114 PRE_MEM_READ("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
8115 break;
8116 case VKI_A_GETKMASK:
8117 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getkmask"),
8118 long, code, int, cmd, vki_au_mask_t *, kmask);
8119 PRE_MEM_WRITE("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
8120 break;
8121 case VKI_A_SETKMASK:
8122 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setkmask"),
8123 long, code, int, cmd, vki_au_mask_t *, kmask);
8124 PRE_MEM_READ("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
8125 break;
8126 case VKI_A_GETQCTRL:
8127 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getqctrl"),
8128 long, code, int, cmd,
8129 struct vki_au_qctrl *, qctrl);
8130 PRE_MEM_WRITE("auditsys(qctrl)", ARG3,
8131 sizeof(struct vki_au_qctrl));
8132 break;
8133 case VKI_A_SETQCTRL:
8134 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setqctrl"),
8135 long, code, int, cmd,
8136 struct vki_au_qctrl *, qctrl);
8137 PRE_MEM_READ("auditsys(qctrl)", ARG3,
8138 sizeof(struct vki_au_qctrl));
8139 break;
8140 case VKI_A_GETCWD:
8141 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcwd"),
8142 long, code, int, cmd, char *, data, int, length);
8143 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
8144 break;
8145 case VKI_A_GETCAR:
8146 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcar"),
8147 long, code, int, cmd, char *, data, int, length);
8148 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
8149 break;
8150 #if defined(SOLARIS_AUDITON_STAT)
8151 case VKI_A_GETSTAT:
8152 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getstat"),
8153 long, code, int, cmd, vki_au_stat_t *, stats);
8154 PRE_MEM_WRITE("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
8155 break;
8156 case VKI_A_SETSTAT:
8157 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setstat"),
8158 long, code, int, cmd, vki_au_stat_t *, stats);
8159 PRE_MEM_READ("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
8160 break;
8161 #endif /* SOLARIS_AUDITON_STAT */
8162 case VKI_A_SETUMASK:
8163 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setumask"),
8164 long, code, int, cmd, vki_auditinfo_t *, umask);
8165 PRE_MEM_READ("auditsys(umask)", ARG3, sizeof(vki_auditinfo_t));
8166 break;
8167 case VKI_A_SETSMASK:
8168 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setsmask"),
8169 long, code, int, cmd, vki_auditinfo_t *, smask);
8170 PRE_MEM_READ("auditsys(smask)", ARG3, sizeof(vki_auditinfo_t));
8171 break;
8172 case VKI_A_GETCOND:
8173 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getcond"),
8174 long, code, int, cmd, int *, cond);
8175 PRE_MEM_WRITE("auditsys(cond)", ARG3, sizeof(int));
8176 break;
8177 case VKI_A_SETCOND:
8178 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setcond"),
8179 long, code, int, cmd, int *, state);
8180 PRE_MEM_READ("auditsys(cond)", ARG3, sizeof(int));
8181 break;
8182 case VKI_A_GETCLASS:
8183 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getclass"),
8184 long, code, int, cmd,
8185 vki_au_evclass_map_t *, classmap);
8187 if (ML_(safe_to_deref((void *) ARG3,
8188 sizeof(vki_au_evclass_map_t)))) {
8189 vki_au_evclass_map_t *classmap =
8190 (vki_au_evclass_map_t *) ARG3;
8191 PRE_FIELD_READ("auditsys(classmap.ec_number)",
8192 classmap->ec_number);
8193 PRE_MEM_WRITE("auditsys(classmap)", ARG3,
8194 sizeof(vki_au_evclass_map_t));
8196 break;
8197 case VKI_A_SETCLASS:
8198 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setclass"),
8199 long, code, int, cmd,
8200 vki_au_evclass_map_t *, classmap);
8202 if (ML_(safe_to_deref((void *) ARG3,
8203 sizeof(vki_au_evclass_map_t)))) {
8204 vki_au_evclass_map_t *classmap =
8205 (vki_au_evclass_map_t *) ARG3;
8206 PRE_FIELD_READ("auditsys(classmap.ec_number)",
8207 classmap->ec_number);
8208 PRE_FIELD_READ("auditsys(classmap.ec_class)",
8209 classmap->ec_class);
8211 break;
8212 case VKI_A_GETPINFO:
8213 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpinfo"),
8214 long, code, int, cmd,
8215 struct vki_auditpinfo *, apinfo);
8217 if (ML_(safe_to_deref((void *) ARG3,
8218 sizeof(struct vki_auditpinfo)))) {
8219 struct vki_auditpinfo *apinfo =
8220 (struct vki_auditpinfo *) ARG3;
8221 PRE_FIELD_READ("auditsys(apinfo.ap_pid)", apinfo->ap_pid);
8222 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
8223 sizeof(struct vki_auditpinfo));
8225 break;
8226 case VKI_A_SETPMASK:
8227 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpmask"),
8228 long, code, int, cmd,
8229 struct vki_auditpinfo *, apinfo);
8230 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
8231 sizeof(struct vki_auditpinfo));
8232 break;
8233 case VKI_A_GETPINFO_ADDR:
8234 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getpinfo_addr"),
8235 long, code, int, cmd,
8236 struct vki_auditpinfo_addr *, apinfo, int, length);
8238 if (ML_(safe_to_deref((void *) ARG3,
8239 sizeof(struct vki_auditpinfo_addr)))) {
8240 struct vki_auditpinfo_addr *apinfo_addr =
8241 (struct vki_auditpinfo_addr *) ARG3;
8242 PRE_FIELD_READ("auditsys(apinfo_addr.ap_pid)",
8243 apinfo_addr->ap_pid);
8244 PRE_MEM_WRITE("auditsys(apinfo_addr)", ARG3, ARG4);
8246 break;
8247 case VKI_A_GETKAUDIT:
8248 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getkaudit"),
8249 long, code, int, cmd,
8250 vki_auditinfo_addr_t *, kaudit, int, length);
8251 PRE_MEM_WRITE("auditsys(kaudit)", ARG3, ARG4);
8252 break;
8253 case VKI_A_SETKAUDIT:
8254 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "setkaudit"),
8255 long, code, int, cmd,
8256 vki_auditinfo_addr_t *, kaudit, int, length);
8257 PRE_MEM_READ("auditsys(kaudit)", ARG3, ARG4);
8258 break;
8259 case VKI_A_GETAMASK:
8260 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getamask"),
8261 long, code, int, cmd, vki_au_mask_t *, amask);
8262 PRE_MEM_WRITE("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
8263 break;
8264 case VKI_A_SETAMASK:
8265 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setamask"),
8266 long, code, int, cmd, vki_au_mask_t *, amask);
8267 PRE_MEM_READ("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
8268 break;
8269 default:
8270 VG_(unimplemented)("Syswrap of the auditsys(auditctl) call "
8271 "with cmd %lu.", ARG2);
8272 /*NOTREACHED*/
8273 break;
8275 break;
8276 case VKI_BSM_GETAUDIT_ADDR:
8277 /* Libbsm: int getaudit_addr(auditinfo_addr_t *ai, int len); */
8278 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8279 PRE_REG_READ3(long, SC2("auditsys", "getaudit_addr"), long, code,
8280 vki_auditinfo_addr_t *, ai, int, len);
8281 PRE_MEM_WRITE("auditsys(ai)", ARG2, ARG3);
8282 break;
8283 case VKI_BSM_SETAUDIT_ADDR:
8284 /* Libbsm: int setaudit_addr(auditinfo_addr_t *ai, int len); */
8285 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8286 PRE_REG_READ3(long, SC2("auditsys", "setaudit_addr"), long, code,
8287 vki_auditinfo_addr_t *, ai, int, len);
8288 PRE_MEM_READ("auditsys(ai)", ARG2, ARG3);
8289 break;
8290 case VKI_BSM_AUDITDOOR:
8291 /* Libbsm: int auditdoor(int fd); */
8292 PRINT("sys_auditsys ( %ld, %ld )", SARG1, SARG2);
8293 PRE_REG_READ2(long, SC2("auditsys", "door"), long, code, int, fd);
8295 /* Be strict. */
8296 if (!ML_(fd_allowed)(ARG2, SC2("auditsys", "door")"(fd)",
8297 tid, False))
8298 SET_STATUS_Failure(VKI_EBADF);
8299 break;
8300 default:
8301 VG_(unimplemented)("Syswrap of the auditsys call with code %lu.", ARG1);
8302 /*NOTREACHED*/
8303 break;
8307 POST(sys_auditsys)
8309 switch (ARG1 /*code*/) {
8310 case VKI_BSM_GETAUID:
8311 POST_MEM_WRITE(ARG2, sizeof(vki_au_id_t));
8312 break;
8313 case VKI_BSM_SETAUID:
8314 break;
8315 case VKI_BSM_GETAUDIT:
8316 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_t));
8317 break;
8318 case VKI_BSM_SETAUDIT:
8319 case VKI_BSM_AUDIT:
8320 break;
8321 case VKI_BSM_AUDITCTL:
8322 switch (ARG2 /*cmd*/) {
8323 case VKI_A_GETPOLICY:
8324 POST_MEM_WRITE(ARG3, sizeof(vki_uint32_t));
8325 break;
8326 case VKI_A_SETPOLICY:
8327 break;
8328 case VKI_A_GETKMASK:
8329 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
8330 break;
8331 case VKI_A_SETKMASK:
8332 break;
8333 case VKI_A_GETQCTRL:
8334 POST_MEM_WRITE(ARG3, sizeof(struct vki_au_qctrl));
8335 break;
8336 case VKI_A_SETQCTRL:
8337 break;
8338 case VKI_A_GETCWD:
8339 case VKI_A_GETCAR:
8340 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
8341 break;
8342 #if defined(SOLARIS_AUDITON_STAT)
8343 case VKI_A_GETSTAT:
8344 POST_MEM_WRITE(ARG3, sizeof(vki_au_stat_t));
8345 break;
8346 case VKI_A_SETSTAT:
8347 #endif /* SOLARIS_AUDITON_STAT */
8348 case VKI_A_SETUMASK:
8349 case VKI_A_SETSMASK:
8350 break;
8351 case VKI_A_GETCOND:
8352 POST_MEM_WRITE(ARG3, sizeof(int));
8353 break;
8354 case VKI_A_SETCOND:
8355 break;
8356 case VKI_A_GETCLASS:
8357 POST_MEM_WRITE(ARG3, sizeof(vki_au_evclass_map_t));
8358 break;
8359 case VKI_A_SETCLASS:
8360 break;
8361 case VKI_A_GETPINFO:
8362 POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo));
8363 break;
8364 case VKI_A_SETPMASK:
8365 break;
8366 case VKI_A_GETPINFO_ADDR:
8367 POST_MEM_WRITE(ARG3, sizeof(struct auditpinfo_addr));
8368 break;
8369 case VKI_A_GETKAUDIT:
8370 POST_MEM_WRITE(ARG3, sizeof(vki_auditinfo_addr_t));
8371 break;
8372 case VKI_A_SETKAUDIT:
8373 break;
8374 case VKI_A_GETAMASK:
8375 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
8376 break;
8377 case VKI_A_SETAMASK:
8378 break;
8380 break;
8381 case VKI_BSM_GETAUDIT_ADDR:
8382 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_addr_t));
8383 break;
8384 case VKI_BSM_SETAUDIT_ADDR:
8385 break;
8386 case VKI_BSM_AUDITDOOR:
8387 break;
8391 PRE(sys_p_online)
8393 /* int p_online(processorid_t processorid, int flag); */
8394 PRINT("sys_p_online ( %ld, %ld )", SARG1, SARG2);
8395 PRE_REG_READ2(long, "p_online", vki_processorid_t, processorid, int, flag);
8398 PRE(sys_sigqueue)
8400 /* int sigqueue(pid_t pid, int signo, void *value,
8401 int si_code, timespec_t *timeout);
8403 PRINT("sys_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
8404 SARG1, SARG2, ARG3, SARG4, ARG5);
8405 PRE_REG_READ5(long, "sigqueue", vki_pid_t, pid, int, signo,
8406 void *, value, int, si_code,
8407 vki_timespec_t *, timeout);
8409 if (ARG5)
8410 PRE_MEM_READ("sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
8412 if (!ML_(client_signal_OK)(ARG2)) {
8413 SET_STATUS_Failure(VKI_EINVAL);
8414 return;
8417 /* If we're sending SIGKILL, check to see if the target is one of
8418 our threads and handle it specially. */
8419 if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1)) {
8420 SET_STATUS_Success(0);
8421 } else {
8422 SysRes res = VG_(do_syscall5)(SYSNO, ARG1, ARG2, ARG3, ARG4,
8423 ARG5);
8424 SET_STATUS_from_SysRes(res);
8427 if (VG_(clo_trace_signals))
8428 VG_(message)(Vg_DebugMsg,
8429 "sigqueue: signal %ld queued for pid %ld\n",
8430 SARG2, SARG1);
8432 /* Check to see if this gave us a pending signal. */
8433 *flags |= SfPollAfter;
8436 PRE(sys_clock_gettime)
8438 /* int clock_gettime(clockid_t clock_id, struct timespec *tp); */
8439 PRINT("sys_clock_gettime ( %ld, %#lx )", SARG1, ARG2);
8440 PRE_REG_READ2(long, "clock_gettime", vki_clockid_t, clock_id,
8441 struct timespec *, tp);
8442 PRE_MEM_WRITE("clock_gettime(tp)", ARG2, sizeof(struct vki_timespec));
8445 POST(sys_clock_gettime)
8447 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
8450 PRE(sys_clock_settime)
8452 /* int clock_settime(clockid_t clock_id, const struct timespec *tp); */
8453 PRINT("sys_clock_settime ( %ld, %#lx )", SARG1, ARG2);
8454 PRE_REG_READ2(long, "clock_settime", vki_clockid_t, clock_id,
8455 const struct timespec *, tp);
8456 PRE_MEM_READ("clock_settime(tp)", ARG2, sizeof(struct vki_timespec));
8459 PRE(sys_clock_getres)
8461 /* int clock_getres(clockid_t clock_id, struct timespec *res); */
8462 PRINT("sys_clock_getres ( %ld, %#lx )", SARG1, ARG2);
8463 PRE_REG_READ2(long, "clock_getres", vki_clockid_t, clock_id,
8464 struct timespec *, res);
8466 if (ARG2)
8467 PRE_MEM_WRITE("clock_getres(res)", ARG2, sizeof(struct vki_timespec));
8470 POST(sys_clock_getres)
8472 if (ARG2)
8473 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
8476 PRE(sys_timer_create)
8478 /* int timer_create(clockid_t clock_id,
8479 struct sigevent *evp, timer_t *timerid);
8481 PRINT("sys_timer_create ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
8482 PRE_REG_READ3(long, "timer_create", vki_clockid_t, clock_id,
8483 struct vki_sigevent *, evp, vki_timer_t *, timerid);
8485 if (ARG2) {
8486 struct vki_sigevent *evp = (struct vki_sigevent *) ARG2;
8487 PRE_FIELD_READ("timer_create(evp.sigev_notify)", evp->sigev_notify);
8488 PRE_FIELD_READ("timer_create(evp.sigev_signo)", evp->sigev_signo);
8489 PRE_FIELD_READ("timer_create(evp.sigev_value.sival_int)",
8490 evp->sigev_value.sival_int);
8492 /* Be safe. */
8493 if (ML_(safe_to_deref(evp, sizeof(struct vki_sigevent)))) {
8494 if ((evp->sigev_notify == VKI_SIGEV_PORT) ||
8495 (evp->sigev_notify == VKI_SIGEV_THREAD))
8496 PRE_MEM_READ("timer_create(evp.sigev_value.sival_ptr)",
8497 (Addr) evp->sigev_value.sival_ptr,
8498 sizeof(vki_port_notify_t));
8502 PRE_MEM_WRITE("timer_create(timerid)", ARG3, sizeof(vki_timer_t));
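   /* Illustrative client-side sketch of how the SIGEV_PORT branch above is
      typically reached. This is ordinary Solaris event-port usage; the
      identifiers below come from <port.h>/<time.h>, not from this file, so
      treat the details as an assumption:

         port_notify_t pn;
         pn.portnfy_port = port_create();
         pn.portnfy_user = NULL;
         struct sigevent ev;
         ev.sigev_notify = SIGEV_PORT;
         ev.sigev_value.sival_ptr = &pn;   // points at a port_notify_t
         timer_t timerid;
         (void) timer_create(CLOCK_REALTIME, &ev, &timerid);

      That is why sival_ptr is checked as a port_notify_t-sized read when
      sigev_notify is SIGEV_PORT; SIGEV_THREAD is treated the same way,
      presumably because libc converts it to a port notification before making
      the syscall. */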
8505 POST(sys_timer_create)
8507 POST_MEM_WRITE(ARG3, sizeof(vki_timer_t));
8510 PRE(sys_timer_delete)
8512 /* int timer_delete(timer_t timerid); */
8513 PRINT("sys_timer_delete ( %ld )", SARG1);
8514 PRE_REG_READ1(long, "timer_delete", vki_timer_t, timerid);
8517 PRE(sys_timer_settime)
8519 /* int timer_settime(timer_t timerid, int flags,
8520 const struct itimerspec *value,
8521 struct itimerspec *ovalue);
8523 PRINT("sys_timer_settime ( %ld, %ld, %#lx, %#lx )",
8524 SARG1, SARG2, ARG3, ARG4);
8525 PRE_REG_READ4(long, "timer_settime", vki_timer_t, timerid,
8526 int, flags, const struct vki_itimerspec *, value,
8527 struct vki_itimerspec *, ovalue);
8528 PRE_MEM_READ("timer_settime(value)",
8529 ARG3, sizeof(struct vki_itimerspec));
8530 if (ARG4)
8531 PRE_MEM_WRITE("timer_settime(ovalue)",
8532 ARG4, sizeof(struct vki_itimerspec));
8535 POST(sys_timer_settime)
8537 if (ARG4)
8538 POST_MEM_WRITE(ARG4, sizeof(struct vki_itimerspec));
8541 PRE(sys_timer_gettime)
8543 /* int timer_gettime(timer_t timerid, struct itimerspec *value); */
8544 PRINT("sys_timer_gettime ( %ld, %#lx )", SARG1, ARG2);
8545 PRE_REG_READ2(long, "timer_gettime", vki_timer_t, timerid,
8546 struct vki_itimerspec *, value);
8547 PRE_MEM_WRITE("timer_gettime(value)",
8548 ARG2, sizeof(struct vki_itimerspec));
8551 POST(sys_timer_gettime)
8553 POST_MEM_WRITE(ARG2, sizeof(struct vki_itimerspec));
8556 PRE(sys_timer_getoverrun)
8558 /* int timer_getoverrun(timer_t timerid); */
8559 PRINT("sys_timer_getoverrun ( %ld )", SARG1);
8560 PRE_REG_READ1(long, "timer_getoverrun", vki_timer_t, timerid);
8563 PRE(sys_facl)
8565 /* int facl(int fildes, int cmd, int nentries, void *aclbufp); */
8566 PRINT("sys_facl ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
8568 PRE_REG_READ4(long, "facl", int, fildes, int, cmd,
8569 int, nentries, void *, aclbufp);
8571 switch (ARG2 /*cmd*/) {
8572 case VKI_SETACL:
8573 if (ARG4)
8574 PRE_MEM_READ("facl(aclbufp)", ARG4, sizeof(vki_aclent_t));
8575 break;
8576 case VKI_GETACL:
8577 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8578 break;
8579 case VKI_GETACLCNT:
8580 break;
8581 case VKI_ACE_SETACL:
8582 if (ARG4)
8583 PRE_MEM_READ("facl(aclbufp)", ARG4, sizeof(vki_ace_t));
8584 break;
8585 case VKI_ACE_GETACL:
8586 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8587 break;
8588 case VKI_ACE_GETACLCNT:
8589 break;
8590 default:
8591 VG_(unimplemented)("Syswrap of the facl call with cmd %ld.", SARG2);
8592 /*NOTREACHED*/
8593 break;
8596 /* Be strict. */
8597 if (!ML_(fd_allowed)(ARG1, "facl", tid, False))
8598 SET_STATUS_Failure(VKI_EBADF);
8601 POST(sys_facl)
8603 switch (ARG2 /*cmd*/) {
8604 case VKI_SETACL:
8605 break;
8606 case VKI_GETACL:
8607 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
8608 break;
8609 case VKI_GETACLCNT:
8610 break;
8611 case VKI_ACE_SETACL:
8612 break;
8613 case VKI_ACE_GETACL:
8614 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
8615 break;
8616 case VKI_ACE_GETACLCNT:
8617 break;
8618 default:
8619 vg_assert(0);
8620 break;
8624 static Int pre_check_and_close_fds(ThreadId tid, const HChar *name,
8625 vki_door_desc_t *desc_ptr,
8626 vki_uint_t desc_num)
8628 vki_uint_t i;
8630 /* Verify passed file descriptors. */
8631 for (i = 0; i < desc_num; i++) {
8632 vki_door_desc_t *desc = &desc_ptr[i];
8633 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8634 (desc->d_attributes & DOOR_RELEASE)) {
8635 Int fd = desc->d_data.d_desc.d_descriptor;
8637 /* Detect and negate attempts by the client to close Valgrind's fds.
8638 Also if doing -d style logging (which is to fd = 2 = stderr),
8639 don't allow that to be closed either. */
8640 if (!ML_(fd_allowed)(fd, name, tid, False) ||
8641 (fd == 2 && VG_(debugLog_getLevel)() > 0))
8642 return VKI_EBADF;
8646 /* All fds are allowed, record information about the closed ones.
8648       Note: Recording information about any closed fds should generally happen
8649       in a post wrapper but it is not possible in this case because door calls
8650       are "very blocking"; if the information were recorded after the syscall
8651       finishes then it would be out of date during the call, i.e. while the
8652       syscall is blocked in the kernel. Therefore, we record closed fds for
8653       this specific syscall in the PRE wrapper. Unfortunately, this creates
8654       a problem when the syscall fails; for example, door_call() can fail with
8655       EBADF or EFAULT and then no fds are actually released. If that happens,
8656       the recorded information about open fds is incorrect. This should be very
8657       rare (I hope) and such a condition is also reported in the post wrapper. */
8658 if (VG_(clo_track_fds)) {
8659 for (i = 0; i < desc_num; i++) {
8660 vki_door_desc_t *desc = &desc_ptr[i];
8661 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8662 (desc->d_attributes & DOOR_RELEASE)) {
8663 Int fd = desc->d_data.d_desc.d_descriptor;
8664 ML_(record_fd_close)(fd);
8669 return 0;
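/* Doors can carry file descriptors in both directions: descriptors sent with
   DOOR_DESCRIPTOR | DOOR_RELEASE are closed in the caller once they have been
   passed, which is what pre_check_and_close_fds() vets and records above,
   while descriptors arriving in a door result are registered by
   post_record_fds() below. */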
8672 static void post_record_fds(ThreadId tid, const HChar *name,
8673 vki_door_desc_t *desc_ptr, vki_uint_t desc_num)
8675 vki_uint_t i;
8677 /* Record returned file descriptors. */
8678 for (i = 0; i < desc_num; i++) {
8679 vki_door_desc_t *desc = &desc_ptr[i];
8680 if (desc->d_attributes & DOOR_DESCRIPTOR) {
8681 Int fd = desc->d_data.d_desc.d_descriptor;
8682 if (!ML_(fd_allowed)(fd, name, tid, True)) {
8683 /* Unfortunately, we cannot recover at this point and have to fail
8684 hard. */
8685          VG_(message)(Vg_UserMsg, "The %s syscall returned an unallowed "
8686                                   "file descriptor %d.\n", name, fd);
8687 VG_(exit)(101);
8689 else if (VG_(clo_track_fds))
8690 ML_(record_fd_open_named)(tid, fd);
8695 /* Handles repository door protocol request over client door fd. */
8696 static void repository_door_pre_mem_door_call_hook(ThreadId tid, Int fd,
8697 void *data_ptr,
8698 SizeT data_size)
8700 vki_rep_protocol_request_t *p = (vki_rep_protocol_request_t *) data_ptr;
8701 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8702 "request->rpr_request)", p->rpr_request);
8704 if (ML_(safe_to_deref)(p, sizeof(vki_rep_protocol_request_t))) {
8705 switch (p->rpr_request) {
8706 case VKI_REP_PROTOCOL_CLOSE:
8707 break;
8708 case VKI_REP_PROTOCOL_ENTITY_SETUP:
8710 struct vki_rep_protocol_entity_setup *r =
8711 (struct vki_rep_protocol_entity_setup *) p;
8712 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8713 "entity_setup->rpr_entityid)", r->rpr_entityid);
8714 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8715 "entity_setup->rpr_entitytype)", r->rpr_entitytype);
8717 break;
8718 case VKI_REP_PROTOCOL_ENTITY_NAME:
8720 struct vki_rep_protocol_entity_name *r =
8721 (struct vki_rep_protocol_entity_name *) p;
8722 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8723 "entity_name->rpr_entityid)", r->rpr_entityid);
8724 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8725 "entity_name->rpr_answertype)", r->rpr_answertype);
8727 break;
8728 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 24) && (SOLARIS_REPCACHE_PROTOCOL_VERSION <= 30)
8729 case VKI_REP_PROTOCOL_ENTITY_FMRI:
8731 struct vki_rep_protocol_entity_fmri *r =
8732 (struct vki_rep_protocol_entity_fmri *) p;
8733 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8734 "entity_fmri->rpr_entityid)", r->rpr_entityid);
8736 break;
8737 #endif /* 24 <= SOLARIS_REPCACHE_PROTOCOL_VERSION =< 30 */
8738 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25)
8739 case VKI_REP_PROTOCOL_ENTITY_GET_ROOT:
8741 struct vki_rep_protocol_entity_root *r =
8742 (struct vki_rep_protocol_entity_root *) p;
8743 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8744 "entity_root->rpr_entityid)", r->rpr_entityid);
8745 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8746 "entity_root->rpr_outid)", r->rpr_outid);
8748 break;
8749 #endif /* SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25 */
8750 case VKI_REP_PROTOCOL_ENTITY_GET:
8752 struct vki_rep_protocol_entity_get *r =
8753 (struct vki_rep_protocol_entity_get *) p;
8754 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8755 "entity_get->rpr_entityid)", r->rpr_entityid);
8756 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8757 "entity_get->rpr_object)", r->rpr_object);
8759 break;
8760 case VKI_REP_PROTOCOL_ENTITY_GET_CHILD:
8761 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 31)
8762 case VKI_REP_PROTOCOL_ENTITY_GET_CHILD_COMPOSED:
8763 #endif
8765 struct vki_rep_protocol_entity_get_child *r =
8766 (struct vki_rep_protocol_entity_get_child *) p;
8767 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8768 "entity_get_child->rpr_entityid)", r->rpr_entityid);
8769 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8770 "entity_get_child->rpr_childid)", r->rpr_childid);
8771 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8772 "entity_get_child->rpr_name)", (Addr) r->rpr_name);
8774 break;
8775 case VKI_REP_PROTOCOL_ENTITY_GET_PARENT:
8777 struct vki_rep_protocol_entity_parent *r =
8778 (struct vki_rep_protocol_entity_parent *) p;
8779 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8780 "entity_get_parent->rpr_entityid)", r->rpr_entityid);
8781 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8782 "entity_get_parent->rpr_outid)", r->rpr_outid);
8784 break;
8785 case VKI_REP_PROTOCOL_ENTITY_RESET:
8787 struct vki_rep_protocol_entity_reset *r =
8788 (struct vki_rep_protocol_entity_reset *) p;
8789 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8790 "entity_reset->rpr_entityid)", r->rpr_entityid);
8792 break;
8793 case VKI_REP_PROTOCOL_ENTITY_TEARDOWN:
8795 struct vki_rep_protocol_entity_teardown *r =
8796 (struct vki_rep_protocol_entity_teardown *) p;
8797 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8798 "entity_teardown->rpr_entityid)", r->rpr_entityid);
8800 break;
8801 case VKI_REP_PROTOCOL_ITER_READ:
8803 struct vki_rep_protocol_iter_read *r =
8804 (struct vki_rep_protocol_iter_read *) p;
8805 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8806 "iter_read->rpr_iterid)", r->rpr_iterid);
8807 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8808 "iter_read->rpr_sequence)", r->rpr_sequence);
8809 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8810 "iter_read->rpr_entityid)", r->rpr_entityid);
8812 break;
8813 case VKI_REP_PROTOCOL_ITER_READ_VALUE:
8815 struct vki_rep_protocol_iter_read_value *r =
8816 (struct vki_rep_protocol_iter_read_value *) p;
8817 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8818 "iter_read_value->rpr_iterid)", r->rpr_iterid);
8819 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8820 "iter_read_value->rpr_sequence)", r->rpr_sequence);
8822 break;
8823 case VKI_REP_PROTOCOL_ITER_RESET:
8824 case VKI_REP_PROTOCOL_ITER_SETUP:
8825 case VKI_REP_PROTOCOL_ITER_TEARDOWN:
8827 struct vki_rep_protocol_iter_request *r =
8828 (struct vki_rep_protocol_iter_request *) p;
8829 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8830 "iter_request->rpr_iterid)", r->rpr_iterid);
8832 break;
8833 case VKI_REP_PROTOCOL_ITER_START:
8835 struct vki_rep_protocol_iter_start *r =
8836 (struct vki_rep_protocol_iter_start *) p;
8837 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8838 "iter_start->rpr_iterid)", r->rpr_iterid);
8839 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8840 "iter_start->rpr_entity)", r->rpr_entity);
8841 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8842 "iter_start->rpr_itertype)", r->rpr_itertype);
8843 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8844 "iter_start->rpr_flags)", r->rpr_flags);
8845 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8846 "iter_start->rpr_pattern)", (Addr) r->rpr_pattern);
8848 break;
8849 case VKI_REP_PROTOCOL_PROPERTY_GET_TYPE:
8850 case VKI_REP_PROTOCOL_PROPERTY_GET_VALUE:
8852 struct vki_rep_protocol_property_request *r =
8853 (struct vki_rep_protocol_property_request *) p;
8854 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8855 "property_request->rpr_entityid)", r->rpr_entityid);
8857 break;
8858 default:
8859 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
8860 " where rpr_request=%#x.", p->rpr_request);
8861 /* NOTREACHED */
8862 break;
8867 /* Handles repository door protocol response over client door fd. */
8868 static void repository_door_post_mem_door_call_hook(ThreadId tid, Int fd,
8869 void *rbuf, SizeT rsize)
8871 /* :TODO: Ideally we would match the response type with the previous
8872 request, because the response itself does not contain any type
8873 identification. For now, simply mark the whole response buffer as
8874 defined; a possible refinement is sketched below. */
8875 POST_MEM_WRITE((Addr) rbuf, rsize);
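/* Editorial sketch elaborating the TODO above; this is purely hypothetical
   and not implemented anywhere in Valgrind.  The PRE hook could stash the
   last rpr_request seen on each client door fd (for example in its OpenDoor
   entry) so that this POST hook could mark only the fields of the matching
   response type instead of defining the whole buffer:

     static UInt last_repository_request;   // would live in OpenDoor

     static void repository_door_post_by_type(ThreadId tid, void *rbuf,
                                              SizeT rsize)
     {
        if (last_repository_request == VKI_REP_PROTOCOL_ENTITY_GET_ROOT) {
           // Mark just the members of the entity_get_root response here.
        } else {
           // Fallback: the current behaviour for all response types.
           POST_MEM_WRITE((Addr) rbuf, rsize);
        }
     }
*/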
8878 /* Pre-syscall checks for params->data_ptr contents of a door_call(). */
8879 static void door_call_pre_mem_params_data(ThreadId tid, Int fd,
8880 void *data_ptr, SizeT data_size)
8882 const HChar *pathname;
8884 /* Get the pathname of the door file descriptor, if not already recorded.
8885 It is needed to dissect the door service based on the pathname below. */
8886 if (!VG_(clo_track_fds) && !ML_(fd_recorded)(fd)) {
8887 ML_(record_fd_open_named)(tid, fd);
8889 pathname = ML_(find_fd_recorded_by_fd)(fd);
8891 /* Debug-only printing. */
8892 if (0) {
8893 VG_(printf)("PRE(door_call) with fd=%d and filename=%s\n",
8894 fd, pathname);
8897 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
8898 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
8900 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8901 "kcf_door_arg_t->da_version)", p->da_version);
8902 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8903 "kcf_door_arg_t->da_iskernel)", p->da_iskernel);
8904 PRE_MEM_RASCIIZ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8905 "kcf_door_arg_t->da_u.filename)",
8906 (Addr) p->vki_da_u.filename);
8907 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
8908 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
8910 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8911 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
8912 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
8913 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
8914 /* request from an application towards nscd */
8915 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8916 "nss_pheader->p_version)", p->p_version);
8917 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8918 "nss_pheader->dbd_off)", p->dbd_off);
8919 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8920 "nss_pheader->dbd_len)", p->dbd_len);
8921 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8922 "nss_pheader->key_off)", p->key_off);
8923 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8924 "nss_pheader->key_len)", p->key_len);
8925 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8926 "nss_pheader->data_off)", p->data_off);
8927 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8928 "nss_pheader->data_len)", p->data_len);
8929 /* Fields ext_off and ext_len are set only sporadically. */
8930 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8931 "nss_pheader->pbufsiz)", p->pbufsiz);
8932 PRE_MEM_WRITE("door_call(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
8933 (Addr) p, p->pbufsiz);
8935 if (p->dbd_len > 0) {
8936 vki_nss_dbd_t *dbd
8937 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
8939 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR
8940 "\", nss_dbd)", (Addr) dbd, sizeof(vki_nss_dbd_t));
8941 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
8942 if (dbd->o_name != 0)
8943 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8944 "\", nss_dbd->o_name)", (Addr) ((HChar *) p
8945 + p->dbd_off + dbd->o_name));
8946 if (dbd->o_config_name != 0)
8947 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8948 "\", nss_dbd->o_config_name)",
8949 (Addr) ((HChar *) p + p->dbd_off
8950 + dbd->o_config_name));
8951 if (dbd->o_default_config != 0)
8952 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8953 "\", nss_dbd->o_default_config)",
8954 (Addr) ((HChar *) p + p->dbd_off +
8955 dbd->o_default_config));
8959 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", nss->key)",
8960 (Addr) ((HChar *) p + p->key_off), p->key_len);
8961 } else {
8962 /* request from a child nscd towards parent nscd */
8963 VG_(unimplemented)("Door wrapper of child/parent nscd.");
8966 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
8967 vki_repository_door_request_t *p =
8968 (vki_repository_door_request_t *) data_ptr;
8970 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8971 "request->rdr_version)", p->rdr_version);
8972 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8973 "request->rdr_request)", p->rdr_request);
8974 if (ML_(safe_to_deref)(p, sizeof(vki_repository_door_request_t))) {
8975 if (p->rdr_version == VKI_REPOSITORY_DOOR_VERSION) {
8976 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8977 "request->rdr_flags)", p->rdr_flags);
8978 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8979 "request->rdr_debug)", p->rdr_debug);
8980 } else {
8981 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
8982 " where version=%u.", p->rdr_version);
8985 } else {
8986 const OpenDoor *open_door = door_find_by_fd(fd);
8987 if ((open_door != NULL) && (open_door->pre_mem_hook != NULL)) {
8988 open_door->pre_mem_hook(tid, fd, data_ptr, data_size);
8989 } else {
8990 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
8992 * Be very lax about door syscall handling over unrecognized
8993 * door file descriptors. Do not require that the full buffer
8994 * is initialized when writing. Without this, programs using
8995 * libdoor(3LIB) functionality with completely proprietary
8996 * semantics may report a large number of false positives.
8998 } else {
8999 static Int moans = 3;
9001 /* generic default */
9002 if (moans > 0 && !VG_(clo_xml)) {
9003 moans--;
9004 VG_(umsg)(
9005 "Warning: noted and generically handled door call\n"
9006 " on file descriptor %d (filename: %s).\n"
9007 " This could cause spurious value errors to appear.\n"
9008 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
9009 " Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
9010 fd, pathname);
9012 PRE_MEM_READ("door_call(params->data_ptr)",
9013 (Addr) data_ptr, data_size);
9019 /* Post-syscall checks for params->rbuf contents of a door_call(). */
9020 static void door_call_post_mem_params_rbuf(ThreadId tid, Int fd,
9021 void *rbuf, SizeT rsize,
9022 const vki_door_desc_t *desc_ptr,
9023 vki_uint_t desc_num)
9025 const HChar *pathname = ML_(find_fd_recorded_by_fd)(fd);
9027 /* Debug-only printing. */
9028 if (0) {
9029 VG_(printf)("POST(door_call) with fd=%d and filename=%s\n",
9030 fd, pathname);
9033 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9034 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) rbuf;
9036 POST_FIELD_WRITE(p->da_version);
9037 POST_FIELD_WRITE(p->vki_da_u.result.status);
9038 POST_MEM_WRITE((Addr) p->vki_da_u.result.signature,
9039 p->vki_da_u.result.siglen);
9040 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9041 vki_nss_pheader_t *p = (vki_nss_pheader_t *) rbuf;
9043 POST_FIELD_WRITE(p->nsc_callnumber);
9044 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9045 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9046 /* response from nscd to an application */
9047 POST_FIELD_WRITE(p->p_status);
9048 POST_FIELD_WRITE(p->p_errno);
9049 POST_FIELD_WRITE(p->p_herrno);
9050 POST_FIELD_WRITE(p->dbd_off);
9051 POST_FIELD_WRITE(p->dbd_len);
9052 POST_FIELD_WRITE(p->key_off);
9053 POST_FIELD_WRITE(p->key_len);
9054 POST_FIELD_WRITE(p->data_off);
9055 POST_FIELD_WRITE(p->data_len);
9056 POST_FIELD_WRITE(p->ext_off);
9057 POST_FIELD_WRITE(p->ext_len);
9058 POST_FIELD_WRITE(p->pbufsiz);
9060 if (p->pbufsiz <= rsize) {
9061 if (p->dbd_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9062 SizeT len = MIN(p->dbd_len, p->pbufsiz - p->dbd_off);
9063 POST_MEM_WRITE((Addr) ((HChar *) p + p->dbd_off), len);
9066 if (p->key_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9067 SizeT len = MIN(p->key_len, p->pbufsiz - p->key_off);
9068 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), len);
9071 if (p->data_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9072 SizeT len = MIN(p->data_len, p->pbufsiz - p->data_off);
9073 POST_MEM_WRITE((Addr) ((HChar *) p + p->data_off), len);
9076 if (p->ext_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9077 SizeT len = MIN(p->ext_len, p->pbufsiz - p->ext_off);
9078 POST_MEM_WRITE((Addr) ((HChar *) p + p->ext_off), len);
9081 } else {
9082 /* response from parent nscd to a child nscd */
9083 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9086 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9087 POST_FIELD_WRITE(((vki_repository_door_response_t *) rbuf)->rdr_status);
9088 /* A new client door fd is passed over the global repository door. */
9089 if ((desc_ptr != NULL) && (desc_num > 0)) {
9090 if (desc_ptr[0].d_attributes & DOOR_DESCRIPTOR) {
9091 door_record_client(tid, desc_ptr[0].d_data.d_desc.d_descriptor,
9092 repository_door_pre_mem_door_call_hook,
9093 repository_door_post_mem_door_call_hook);
9096 } else {
9097 const OpenDoor *open_door = door_find_by_fd(fd);
9098 if ((open_door != NULL) && (open_door->post_mem_hook != NULL)) {
9099 open_door->post_mem_hook(tid, fd, rbuf, rsize);
9100 } else {
9101 /* generic default */
9102 POST_MEM_WRITE((Addr) rbuf, rsize);
9107 /* Pre-syscall checks for data_ptr contents in a door_return(). */
9108 static void door_return_pre_mem_data(ThreadId tid, Addr server_procedure,
9109 void *data_ptr, SizeT data_size)
9111 if ((data_size == 0) || (server_procedure == 0)) {
9112 /* There is nothing to check. This usually happens during the thread's
9113 first call to door_return(). */
9114 return;
9117 /* Get the pathname of the door file descriptor based on the
9118 door server procedure (that is all we have).
9119 It is needed to dissect the door service based on the pathname below. */
9120 const OpenDoor *open_door = door_find_by_proc(server_procedure);
9121 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
9122 Int fd = (open_door != NULL) ? open_door->fd : -1;
9124 /* Debug-only printing. */
9125 if (0) {
9126 VG_(printf)("PRE(door_return) with fd=%d and filename=%s "
9127 "(nr_doors_recorded=%u)\n",
9128 fd, pathname, nr_doors_recorded);
9131 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9132 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
9134 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9135 "kcf_door_arg_t->da_version)", p->da_version);
9136 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9137 "kcf_door_arg_t->da_u.result.status)",
9138 p->vki_da_u.result.status);
9139 PRE_MEM_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9140 "kcf_door_arg_t->da_u.result.signature)",
9141 (Addr) p->vki_da_u.result.signature,
9142 p->vki_da_u.result.siglen);
9143 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9144 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
9146 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9147 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
9148 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9149 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9150 /* response from nscd to an application */
9151 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9152 "nss_pheader->p_status)", p->p_status);
9153 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9154 "nss_pheader->p_errno)", p->p_errno);
9155 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9156 "nss_pheader->p_herrno)", p->p_herrno);
9157 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9158 "nss_pheader->dbd_off)", p->dbd_off);
9159 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9160 "nss_pheader->dbd_len)", p->dbd_len);
9161 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9162 "nss_pheader->data_off)", p->data_off);
9163 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9164 "nss_pheader->data_len)", p->data_len);
9165 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9166 "nss_pheader->ext_off)", p->ext_off);
9167 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9168 "nss_pheader->ext_len)", p->ext_len);
9169 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9170 "nss_pheader->pbufsiz)", p->pbufsiz);
9171 PRE_MEM_WRITE("door_return(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
9172 (Addr) p, p->pbufsiz);
9173 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
9174 "\", nss->data)",
9175 (Addr) ((HChar *) p + p->data_off), p->data_len);
9176 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
9177 "\", nss->ext)",
9178 (Addr) ((HChar *) p + p->ext_off), p->ext_len);
9179 } else {
9180 /* response from parent nscd to a child nscd */
9181 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9184 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9185 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
9186 } else {
9187 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
9189 * Be very lax about door syscall handling over unrecognized
9190 * door file descriptors. Do not require that the full buffer
9191 * is initialized when writing. Without this, programs using
9192 * libdoor(3LIB) functionality with completely proprietary
9193 * semantics may report a large number of false positives.
9195 } else {
9196 static Int moans = 3;
9198 /* generic default */
9199 if (moans > 0 && !VG_(clo_xml)) {
9200 moans--;
9201 VG_(umsg)(
9202 "Warning: noted and generically handled door return\n"
9203 " on file descriptor %d (filename: %s).\n"
9204 " This could cause spurious value errors to appear.\n"
9205 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
9206 " Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
9207 fd, pathname);
9209 PRE_MEM_READ("door_return(data_ptr)",
9210 (Addr) data_ptr, data_size);
9215 /* Post-syscall checks for data_ptr contents in a door_return(). */
9216 static void door_return_post_mem_data(ThreadId tid, Addr server_procedure,
9217 void *data_ptr, SizeT data_size)
9219 const OpenDoor *open_door = door_find_by_proc(server_procedure);
9220 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
9222 /* Debug-only printing. */
9223 if (0) {
9224 Int fd = (open_door != NULL) ? open_door->fd : -1;
9225 VG_(printf)("POST(door_return) with fd=%d and filename=%s "
9226 "(nr_doors_recorded=%u)\n",
9227 fd, pathname, nr_doors_recorded);
9230 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9231 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
9233 POST_FIELD_WRITE(p->da_version);
9234 POST_FIELD_WRITE(p->da_iskernel);
9235 POST_MEM_WRITE((Addr) p->vki_da_u.filename,
9236 VG_(strlen)(p->vki_da_u.filename) + 1);
9237 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9238 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
9240 POST_FIELD_WRITE(p->nsc_callnumber);
9241 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9242 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9243 /* request from an application towards nscd */
9244 POST_FIELD_WRITE(p->p_version);
9245 POST_FIELD_WRITE(p->dbd_off);
9246 POST_FIELD_WRITE(p->dbd_len);
9247 POST_FIELD_WRITE(p->key_off);
9248 POST_FIELD_WRITE(p->key_len);
9249 POST_FIELD_WRITE(p->data_off);
9250 POST_FIELD_WRITE(p->data_len);
9251 POST_FIELD_WRITE(p->ext_off);
9252 POST_FIELD_WRITE(p->ext_len);
9253 POST_FIELD_WRITE(p->pbufsiz);
9255 if (p->dbd_len > 0) {
9256 vki_nss_dbd_t *dbd
9257 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
9259 POST_MEM_WRITE((Addr) dbd, sizeof(vki_nss_dbd_t));
9260 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
9261 SizeT headers_size = sizeof(vki_nss_pheader_t)
9262 + sizeof(vki_nss_dbd_t);
9264 if (dbd->o_name != 0) {
9265 HChar *name = (HChar *) p + p->dbd_off + dbd->o_name;
9266 SizeT name_len = VG_(strlen)(name) + 1;
9267 if (name_len <= data_size - headers_size)
9268 POST_MEM_WRITE((Addr) name, name_len);
9270 if (dbd->o_config_name != 0) {
9271 HChar *name = (HChar *) p + p->dbd_off + dbd->o_config_name;
9272 SizeT name_len = VG_(strlen)(name) + 1;
9273 if (name_len <= data_size - headers_size)
9274 POST_MEM_WRITE((Addr) name, name_len);
9276 if (dbd->o_default_config != 0) {
9277 HChar *name = (HChar *) p + p->dbd_off
9278 + dbd->o_default_config;
9279 SizeT name_len = VG_(strlen)(name) + 1;
9280 if (name_len <= data_size - headers_size)
9281 POST_MEM_WRITE((Addr) name, name_len);
9286 if (p->key_len <= data_size - p->key_off)
9287 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), p->key_len);
9288 } else {
9289 /* request from a child nscd towards parent nscd */
9290 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9293 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9294 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
9295 } else {
9296 /* generic default */
9297 POST_MEM_WRITE((Addr) data_ptr, data_size);
9301 PRE(sys_door)
9303 /* int doorfs(long arg1, long arg2, long arg3, long arg4, long arg5,
9304 long subcode); */
9305 ThreadState *tst = VG_(get_ThreadState)(tid);
9306 *flags |= SfMayBlock | SfPostOnFail;
9308 PRINT("sys_door ( %#lx, %#lx, %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3,
9309 ARG4, ARG5, SARG6);
9311 /* Macro PRE_REG_READ6 cannot simply be used because not all ARGs are used
9312 in door() syscall variants. Note that ARG6 (subcode) is always used. */
9313 #define PRE_REG_READ_SIXTH_ONLY \
9314 if (VG_(tdict).track_pre_reg_read) { \
9315 PRA6("door", long, subcode); \
9318 switch (ARG6 /*subcode*/) {
9319 case VKI_DOOR_CREATE:
9320 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9321 PRE_REG_READ_SIXTH_ONLY;
9322 /* Note: the first argument to DOOR_CREATE is a server procedure.
9323 This could lead to a problem if the kernel tries to force the
9324 execution of this procedure, similarly to how signal handlers are
9325 executed. Fortunately, the kernel never does that (for user-space
9326 server procedures). The procedure is always executed by the standard
9327 library. */
9328 break;
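/* Editorial illustration, not part of the original wrapper: a minimal
   user-space door server whose creation goes through this DOOR_CREATE
   subcode.  As noted above, the kernel never calls serv_proc() directly;
   libdoor invokes it from its door_return() plumbing.  The door would then
   normally be published with fattach(3C); path and payload are made up.

     #include <door.h>
     #include <stdio.h>
     #include <sys/types.h>
     #include <unistd.h>

     static void serv_proc(void *cookie, char *argp, size_t arg_size,
                           door_desc_t *dp, uint_t n_desc)
     {
        char reply = 'k';
        door_return(&reply, sizeof reply, NULL, 0);   // answer the caller
     }

     int main(void)
     {
        int did = door_create(serv_proc, NULL, 0);    // the traced subcode
        if (did < 0) {
           perror("door_create");
           return 1;
        }
        pause();   // a real server would fattach() did to a filesystem path
        return 0;
     }
*/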
9329 case VKI_DOOR_REVOKE:
9330 PRE_REG_READ1(long, "door", long, arg1);
9331 PRE_REG_READ_SIXTH_ONLY;
9332 if (!ML_(fd_allowed)(ARG1, "door_revoke", tid, False))
9333 SET_STATUS_Failure(VKI_EBADF);
9334 break;
9335 case VKI_DOOR_INFO:
9336 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
9337 PRE_REG_READ_SIXTH_ONLY;
9338 PRE_MEM_WRITE("door_info(info)", ARG2, sizeof(vki_door_info_t));
9339 break;
9340 case VKI_DOOR_CALL:
9342 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
9343 PRE_REG_READ_SIXTH_ONLY;
9345 Int rval = 0;
9346 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
9348 if (!ML_(fd_allowed)(ARG1, "door_call", tid, False))
9349 rval = VKI_EBADF;
9351 PRE_FIELD_READ("door_call(params->data_ptr)", params->data_ptr);
9352 PRE_FIELD_READ("door_call(params->data_size)", params->data_size);
9353 PRE_FIELD_READ("door_call(params->desc_ptr)", params->desc_ptr);
9354 PRE_FIELD_READ("door_call(params->desc_num)", params->desc_num);
9355 PRE_FIELD_READ("door_call(params->rbuf)", params->rbuf);
9356 PRE_FIELD_READ("door_call(params->rsize)", params->rsize);
9358 if (ML_(safe_to_deref)(params, sizeof(*params))) {
9359 if (params->data_ptr)
9360 door_call_pre_mem_params_data(tid, ARG1, params->data_ptr,
9361 params->data_size);
9363 if (params->desc_ptr) {
9364 SizeT desc_size = params->desc_num * sizeof(*params->desc_ptr);
9365 PRE_MEM_READ("door_call(params->desc_ptr)",
9366 (Addr)params->desc_ptr, desc_size);
9368 /* Do not record information about closed fds if we are going
9369 to fail the syscall, in which case no fds will be closed. */
9370 if ((rval == 0) &&
9371 (ML_(safe_to_deref)(params->desc_ptr, desc_size))) {
9372 rval = pre_check_and_close_fds(tid, "door_call",
9373 params->desc_ptr,
9374 params->desc_num);
9378 if (params->rbuf)
9379 PRE_MEM_WRITE("door_call(params->rbuf)", (Addr)params->rbuf,
9380 params->rsize);
9383 if (rval)
9384 SET_STATUS_Failure(rval);
9386 break;
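/* Editorial sketch (not part of this file) of the client side of the
   DOOR_CALL subcode checked above; it shows which door_arg_t members the
   PRE handler expects to be initialised.  The door path and payload are
   invented for the example.

     #include <door.h>
     #include <fcntl.h>
     #include <stdio.h>
     #include <string.h>

     int main(void)
     {
        int fd = open("/tmp/example_door", O_RDONLY);
        if (fd < 0) {
           perror("open");
           return 1;
        }

        char request[] = "ping";
        char reply[128];

        door_arg_t arg;
        memset(&arg, 0, sizeof arg);
        arg.data_ptr  = request;          // read by the server
        arg.data_size = sizeof request;
        arg.desc_ptr  = NULL;             // no descriptors passed
        arg.desc_num  = 0;
        arg.rbuf      = reply;            // results are written here,
        arg.rsize     = sizeof reply;     // up to rsize bytes

        if (door_call(fd, &arg) != 0) {
           perror("door_call");
           return 1;
        }
        return 0;
     }
*/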
9387 case VKI_DOOR_BIND:
9388 PRE_REG_READ1(long, "door", long, arg1);
9389 PRE_REG_READ_SIXTH_ONLY;
9390 VG_(unimplemented)("DOOR_BIND");
9391 break;
9392 case VKI_DOOR_UNBIND:
9393 PRE_REG_READ0(long, "door");
9394 PRE_REG_READ_SIXTH_ONLY;
9395 VG_(unimplemented)("DOOR_UNBIND");
9396 break;
9397 case VKI_DOOR_UNREFSYS:
9398 PRE_REG_READ0(long, "door");
9399 PRE_REG_READ_SIXTH_ONLY;
9400 VG_(unimplemented)("DOOR_UNREFSYS");
9401 break;
9402 case VKI_DOOR_UCRED:
9403 PRE_REG_READ1(long, "door", long, arg1);
9404 PRE_REG_READ_SIXTH_ONLY;
9405 VG_(unimplemented)("DOOR_UCRED");
9406 break;
9407 case VKI_DOOR_RETURN:
9408 PRE_REG_READ6(long, "door", long, arg1, long, arg2, long, arg3,
9409 long, arg4, long, arg5, long, subcode);
9411 /* Register %esp/%rsp is read and modified by the syscall. */
9412 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(sp)",
9413 VG_O_STACK_PTR, sizeof(UWord));
9414 /* Register %ebp/%rbp is not really read by the syscall; it is only
9415 written by it. Because it is hard to determine when it is written, we
9416 make sure it is always valid prior to making the syscall. */
9417 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(bp)",
9418 VG_O_FRAME_PTR, sizeof(UWord));
9420 door_return_pre_mem_data(tid, tst->os_state.door_return_procedure,
9421 (void *) ARG1, ARG2);
9423 /* Do not tell the tool where the syscall is going to write the
9424 resulting data. It is necessary to skip this check because the data
9425 area starting at ARG4-ARG5 (of length ARG5) is usually on a client
9426 thread stack below the stack pointer and therefore it can be marked
9427 by a tool (for example, Memcheck) as inaccessible. It is ok to skip
9428 this check in this case because if there is something wrong with the
9429 data area then the syscall will fail or the error will be handled by
9430 POST_MEM_WRITE() in the post wrapper. */
9431 /*PRE_MEM_WRITE("door_return(sp)", ARG4 - ARG5, ARG5);*/
9433 if (ARG3) {
9434 vki_door_return_desc_t *desc_env = (vki_door_return_desc_t*)ARG3;
9436 PRE_MEM_READ("door_return(desc_env)", ARG3,
9437 sizeof(vki_door_return_desc_t));
9439 if (ML_(safe_to_deref)(desc_env, sizeof(*desc_env)) &&
9440 desc_env->desc_ptr) {
9441 Int rval;
9443 PRE_MEM_READ("door_return(desc_env->desc_ptr)",
9444 (Addr)desc_env->desc_ptr,
9445 desc_env->desc_num * sizeof(*desc_env->desc_ptr));
9447 rval = pre_check_and_close_fds(tid, "door_return",
9448 desc_env->desc_ptr,
9449 desc_env->desc_num);
9450 if (rval)
9451 SET_STATUS_Failure(rval);
9454 tst->os_state.in_door_return = True;
9455 tst->os_state.door_return_procedure = 0;
9456 break;
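/* Editorial sketch (not part of this file): a server thread usually enters
   the door pool with an empty DOOR_RETURN, which is exactly the
   "nothing to check" case short-circuited in door_return_pre_mem_data()
   above; only later invocations carry result data back to a client.

     #include <door.h>
     #include <stddef.h>

     static void park_in_door_pool(void)
     {
        // First call: no results yet, just wait for an invocation.
        (void) door_return(NULL, 0, NULL, 0);
     }
*/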
9457 case VKI_DOOR_GETPARAM:
9458 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9459 PRE_REG_READ_SIXTH_ONLY;
9460 VG_(unimplemented)("DOOR_GETPARAM");
9461 break;
9462 case VKI_DOOR_SETPARAM:
9463 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9464 PRE_REG_READ_SIXTH_ONLY;
9465 if (!ML_(fd_allowed)(ARG1, "door_setparam", tid, False))
9466 SET_STATUS_Failure(VKI_EBADF);
9467 break;
9468 default:
9469 VG_(unimplemented)("Syswrap of the door call with subcode %ld.", SARG6);
9470 /*NOTREACHED*/
9471 break;
9474 #undef PRE_REG_READ_SIXTH_ONLY
9477 POST(sys_door)
9479 ThreadState *tst = VG_(get_ThreadState)(tid);
9481 vg_assert(SUCCESS || FAILURE);
9483 /* Alter the tst->os_state.in_door_return flag. */
9484 if (ARG6 == VKI_DOOR_RETURN) {
9485 vg_assert(tst->os_state.in_door_return == True);
9486 tst->os_state.in_door_return = False;
9488 /* Inform the tool that %esp/%rsp and %ebp/%rbp were (potentially)
9489 modified. */
9490 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_STACK_PTR,
9491 sizeof(UWord));
9492 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_FRAME_PTR,
9493 sizeof(UWord));
9495 else
9496 vg_assert(tst->os_state.in_door_return == False);
9498 if (FAILURE) {
9499 if (VG_(clo_track_fds)) {
9500 /* See the discussion in pre_check_and_close_fds() to understand this
9501 part. */
9502 Bool loss = False;
9503 switch (ARG6 /*subcode*/) {
9504 case VKI_DOOR_CALL:
9505 if (ERR == VKI_EFAULT || ERR == VKI_EBADF)
9506 loss = True;
9507 break;
9508 case VKI_DOOR_RETURN:
9509 if (ERR == VKI_EFAULT || ERR == VKI_EINVAL)
9510 loss = True;
9511 break;
9512 default:
9513 break;
9515 if (loss)
9516 VG_(message)(Vg_UserMsg, "The door call failed with an "
9517 "unexpected error and information "
9518 "about open file descriptors may "
9519 "now be imprecise.\n");
9522 return;
9525 vg_assert(SUCCESS);
9527 switch (ARG6 /*subcode*/) {
9528 case VKI_DOOR_CREATE:
9529 door_record_server(tid, ARG1, RES);
9530 break;
9531 case VKI_DOOR_REVOKE:
9532 door_record_revoke(tid, ARG1);
9533 if (VG_(clo_track_fds))
9534 ML_(record_fd_close)(ARG1);
9535 break;
9536 case VKI_DOOR_INFO:
9537 POST_MEM_WRITE(ARG2, sizeof(vki_door_info_t));
9538 break;
9539 case VKI_DOOR_CALL:
9541 /* Note that all returned values are stored in rbuf, i.e.
9542 data_ptr and desc_ptr point into this buffer. */
9543 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
9545 if (params->rbuf) {
9546 Addr addr = (Addr)params->rbuf;
9547 if (!VG_(am_find_anon_segment)(addr)) {
9548 /* This segment is new and was mapped by the kernel. */
9549 UInt prot, flags;
9550 SizeT size;
9552 prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
9553 flags = VKI_MAP_ANONYMOUS;
9554 size = VG_PGROUNDUP(params->rsize);
9556 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_door), "
9557 "new segment: vaddr=%#lx, size=%#lx, "
9558 "prot=%#x, flags=%#x, fd=%ld, offset=%#llx\n",
9559 addr, size, prot, flags, (UWord)-1, (ULong)0);
9561 ML_(notify_core_and_tool_of_mmap)(addr, size, prot, flags,
9562 -1, 0);
9564 /* Note: We don't notify the debuginfo reader about this
9565 mapping because there is no debug information stored in
9566 this segment. */
9569 door_call_post_mem_params_rbuf(tid, ARG1, (void *) addr,
9570 params->rsize, params->desc_ptr,
9571 params->desc_num);
9574 if (params->desc_ptr) {
9575 POST_MEM_WRITE((Addr)params->desc_ptr,
9576 params->desc_num * sizeof(vki_door_desc_t));
9577 post_record_fds(tid, "door_call", params->desc_ptr,
9578 params->desc_num);
9581 break;
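/* Editorial illustration: the "new segment" branch above matches the
   documented door_call(3C) behaviour that, when rsize is too small for the
   results, the kernel maps a larger buffer and rewrites rbuf/rsize; the
   client is then expected to munmap() it.  A hedged client-side sketch
   (helper name made up):

     #include <door.h>
     #include <string.h>
     #include <sys/mman.h>

     static int call_and_release(int fd, char *buf, size_t bufsz)
     {
        door_arg_t arg;
        memset(&arg, 0, sizeof arg);
        arg.rbuf  = buf;
        arg.rsize = bufsz;

        if (door_call(fd, &arg) != 0)
           return -1;

        // ... use arg.data_ptr / arg.data_size here ...

        if (arg.rbuf != buf)                   // kernel-allocated buffer
           (void) munmap(arg.rbuf, arg.rsize);
        return 0;
     }
*/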
9582 case VKI_DOOR_BIND:
9583 break;
9584 case VKI_DOOR_UNBIND:
9585 break;
9586 case VKI_DOOR_UNREFSYS:
9587 break;
9588 case VKI_DOOR_UCRED:
9589 break;
9590 case VKI_DOOR_RETURN:
9592 struct vki_door_results *results
9593 = (struct vki_door_results*)VG_(get_SP)(tid);
9595 tst->os_state.door_return_procedure = (Addr)results->pc;
9597 POST_MEM_WRITE((Addr)results, sizeof(*results));
9598 if (results->data_ptr)
9599 door_return_post_mem_data(tid,
9600 tst->os_state.door_return_procedure,
9601 results->data_ptr,
9602 results->data_size);
9603 if (results->desc_ptr) {
9604 POST_MEM_WRITE((Addr)results->desc_ptr,
9605 results->desc_num * sizeof(vki_door_desc_t));
9606 post_record_fds(tid, "door_return", results->desc_ptr,
9607 results->desc_num);
9610 POST_MEM_WRITE((Addr)results->door_info,
9611 sizeof(*results->door_info));
9613 break;
9614 case VKI_DOOR_GETPARAM:
9615 break;
9616 case VKI_DOOR_SETPARAM:
9617 break;
9618 default:
9619 vg_assert(0);
9620 break;
9624 PRE(sys_schedctl)
9626 /* caddr_t schedctl(void); */
9627 /* This syscall returns an address that points to struct sc_shared.
9628 This per-thread structure is used as an interface between libc and
9629 the kernel. */
9630 PRINT("sys_schedctl ( )");
9631 PRE_REG_READ0(long, "schedctl");
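/* Editorial sketch (assumed libc API, not relied on by this wrapper): the
   page returned by this syscall is what the preemption-control interfaces
   in <schedctl.h> hand out to applications; schedctl_init() performs the
   schedctl trap traced here.

     #include <schedctl.h>

     static void short_critical_section(void)
     {
        schedctl_t *sc = schedctl_init();   // maps the per-LWP sc_shared page
        schedctl_start(sc);                 // ask not to be preempted
        // ... briefly touch shared data ...
        schedctl_stop(sc);                  // allow preemption again
     }
*/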
9634 POST(sys_schedctl)
9636 Addr a = RES;
9637 ThreadState *tst = VG_(get_ThreadState)(tid);
9639 /* Stay sane. */
9640 vg_assert((tst->os_state.schedctl_data == 0) ||
9641 (tst->os_state.schedctl_data == a));
9642 tst->os_state.schedctl_data = a;
9644 /* Returned address points to a block in a mapped page. */
9645 if (!VG_(am_find_anon_segment)(a)) {
9646 Addr page = VG_PGROUNDDN(a);
9647 UInt prot = VKI_PROT_READ | VKI_PROT_WRITE;
9648 # if defined(SOLARIS_SCHEDCTL_PAGE_EXEC)
9649 prot |= VKI_PROT_EXEC;
9650 # endif /* SOLARIS_SCHEDCTL_PAGE_EXEC */
9651 UInt flags = VKI_MAP_ANONYMOUS;
9652 /* The kernel always allocates one page for the sc_shared struct. */
9653 SizeT size = VKI_PAGE_SIZE;
9655 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_schedctl), new segment: "
9656 "vaddr=%#lx, size=%#lx, prot=%#x, flags=%#x, fd=-1, "
9657 "offset=0\n", page, size, prot, flags);
9659 /* The kernel always places a redzone before and after the allocated page.
9660 Check this assertion now; the tool can later request to allocate
9661 a Valgrind segment and aspacemgr will place it adjacent. */
9662 const NSegment *seg = VG_(am_find_nsegment)(page - 1);
9663 vg_assert(seg == NULL || seg->kind == SkResvn);
9664 seg = VG_(am_find_nsegment)(page + VKI_PAGE_SIZE);
9665 vg_assert(seg == NULL || seg->kind == SkResvn);
9667 /* The address space manager works with whole pages. */
9668 VG_(am_notify_client_mmap)(page, size, prot, flags, -1, 0);
9670 /* Note: There is no need to notify debuginfo about the new mapping
9671 because it is only an anonymous mapping. */
9672 /* Note: schedctl data are cleaned up in two places:
9673 - for the tool, when the thread exits;
9674 - for the core, in the child's post-fork handler clean_schedctl_data(). */
9677 /* The tool needs per-thread granularity, not whole pages. */
9678 VG_TRACK(new_mem_mmap, a, sizeof(struct vki_sc_shared), True, True, True, 0);
9679 POST_MEM_WRITE(a, sizeof(struct vki_sc_shared));
9682 PRE(sys_pset)
9684 /* Kernel: int pset(int subcode, long arg1, long arg2, long arg3,
9685 long arg4); */
9686 switch (ARG1 /* subcode */) {
9687 case VKI_PSET_CREATE:
9688 /* Libc: int pset_create(psetid_t *newpset); */
9689 PRINT("sys_pset ( %ld, %#lx )", SARG1, ARG2);
9690 PRE_REG_READ2(long, SC2("pset", "create"), int, subcode,
9691 vki_psetid_t *, newpset);
9692 PRE_MEM_WRITE("pset(newpset)", ARG2, sizeof(vki_psetid_t));
9693 break;
9694 case VKI_PSET_DESTROY:
9695 /* Libc: int pset_destroy(psetid_t pset); */
9696 PRINT("sys_pset ( %ld, %ld )", SARG1, SARG2);
9697 PRE_REG_READ2(long, SC2("pset", "destroy"), int, subcode,
9698 vki_psetid_t, pset);
9699 break;
9700 case VKI_PSET_ASSIGN:
9701 /* Libc: int pset_assign(psetid_t pset, processorid_t cpu,
9702 psetid_t *opset); */
9703 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9704 PRE_REG_READ4(long, SC2("pset", "assign"), int, subcode,
9705 vki_psetid_t, pset, vki_processorid_t, cpu,
9706 vki_psetid_t *, opset);
9707 if (ARG4 != 0)
9708 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9709 break;
9710 case VKI_PSET_INFO:
9711 /* Libc: int pset_info(psetid_t pset, int *type, uint_t *numcpus,
9712 processorid_t *cpulist); */
9713 PRINT("sys_pset ( %ld, %ld, %#lx, %#lx, %#lx )", SARG1, SARG2, ARG3,
9714 ARG4, ARG5);
9715 PRE_REG_READ5(long, SC2("pset", "info"), int, subcode, vki_psetid_t, pset,
9716 int *, type, vki_uint_t *, numcpus,
9717 vki_processorid_t *, cpulist);
9718 if (ARG3 != 0)
9719 PRE_MEM_WRITE("pset(type)", ARG3, sizeof(int));
9720 if (ARG4 != 0)
9721 PRE_MEM_WRITE("pset(numcpus)", ARG4, sizeof(vki_uint_t));
9722 if ((ARG4 != 0) && (ARG5 != 0)) {
9723 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9724 if (ML_(safe_to_deref(numcpus, sizeof(vki_uint_t)))) {
9725 PRE_MEM_WRITE("pset(cpulist)", ARG5,
9726 *numcpus * sizeof(vki_processorid_t));
9727 /* If the cpulist buffer is not large enough, it will hold only as many
9728 entries as fit in the buffer. However, numcpus will contain the real
9729 number of cpus, which may be greater than the value originally passed
9730 in. Stash the original value in the unused ARG6; see the sketch below. */
9731 ARG6 = *numcpus;
9734 break;
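/* Editorial sketch (assuming the usual pset_info(3C) interface): numcpus is
   an in/out parameter, which is why the original value is stashed in ARG6
   above and used to clamp the POST_MEM_WRITE in POST(sys_pset).

     #include <stdlib.h>
     #include <sys/pset.h>
     #include <sys/types.h>

     static processorid_t *get_cpus(psetid_t pset, uint_t *ncpus)
     {
        *ncpus = 0;
        if (pset_info(pset, NULL, ncpus, NULL) != 0)   // learn the count
           return NULL;

        processorid_t *cpus = malloc(*ncpus * sizeof *cpus);
        if (cpus == NULL)
           return NULL;
        if (pset_info(pset, NULL, ncpus, cpus) != 0) { // fill the list
           free(cpus);
           return NULL;
        }
        return cpus;
     }
*/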
9735 case VKI_PSET_BIND:
9736 /* Libc: int pset_bind(psetid_t pset, idtype_t idtype, id_t id,
9737 psetid_t *opset); */
9738 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9739 SARG4, ARG5);
9740 PRE_REG_READ5(long, SC2("pset", "bind"), int, subcode, vki_psetid_t, pset,
9741 vki_idtype_t, idtype, vki_id_t, id, vki_psetid_t *, opset);
9742 if (ARG5 != 0)
9743 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9744 break;
9745 case VKI_PSET_BIND_LWP:
9746 /* Libc: int pset_bind_lwp(psetid_t pset, id_t id, pid_t pid,
9747 psetid_t *opset); */
9748 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9749 SARG4, ARG5);
9750 PRE_REG_READ5(long, SC2("pset", "bind_lwp"), int, subcode,
9751 vki_psetid_t, pset, vki_id_t, id, vki_pid_t, pid,
9752 vki_psetid_t *, opset);
9753 if (ARG5 != 0)
9754 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9755 break;
9756 case VKI_PSET_GETLOADAVG:
9757 /* Libc: int pset_getloadavg(psetid_t pset, double loadavg[],
9758 int nelem); */
9759 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9760 PRE_REG_READ4(long, SC2("pset", "getloadavg"), int, subcode,
9761 vki_psetid_t, pset, int *, buf, int, nelem);
9762 if (ARG3 != 0)
9763 PRE_MEM_WRITE("pset(buf)", ARG3, SARG4 * sizeof(int));
9764 break;
9765 case VKI_PSET_LIST:
9766 /* Libc: int pset_list(psetid_t *psetlist, uint_t *numpsets); */
9767 PRINT("sys_pset ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9768 PRE_REG_READ3(long, SC2("pset", "list"), int, subcode,
9769 vki_psetid_t *, psetlist, vki_uint_t *, numpsets);
9770 if (ARG3 != 0)
9771 PRE_MEM_WRITE("pset(numpsets)", ARG3, sizeof(vki_uint_t));
9772 if ((ARG2 != 0) && (ARG3 != 0)) {
9773 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9774 if (ML_(safe_to_deref(numpsets, sizeof(vki_uint_t)))) {
9775 PRE_MEM_WRITE("pset(psetlist)", ARG2,
9776 *numpsets * sizeof(vki_psetid_t));
9777 /* If the psetlist buffer is not large enough, it will hold only as many
9778 entries as fit in the buffer. However, numpsets will contain the
9779 real number of processor sets, which may be greater than the value
9780 originally passed in. Stash the original value in the unused ARG6. */
9781 ARG6 = *numpsets;
9784 break;
9785 # if defined(SOLARIS_PSET_GET_NAME)
9786 case VKI_PSET_GET_NAME:
9787 /* Libc: int pset_get_name(psetid_t psetid, char *buf, uint_t len); */
9788 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9789 PRE_REG_READ4(long, SC2("pset", "get_name"), int, subcode,
9790 vki_psetid_t, pset, char *, buf, vki_uint_t, len);
9791 PRE_MEM_WRITE("pset(buf)", ARG3, ARG4);
9792 break;
9793 # endif /* SOLARIS_PSET_GET_NAME */
9794 case VKI_PSET_SETATTR:
9795 /* Libc: int pset_setattr(psetid_t pset, uint_t attr); */
9796 PRINT("sys_pset ( %ld, %ld, %ld )", SARG1, SARG2, ARG3);
9797 PRE_REG_READ3(long, SC2("pset", "setattr"), int, subcode,
9798 vki_psetid_t, pset, vki_uint_t, attr);
9799 break;
9800 case VKI_PSET_GETATTR:
9801 /* Libc: int pset_getattr(psetid_t pset, uint_t *attr); */
9802 PRINT("sys_pset ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
9803 PRE_REG_READ3(long, SC2("pset", "getattr"), int, subcode,
9804 vki_psetid_t, pset, vki_uint_t *, attr);
9805 PRE_MEM_WRITE("pset(attr)", ARG3, sizeof(vki_uint_t));
9806 break;
9807 case VKI_PSET_ASSIGN_FORCED:
9808 /* Libc: int pset_assign_forced(psetid_t pset, processorid_t cpu,
9809 psetid_t *opset); */
9810 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9811 PRE_REG_READ4(long, SC2("pset", "assign_forced"), int, subcode,
9812 vki_psetid_t, pset, vki_processorid_t, cpu,
9813 vki_psetid_t *, opset);
9814 if (ARG4 != 0)
9815 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9816 break;
9817 default:
9818 VG_(unimplemented)("Syswrap of pset syscall with subcode %ld.", SARG1);
9819 /*NOTREACHED*/
9820 break;
9824 POST(sys_pset)
9826 switch (ARG1 /*subcode*/) {
9827 case VKI_PSET_CREATE:
9828 POST_MEM_WRITE(ARG2, sizeof(vki_psetid_t));
9829 break;
9830 case VKI_PSET_DESTROY:
9831 break;
9832 case VKI_PSET_ASSIGN:
9833 if (ARG4 != 0)
9834 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9835 break;
9836 case VKI_PSET_INFO:
9837 if (ARG3 != 0)
9838 POST_MEM_WRITE(ARG3, sizeof(int));
9839 if (ARG4 != 0)
9840 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
9841 if ((ARG4 != 0) && (ARG5 != 0)) {
9842 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9843 POST_MEM_WRITE(ARG5, MIN(*numcpus, ARG6) * sizeof(vki_processorid_t));
9845 break;
9846 case VKI_PSET_BIND:
9847 if (ARG5 != 0)
9848 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9849 break;
9850 case VKI_PSET_BIND_LWP:
9851 if (ARG5 != 0)
9852 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9853 break;
9854 case VKI_PSET_GETLOADAVG:
9855 if (ARG3 != 0)
9856 POST_MEM_WRITE(ARG3, MIN(SARG4, VKI_LOADAVG_NSTATS) * sizeof(int));
9857 break;
9858 case VKI_PSET_LIST:
9859 if (ARG3 != 0)
9860 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9861 if ((ARG2 != 0) && (ARG3 != 0)) {
9862 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9863 POST_MEM_WRITE(ARG2, MIN(*numpsets, ARG6) * sizeof(vki_psetid_t));
9865 break;
9866 # if defined(SOLARIS_PSET_GET_NAME)
9867 case VKI_PSET_GET_NAME:
9868 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
9869 break;
9870 # endif /* SOLARIS_PSET_GET_NAME */
9871 case VKI_PSET_SETATTR:
9872 break;
9873 case VKI_PSET_GETATTR:
9874 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9875 break;
9876 case VKI_PSET_ASSIGN_FORCED:
9877 if (ARG4 != 0)
9878 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9879 break;
9880 default:
9881 vg_assert(0);
9882 break;
9886 PRE(sys_resolvepath)
9888 /* int resolvepath(const char *path, char *buf, size_t bufsiz); */
9889 PRINT("sys_resolvepath ( %#lx(%s), %#lx, %lu )", ARG1, (HChar *) ARG1, ARG2,
9890 ARG3);
9891 PRE_REG_READ3(long, "resolvepath", const char *, path, char *, buf,
9892 vki_size_t, bufsiz);
9894 PRE_MEM_RASCIIZ("resolvepath(path)", ARG1);
9895 PRE_MEM_WRITE("resolvepath(buf)", ARG2, ARG3);
9898 POST(sys_resolvepath)
9900 POST_MEM_WRITE(ARG2, RES);
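/* Editorial note with a sketch: resolvepath(2) returns the number of bytes
   placed in buf and does not append a terminating NUL, which is why the
   POST handler marks exactly RES bytes as written (declaration assumed to
   live in <unistd.h> on Solaris).

     #include <stdio.h>
     #include <unistd.h>

     static void show_resolved(const char *path)
     {
        char buf[1024];
        int n = resolvepath(path, buf, sizeof buf);
        if (n > 0)
           printf("%.*s\n", n, buf);   // print without assuming a NUL
     }
*/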
9903 PRE(sys_lwp_mutex_timedlock)
9905 /* int lwp_mutex_timedlock(lwp_mutex_t *lp, timespec_t *tsp,
9906 uintptr_t owner); */
9907 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9908 *flags |= SfMayBlock;
9909 PRINT("lwp_mutex_timedlock ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
9910 PRE_REG_READ3(long, "lwp_mutex_timedlock", lwp_mutex_t *, lp,
9911 timespec_t *, tsp, uintptr_t, owner);
9913 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_flag)", lp->vki_mutex_flag);
9914 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_type)", lp->vki_mutex_type);
9915 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_owner)",
9916 lp->vki_mutex_owner);
9917 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_ownerpid)",
9918 lp->vki_mutex_ownerpid);
9919 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_lockw)", lp->vki_mutex_lockw);
9920 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_lockw)",
9921 lp->vki_mutex_lockw);*/
9922 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_waiters)",
9923 lp->vki_mutex_waiters);
9924 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_waiters)",
9925 lp->vki_mutex_waiters);*/
9926 if (ARG2) {
9927 PRE_MEM_READ("lwp_mutex_timedlock(tsp)", ARG2, sizeof(vki_timespec_t));
9928 /*PRE_MEM_WRITE("lwp_mutex_timedlock(tsp)", ARG2,
9929 sizeof(vki_timespec_t));*/
9933 POST(sys_lwp_mutex_timedlock)
9935 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9936 POST_FIELD_WRITE(lp->vki_mutex_owner);
9937 POST_FIELD_WRITE(lp->vki_mutex_ownerpid);
9938 POST_FIELD_WRITE(lp->vki_mutex_lockw);
9939 POST_FIELD_WRITE(lp->vki_mutex_waiters);
9940 if (ARG2)
9941 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
9944 PRE(sys_lwp_rwlock_sys)
9946 /* int lwp_rwlock_sys(int subcode, lwp_rwlock_t *rwlp, timespec_t *tsp); */
9947 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
9948 switch (ARG1 /*subcode*/) {
9949 case 0:
9950 case 1:
9951 case 2:
9952 case 3:
9953 *flags |= SfMayBlock;
9954 switch (ARG1 /*subcode*/) {
9955 case 0:
9956 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9957 PRE_REG_READ3(long, SC2("lwp_rwlock", "rdlock"), int, subcode,
9958 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9959 break;
9960 case 1:
9961 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9962 PRE_REG_READ3(long, SC2("lwp_rwlock", "wrlock"), int, subcode,
9963 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9964 break;
9965 case 2:
9966 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9967 PRE_REG_READ2(long, SC2("lwp_rwlock", "tryrdlock"), int, subcode,
9968 lwp_rwlock_t *, rwlp);
9969 break;
9970 case 3:
9971 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9972 PRE_REG_READ2(long, SC2("lwp_rwlock", "trywrlock"), int, subcode,
9973 lwp_rwlock_t *, rwlp);
9974 break;
9975 default:
9976 vg_assert(0);
9977 break;
9980 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_type)", rwlp->vki_rwlock_type);
9981 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
9982 rwlp->vki_rwlock_readers);
9983 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
9984 rwlp->vki_rwlock_readers);*/
9986 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
9987 rwlp->mutex.vki_mutex_type);
9988 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_owner)",
9989 rwlp->mutex.vki_mutex_owner);
9990 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_ownerpid)",
9991 rwlp->mutex.vki_mutex_ownerpid);
9992 /* The mutex_lockw member is not really read by the kernel for this
9993 syscall, but it seems better to mark it as read because, when locking
9994 an rwlock, the associated mutex has to be locked. */
9995 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_lockw)",
9996 rwlp->mutex.vki_mutex_lockw);
9997 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_lockw)",
9998 rwlp->mutex.vki_mutex_lockw);*/
9999 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_waiters)",
10000 rwlp->mutex.vki_mutex_waiters);
10001 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_waiters)",
10002 rwlp->mutex.vki_mutex_waiters);*/
10004 if ((ARG1 == 0 || ARG1 == 1) && ARG3)
10005 PRE_MEM_READ("lwp_rwlock(tsp)", ARG3, sizeof(vki_timespec_t));
10006 break;
10007 case 4:
10008 PRINT("sys_lwp_rwlock( %ld, %#lx )", SARG1, ARG2);
10009 PRE_REG_READ2(long, SC2("lwp_rwlock", "unlock"), int, subcode,
10010 lwp_rwlock_t *, rwlp);
10011 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
10012 rwlp->mutex.vki_mutex_type);
10013 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
10014 rwlp->vki_rwlock_readers);
10015 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
10016 rwlp->vki_rwlock_readers);*/
10017 break;
10018 default:
10019 VG_(unimplemented)("Syswrap of the lwp_rwlock_sys call with subcode %ld.",
10020 SARG1);
10021 /*NOTREACHED*/
10022 break;
10026 POST(sys_lwp_rwlock_sys)
10028 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
10029 switch (ARG1 /*subcode*/) {
10030 case 0:
10031 case 1:
10032 case 2:
10033 case 3:
10034 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
10035 POST_FIELD_WRITE(rwlp->vki_rwlock_owner);
10036 POST_FIELD_WRITE(rwlp->vki_rwlock_ownerpid);
10037 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_lockw);
10038 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_waiters);
10039 break;
10040 case 4:
10041 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
10042 break;
10043 default:
10044 vg_assert(0);
10045 break;
10049 PRE(sys_lwp_sema_timedwait)
10051 /* int lwp_sema_timedwait(lwp_sema_t *sema, timespec_t *timeout,
10052 int check_park); */
10053 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
10054 *flags |= SfMayBlock;
10055 PRINT("sys_lwp_sema_timedwait ( %#lx, %#lx, %ld )", ARG1, ARG2, SARG3);
10056 PRE_REG_READ3(long, "lwp_sema_timedwait", lwp_sema_t *, sema,
10057 timespec_t *, timeout, int, check_park);
10059 PRE_FIELD_READ("lwp_sema_timedwait(sema->type)", sema->vki_sema_type);
10060 PRE_FIELD_READ("lwp_sema_timedwait(sema->count)", sema->vki_sema_count);
10061 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->count)",
10062 sema->vki_sema_count);*/
10063 PRE_FIELD_READ("lwp_sema_timedwait(sema->waiters)", sema->vki_sema_waiters);
10064 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->waiters)",
10065 sema->vki_sema_waiters);*/
10066 if (ARG2) {
10067 PRE_MEM_READ("lwp_sema_timedwait(timeout)", ARG2,
10068 sizeof(vki_timespec_t));
10069 /*PRE_MEM_WRITE("lwp_sema_timedwait(timeout)", ARG2,
10070 sizeof(vki_timespec_t));*/
10074 POST(sys_lwp_sema_timedwait)
10076 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
10077 POST_FIELD_WRITE(sema->vki_sema_count);
10078 POST_FIELD_WRITE(sema->vki_sema_waiters);
10079 if (ARG2)
10080 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
10083 PRE(sys_zone)
10085 /* Kernel: long zone(int cmd, void *arg1, void *arg2, void *arg3,
10086 void *arg4);
10088 switch (ARG1 /*cmd*/) {
10089 case VKI_ZONE_CREATE:
10090 /* Libc: zoneid_t zone_create(const char *name, const char *root,
10091 const struct priv_set *privs,
10092 const char *rctls, size_t rctlsz,
10093 const char *zfs, size_t zfssz,
10094 int *extended_error, int match,
10095 int doi, const bslabel_t *label,
10096 int flags);
10097 Kernel: zoneid_t zone_create(zone_def *zd);
10099 PRINT("sys_zone ( %ld, %#lx )", SARG1, ARG2);
10100 PRE_REG_READ2(long, SC2("zone", "create"), int, cmd,
10101 vki_zone_def *, zd);
10103 vki_zone_def *zd = (vki_zone_def *) ARG2;
10104 PRE_FIELD_READ("zone(zd.zone_name)", zd->zone_name);
10105 PRE_FIELD_READ("zone(zd.zone_root)", zd->zone_root);
10106 PRE_FIELD_READ("zone(zd.zone_privs)", zd->zone_privs);
10107 PRE_FIELD_READ("zone(zd.zone_privssz)", zd->zone_privssz);
10108 PRE_FIELD_READ("zone(zd.rctlbuf)", zd->rctlbuf);
10109 PRE_FIELD_READ("zone(zd.rctlbufsz)", zd->rctlbufsz);
10110 PRE_FIELD_READ("zone(zd.zfsbuf)", zd->zfsbuf);
10111 PRE_FIELD_READ("zone(zd.zfsbufsz)", zd->zfsbufsz);
10112 PRE_FIELD_READ("zone(zd.extended_error)", zd->extended_error);
10113 PRE_FIELD_READ("zone(zd.match)", zd->match);
10114 PRE_FIELD_READ("zone(zd.doi)", zd->doi);
10115 PRE_FIELD_READ("zone(zd.label)", zd->label);
10116 PRE_FIELD_READ("zone(zd.flags)", zd->flags);
10118 if (ML_(safe_to_deref((void *)ARG2, sizeof(vki_zone_def)))) {
10119 if (zd->zone_name)
10120 PRE_MEM_RASCIIZ("zone(zd.zone_name)", (Addr) zd->zone_name);
10121 if (zd->zone_root)
10122 PRE_MEM_RASCIIZ("zone(zd.zone_root)", (Addr) zd->zone_root);
10123 PRE_MEM_READ("zone(zd.zone_privs)", (Addr) zd->zone_privs,
10124 zd->zone_privssz);
10125 PRE_MEM_READ("zone(zd.rctlbuf)", (Addr) zd->rctlbuf,
10126 zd->rctlbufsz);
10127 PRE_MEM_READ("zone(zd.zfsbuf)",
10128 (Addr) zd->zfsbuf, zd->zfsbufsz);
10129 if (zd->label)
10130 PRE_MEM_READ("zone(zd.label)", (Addr) zd->label,
10131 sizeof(vki_bslabel_t));
10133 break;
10134 case VKI_ZONE_DESTROY:
10135 /* Libc: int zone_destroy(zoneid_t zoneid); */
10136 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10137 PRE_REG_READ2(long, SC2("zone", "destroy"), int, cmd,
10138 vki_zoneid_t, zoneid);
10139 break;
10140 case VKI_ZONE_GETATTR:
10141 /* Libc: ssize_t zone_getattr(zoneid_t zoneid, int attr,
10142 void *valp, size_t size);
10144 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %ld )",
10145 SARG1, SARG2, SARG3, ARG4, SARG5);
10146 PRE_REG_READ5(long, SC2("zone", "getattr"), int, cmd,
10147 vki_zoneid_t, zoneid, int, attr, void *, valp,
10148 vki_size_t, size);
10149 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
10150 break;
10151 case VKI_ZONE_ENTER:
10152 /* Libc: int zone_enter(zoneid_t zoneid); */
10153 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10154 PRE_REG_READ2(long, SC2("zone", "enter"), int, cmd,
10155 vki_zoneid_t, zoneid);
10156 break;
10157 case VKI_ZONE_LIST:
10158 /* Libc: int zone_list(zoneid_t *zonelist, uint_t *numzones); */
10159 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
10160 PRE_REG_READ3(long, SC2("zone", "list"), int, cmd,
10161 vki_zoneid_t *, zonelist, vki_uint_t *, numzones);
10163 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
10165 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
10166 if (ARG2)
10167 PRE_MEM_WRITE("zone(zonelist)", ARG2,
10168 *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
10170 break;
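/* Editorial sketch (assuming the libc zone_list(3C) signature mirrored by
   this subcode): numzones is in/out and zonelist may be NULL, matching the
   conditional pre-checks above.

     #include <stdlib.h>
     #include <sys/types.h>
     #include <zone.h>

     static zoneid_t *list_zones(uint_t *nz)
     {
        *nz = 0;
        if (zone_list(NULL, nz) != 0)        // first call: learn the count
           return NULL;

        zoneid_t *ids = malloc(*nz * sizeof *ids);
        if (ids != NULL && zone_list(ids, nz) != 0) {
           free(ids);
           ids = NULL;
        }
        return ids;
     }
*/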
10171 case VKI_ZONE_SHUTDOWN:
10172 /* Libc: int zone_shutdown(zoneid_t zoneid); */
10173 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10174 PRE_REG_READ2(long, SC2("zone", "shutdown"), int, cmd,
10175 vki_zoneid_t, zoneid);
10176 break;
10177 case VKI_ZONE_LOOKUP:
10178 /* Libc: zoneid_t zone_lookup(const char *name); */
10179 PRINT("sys_zone ( %ld, %#lx(%s) )", SARG1, ARG2, (HChar *) ARG2);
10180 PRE_REG_READ2(long, SC2("zone", "lookup"), int, cmd,
10181 const char *, name);
10182 if (ARG2)
10183 PRE_MEM_RASCIIZ("zone(name)", ARG2);
10184 break;
10185 case VKI_ZONE_BOOT:
10186 /* Libc: int zone_boot(zoneid_t zoneid); */
10187 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10188 PRE_REG_READ2(long, SC2("zone", "boot"), int, cmd,
10189 vki_zoneid_t, zoneid);
10190 break;
10191 case VKI_ZONE_SETATTR:
10192 /* Libc: int zone_setattr(zoneid_t zoneid, int attr, void *valp,
10193 size_t size);
10195 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %lu )",
10196 SARG1, SARG2, SARG3, ARG4, ARG5);
10197 PRE_REG_READ5(long, SC2("zone", "setattr"), int, cmd,
10198 vki_zoneid_t, zoneid, int, attr, void *, valp,
10199 vki_size_t, size);
10200 PRE_MEM_READ("zone(valp)", ARG4, ARG5);
10201 break;
10202 case VKI_ZONE_ADD_DATALINK:
10203 /* Libc: int zone_add_datalink(zoneid_t zoneid,
10204 datalink_id_t linkid);
10206 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10207 PRE_REG_READ3(long, SC2("zone", "add_datalink"), int, cmd,
10208 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
10209 break;
10210 case VKI_ZONE_DEL_DATALINK:
10211 /* Libc: int zone_remove_datalink(zoneid_t zoneid,
10212 datalink_id_t linkid);
10214 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10215 PRE_REG_READ3(long, SC2("zone", "del_datalink"), int, cmd,
10216 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
10217 break;
10218 case VKI_ZONE_CHECK_DATALINK:
10219 /* Libc: int zone_check_datalink(zoneid_t *zoneidp,
10220 datalink_id_t linkid);
10222 PRINT("sys_zone ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10223 PRE_REG_READ3(long, SC2("zone", "check_datalink"), int, cmd,
10224 vki_zoneid_t *, zoneidp, vki_datalink_id_t, linkid);
10225 PRE_MEM_WRITE("zone(zoneidp)", ARG2, sizeof(vki_zoneid_t));
10226 break;
10227 case VKI_ZONE_LIST_DATALINK:
10228 /* Libc: int zone_list_datalink(zoneid_t zoneid, int *dlnump,
10229 datalink_id_t *linkids);
10231 PRINT("sys_zone ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
10232 PRE_REG_READ4(long, SC2("zone", "list_datalink"), int, cmd,
10233 vki_zoneid_t, zoneid, int *, dlnump,
10234 vki_datalink_id_t *, linkids);
10236 PRE_MEM_WRITE("zone(dlnump)", ARG3, sizeof(int));
10237 if (ML_(safe_to_deref((void *) ARG3, sizeof(int)))) {
10238 if (ARG4)
10239 PRE_MEM_WRITE("zone(linkids)", ARG4,
10240 *(int *) ARG3 * sizeof(vki_datalink_id_t));
10242 break;
10243 #if defined(SOLARIS_ZONE_DEFUNCT)
10244 case VKI_ZONE_LIST_DEFUNCT:
10245 /* Libc: int zone_list_defunct(uint64_t *uniqidlist,
10246 uint_t *numzones);
10248 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
10249 PRE_REG_READ3(long, SC2("zone", "list_defunct"), int, cmd,
10250 vki_uint64_t *, uniqidlist, vki_uint_t *, numzones);
10252 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
10254 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
10255 if (ARG2)
10256 PRE_MEM_WRITE("zone(uniqidlist)", ARG2,
10257 *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
10259 break;
10260 case VKI_ZONE_GETATTR_DEFUNCT:
10261 /* Libc: ssize_t zone_getattr_defunct(uint64_t uniqid, int attr,
10262 void *valp, size_t size);
10263 Kernel: ssize_t zone_getattr_defunct(uint64_t *uniqid, int attr,
10264 void *valp, size_t size);
10266 PRINT("sys_zone ( %ld, %#lx, %ld, %#lx, %lu )",
10267 SARG1, ARG2, SARG3, ARG4, ARG5);
10268 PRE_REG_READ5(long, SC2("zone", "getattr_defunct"), int, cmd,
10269 vki_uint64_t *, uniqid, int, attr,
10270 void *, valp, vki_size_t, size);
10272 PRE_MEM_READ("zone(uniqid)", ARG2, sizeof(vki_uint64_t));
10273 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
10274 break;
10275 #endif /* SOLARIS_ZONE_DEFUNCT */
10276 default:
10277 VG_(unimplemented)("Syswrap of the zone call with cmd %ld.", SARG1);
10278 /*NOTREACHED*/
10279 break;
10284 POST(sys_zone)
10286 switch (ARG1 /*cmd*/) {
10287 case VKI_ZONE_CREATE:
10288 case VKI_ZONE_DESTROY:
10289 break;
10290 case VKI_ZONE_GETATTR:
10291 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
10292 break;
10293 case VKI_ZONE_ENTER:
10294 break;
10295 case VKI_ZONE_LIST:
10296 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
10297 break;
10298 case VKI_ZONE_SHUTDOWN:
10299 case VKI_ZONE_LOOKUP:
10300 case VKI_ZONE_BOOT:
10301 case VKI_ZONE_SETATTR:
10302 case VKI_ZONE_ADD_DATALINK:
10303 case VKI_ZONE_DEL_DATALINK:
10304 break;
10305 case VKI_ZONE_CHECK_DATALINK:
10306 POST_MEM_WRITE(ARG2, sizeof(vki_zoneid_t));
10307 break;
10308 case VKI_ZONE_LIST_DATALINK:
10309 POST_MEM_WRITE(ARG4, *(int *) ARG3 * sizeof(vki_datalink_id_t));
10310 break;
10311 #if defined(SOLARIS_ZONE_DEFUNCT)
10312 case VKI_ZONE_LIST_DEFUNCT:
10313 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
10314 break;
10315 case VKI_ZONE_GETATTR_DEFUNCT:
10316 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
10317 break;
10318 #endif /* SOLARIS_ZONE_DEFUNCT */
10319 default:
10320 vg_assert(0);
10321 break;
10325 PRE(sys_getcwd)
10327 /* int getcwd(char *buf, size_t size); */
10328 /* Note: Generic getcwd() syswrap can't be used because it expects
10329 a different return value. */
10330 PRINT("sys_getcwd ( %#lx, %lu )", ARG1, ARG2);
10331 PRE_REG_READ2(long, "getcwd", char *, buf, vki_size_t, size);
10332 PRE_MEM_WRITE("getcwd(buf)", ARG1, ARG2);
10335 POST(sys_getcwd)
10337 POST_MEM_WRITE(ARG1, VG_(strlen)((HChar*)ARG1) + 1);
10340 PRE(sys_so_socket)
10342 /* int so_socket(int family, int type, int protocol, char *devpath,
10343 int version); */
10344 PRINT("sys_so_socket ( %ld, %ld, %ld, %#lx(%s), %ld)", SARG1, SARG2, SARG3,
10345 ARG4, (HChar *) ARG4, SARG5);
10346 PRE_REG_READ5(long, "socket", int, family, int, type, int, protocol,
10347 char *, devpath, int, version);
10348 if (ARG4)
10349 PRE_MEM_RASCIIZ("socket(devpath)", ARG4);
10352 POST(sys_so_socket)
10354 SysRes r;
10355 r = ML_(generic_POST_sys_socket)(tid, VG_(mk_SysRes_Success)(RES));
10356 SET_STATUS_from_SysRes(r);
10359 PRE(sys_so_socketpair)
10361 /* int so_socketpair(int sv[2]); */
10362 /* This syscall is used to connect two already created sockets together. */
10363 PRINT("sys_so_socketpair ( %#lx )", ARG1);
10364 PRE_REG_READ1(long, "socketpair", int *, sv);
10365 PRE_MEM_READ("socketpair(sv)", ARG1, 2 * sizeof(int));
10366 /*PRE_MEM_WRITE("socketpair(sv)", ARG1, 2 * sizeof(int));*/
10367 if (ML_(safe_to_deref)((void*)ARG1, 2 * sizeof(int))) {
10368 int *fds = (int*)ARG1;
10369 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, False))
10370 SET_STATUS_Failure(VKI_EBADF);
10371 else if (!ML_(fd_allowed)(fds[1], "socketpair", tid, False))
10372 SET_STATUS_Failure(VKI_EBADF);
10376 POST(sys_so_socketpair)
10378 /* The kernel can return new file descriptors; in such a case we have
10379 to validate them. */
10380 int *fds = (int*)ARG1;
10381 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
10382 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, True))
10383 SET_STATUS_Failure(VKI_EMFILE);
10384 if (!ML_(fd_allowed)(fds[1], "socketpair", tid, True))
10385 SET_STATUS_Failure(VKI_EMFILE);
10386 if (FAILURE) {
10387 /* One or both of the file descriptors were not allowed; close the
10388 newly created file descriptors but do not close the already recorded
10389 ones. */
10390 if (!ML_(fd_recorded)(fds[0]))
10391 VG_(close)(fds[0]);
10392 if (!ML_(fd_recorded)(fds[1]))
10393 VG_(close)(fds[1]);
10395 else if (VG_(clo_track_fds)) {
10396 /* Everything went better than expected; record the newly created file
10397 descriptors. Note: If the kernel actually returns the original file
10398 descriptors, then ML_(record_fd_open_nameless) notices that these
10399 file descriptors have already been recorded. */
10400 ML_(record_fd_open_nameless)(tid, fds[0]);
10401 ML_(record_fd_open_nameless)(tid, fds[1]);
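/* Most of the socket wrappers below (bind, listen, accept, connect,
   shutdown, getpeername, getsockname, getsockopt, setsockopt) take an extra
   trailing 'version' argument on the kernel side, so their PRE handlers read
   one more register than the libc prototypes suggest and otherwise defer to
   the generic socket helpers. */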
10405 PRE(sys_bind)
10407 /* int bind(int s, struct sockaddr *name, socklen_t namelen,
10408 int version); */
10409 PRINT("sys_bind ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10410 PRE_REG_READ4(long, "bind", int, s, struct sockaddr *, name,
10411 vki_socklen_t, namelen, int, version);
10412 ML_(generic_PRE_sys_bind)(tid, ARG1, ARG2, ARG3);
10415 PRE(sys_listen)
10417 /* int listen(int s, int backlog, int version); */
10418 PRINT("sys_listen ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10419 PRE_REG_READ3(long, "listen", int, s, int, backlog, int, version);
10422 PRE(sys_accept)
10424 #if defined(SOLARIS_NEW_ACCEPT_SYSCALL)
10425 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
10426 int version, int flags); */
10427 *flags |= SfMayBlock;
10428 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
10429 SARG5);
10430 PRE_REG_READ5(long, "accept", int, s, struct sockaddr *, addr,
10431 socklen_t *, addrlen, int, version, int, flags);
10432 #else
10433 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
10434 int version); */
10435 *flags |= SfMayBlock;
10436 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
10437 PRE_REG_READ4(long, "accept", int, s, struct sockaddr *, addr,
10438 socklen_t *, addrlen, int, version);
10439 #endif /* SOLARIS_NEW_ACCEPT_SYSCALL */
10440 ML_(generic_PRE_sys_accept)(tid, ARG1, ARG2, ARG3);
10443 POST(sys_accept)
10445 SysRes r;
10446 r = ML_(generic_POST_sys_accept)(tid, VG_(mk_SysRes_Success)(RES),
10447 ARG1, ARG2, ARG3);
10448 SET_STATUS_from_SysRes(r);
10451 PRE(sys_connect)
10453 /* int connect(int s, struct sockaddr *name, socklen_t namelen,
10454 int version); */
10455 *flags |= SfMayBlock;
10456 PRINT("sys_connect ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10457 PRE_REG_READ4(long, "connect", int, s, struct sockaddr *, name,
10458 vki_socklen_t, namelen, int, version);
10459 ML_(generic_PRE_sys_connect)(tid, ARG1, ARG2, ARG3);
10462 PRE(sys_shutdown)
10464 /* Kernel: int shutdown(int sock, int how, int version);
10465      Libc: int shutdown(int sock, int how); */
10467 *flags |= SfMayBlock;
10468 PRINT("sys_shutdown ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10469 PRE_REG_READ3(int, "shutdown", int, sock, int, how, int, version);
10471 /* Be strict. */
10472 if (!ML_(fd_allowed)(ARG1, "shutdown", tid, False))
10473 SET_STATUS_Failure(VKI_EBADF);
10476 PRE(sys_recv)
10478 /* ssize_t recv(int s, void *buf, size_t len, int flags); */
10479 *flags |= SfMayBlock;
10480 PRINT("sys_recv ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10481 PRE_REG_READ4(long, "recv", int, s, void *, buf, vki_size_t, len,
10482 int, flags);
10483 ML_(generic_PRE_sys_recv)(tid, ARG1, ARG2, ARG3);
10486 POST(sys_recv)
10488 ML_(generic_POST_sys_recv)(tid, RES, ARG1, ARG2, ARG3);
10491 PRE(sys_recvfrom)
10493 /* ssize_t recvfrom(int s, void *buf, size_t len, int flags,
10494 struct sockaddr *from, socklen_t *fromlen); */
10495 *flags |= SfMayBlock;
10496 PRINT("sys_recvfrom ( %ld, %#lx, %lu, %ld, %#lx, %#lx )", SARG1, ARG2, ARG3,
10497 SARG4, ARG5, ARG6);
10498 PRE_REG_READ6(long, "recvfrom", int, s, void *, buf, vki_size_t, len,
10499 int, flags, struct sockaddr *, from, socklen_t *, fromlen);
10500 ML_(generic_PRE_sys_recvfrom)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10503 POST(sys_recvfrom)
10505 ML_(generic_POST_sys_recvfrom)(tid, VG_(mk_SysRes_Success)(RES),
10506 ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10509 PRE(sys_recvmsg)
10511 /* ssize_t recvmsg(int s, struct msghdr *msg, int flags); */
10512 *flags |= SfMayBlock;
10513 PRINT("sys_recvmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10514 PRE_REG_READ3(long, "recvmsg", int, s, struct msghdr *, msg, int, flags);
10515 ML_(generic_PRE_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
10518 POST(sys_recvmsg)
10520 ML_(generic_POST_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2, RES);
10523 PRE(sys_send)
10525 /* ssize_t send(int s, const void *msg, size_t len, int flags); */
10526 *flags |= SfMayBlock;
10527 PRINT("sys_send ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10528 PRE_REG_READ4(long, "send", int, s, const void *, msg, vki_size_t, len,
10529 int, flags);
10530 ML_(generic_PRE_sys_send)(tid, ARG1, ARG2, ARG3);
10533 PRE(sys_sendmsg)
10535 /* ssize_t sendmsg(int s, const struct msghdr *msg, int flags); */
10536 *flags |= SfMayBlock;
10537 PRINT("sys_sendmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10538 PRE_REG_READ3(long, "sendmsg", int, s, const struct msghdr *, msg,
10539 int, flags);
10540 ML_(generic_PRE_sys_sendmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
10543 PRE(sys_sendto)
10545 /* ssize_t sendto(int s, const void *msg, size_t len, int flags,
10546 const struct sockaddr *to, int tolen); */
10547 *flags |= SfMayBlock;
10548 PRINT("sys_sendto ( %ld, %#lx, %lu, %ld, %#lx, %ld )", SARG1, ARG2, ARG3,
10549 SARG4, ARG5, SARG6);
10550 PRE_REG_READ6(long, "sendto", int, s, const void *, msg, vki_size_t, len,
10551 int, flags, const struct sockaddr *, to, int, tolen);
10552 ML_(generic_PRE_sys_sendto)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10555 PRE(sys_getpeername)
10557 /* Kernel: int getpeername(int s, struct sockaddr *name,
10558 socklen_t *namelen, int version);
10559 Libc: int getpeername(int s, struct sockaddr *name,
10560      socklen_t *namelen); */
10562 *flags |= SfMayBlock;
10563 PRINT("sys_getpeername ( %ld, %#lx, %#lx, %ld )",
10564 SARG1, ARG2, ARG3, SARG4);
10565 PRE_REG_READ4(long, "getpeername", int, s, struct vki_sockaddr *, name,
10566 vki_socklen_t *, namelen, int, version);
10567 ML_(buf_and_len_pre_check)(tid, ARG2, ARG3, "getpeername(name)",
10568 "getpeername(namelen)");
10570 /* Be strict. */
10571 if (!ML_(fd_allowed)(ARG1, "getpeername", tid, False))
10572 SET_STATUS_Failure(VKI_EBADF);
10575 POST(sys_getpeername)
10577 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES),
10578 ARG2, ARG3, "getpeername(namelen)");
10581 PRE(sys_getsockname)
10583 /* int getsockname(int s, struct sockaddr *name, socklen_t *namelen,
10584 int version); */
10585 PRINT("sys_getsockname ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
10586 PRE_REG_READ4(long, "getsockname", int, s, struct sockaddr *, name,
10587 socklen_t *, namelen, int, version);
10588 ML_(generic_PRE_sys_getsockname)(tid, ARG1, ARG2, ARG3);
10591 POST(sys_getsockname)
10593 ML_(generic_POST_sys_getsockname)(tid, VG_(mk_SysRes_Success)(RES),
10594 ARG1, ARG2, ARG3);
10597 PRE(sys_getsockopt)
10599 /* int getsockopt(int s, int level, int optname, void *optval,
10600 socklen_t *optlen, int version); */
10601 PRINT("sys_getsockopt ( %ld, %ld, %ld, %#lx, %#lx, %ld )", SARG1, SARG2,
10602 SARG3, ARG4, ARG5, SARG6);
10603 PRE_REG_READ6(long, "getsockopt", int, s, int, level, int, optname,
10604 void *, optval, socklen_t *, optlen, int, version);
10605 if (ARG4)
10606 ML_(buf_and_len_pre_check)(tid, ARG4, ARG5, "getsockopt(optval)",
10607 "getsockopt(optlen)");
10610 POST(sys_getsockopt)
10612 if (ARG4)
10613 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES), ARG4,
10614 ARG5, "getsockopt(optlen_out)");
10617 PRE(sys_setsockopt)
10619 /* int setsockopt(int s, int level, int optname, const void *optval,
10620 socklen_t optlen, int version); */
10621 PRINT("sys_setsockopt ( %ld, %ld, %ld, %#lx, %lu, %ld )", SARG1, SARG2,
10622 SARG3, ARG4, ARG5, SARG6);
10623 PRE_REG_READ6(long, "setsockopt", int, s, int, level, int, optname,
10624 const void *, optval, vki_socklen_t, optlen, int, version);
10625 ML_(generic_PRE_sys_setsockopt)(tid, ARG1, ARG2, ARG3, ARG4, ARG5);
10628 PRE(sys_lwp_mutex_register)
10630 /* int lwp_mutex_register(lwp_mutex_t *mp, caddr_t uaddr); */
10631 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t*)ARG1;
10632 PRINT("sys_lwp_mutex_register ( %#lx, %#lx )", ARG1, ARG2);
10633 PRE_REG_READ2(long, "lwp_mutex_register", lwp_mutex_t *, mp,
10634 void *, uaddr);
10635 PRE_FIELD_READ("lwp_mutex_register(mp->mutex_type)", mp->vki_mutex_type);
10638 PRE(sys_uucopy)
10640 /* int uucopy(const void *s1, void *s2, size_t n); */
10641 PRINT("sys_uucopy ( %#lx, %#lx, %lu )", ARG1, ARG2, ARG3);
10642 PRE_REG_READ3(long, "uucopy", const void *, s1, void *, s2, vki_size_t, n);
10644 /* Stay away from V segments. */
10645 if (!ML_(valid_client_addr)(ARG1, ARG3, tid, "uucopy(s1)")) {
10646 SET_STATUS_Failure(VKI_EFAULT);
10648 if (!ML_(valid_client_addr)(ARG2, ARG3, tid, "uucopy(s2)")) {
10649 SET_STATUS_Failure(VKI_EFAULT);
10652 if (FAILURE)
10653 return;
10655 /* XXX This is actually incorrect, we should be able to copy undefined
10656 values through to their new destination. */
10657 PRE_MEM_READ("uucopy(s1)", ARG1, ARG3);
10658 PRE_MEM_WRITE("uucopy(s2)", ARG2, ARG3);
10661 POST(sys_uucopy)
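/* On success the kernel has filled the whole destination range. */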
10663 POST_MEM_WRITE(ARG2, ARG3);
10666 PRE(sys_umount2)
10668 /* int umount2(const char *file, int mflag); */
10669 *flags |= SfMayBlock;
10670 PRINT("sys_umount2 ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
10671 PRE_REG_READ2(long, "umount2", const char *, file, int, mflag);
10672 PRE_MEM_RASCIIZ("umount2(file)", ARG1);
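/* The fast traps below take no arguments, hence the bare PRE_REG_READ0
   declarations; gethrt and getzoneoffset additionally hand back the address
   of a mapping whose ownership is adjusted in their POST wrappers. */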
10675 PRE(fast_gethrtime)
10677 PRINT("fast_gethrtime ( )");
10678 PRE_REG_READ0(long, "gethrtime");
10681 PRE(fast_gethrvtime)
10683 PRINT("fast_gethrvtime ( )");
10684 PRE_REG_READ0(long, "gethrvtime");
10687 PRE(fast_gethrestime)
10689 /* Used by gettimeofday(3C). */
10690 PRINT("fast_gethrestime ( )");
10691 PRE_REG_READ0(long, "gethrestime");
10694 PRE(fast_getlgrp)
10696 /* Fasttrap number shared between gethomelgroup() and getcpuid(). */
10697 PRINT("fast_getlgrp ( )");
10698 PRE_REG_READ0(long, "getlgrp");
10701 #if defined(SOLARIS_GETHRT_FASTTRAP)
10702 PRE(fast_gethrt)
10704 /* Used by gethrtime(3C) when tsp & tscp HWCAPs are present. */
10705 PRINT("fast_gethrt ( )");
10706 PRE_REG_READ0(long, "gethrt");
10709 POST(fast_gethrt)
10711 if (RES == 0)
10712 return;
10714 VG_(change_mapping_ownership)(RES, False);
10716 #endif /* SOLARIS_GETHRT_FASTTRAP */
10718 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
10719 PRE(fast_getzoneoffset)
10721 /* Returns kernel's time zone offset data. */
10722 PRINT("fast_getzoneoffset ( )");
10723 PRE_REG_READ0(long, "get_zone_offset");
10726 POST(fast_getzoneoffset)
10728 if (RES == 0)
10729 return;
10731 VG_(change_mapping_ownership)(RES, False);
10733 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
10735 #undef PRE
10736 #undef POST
10738 /* ---------------------------------------------------------------------
10739 The Solaris syscall table
10740 ------------------------------------------------------------------ */
10742 /* Add a Solaris-specific, arch-independent wrapper to a syscall table. */
10743 #define SOLX_(sysno, name) \
10744 WRAPPER_ENTRY_X_(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10745 #define SOLXY(sysno, name) \
10746 WRAPPER_ENTRY_XY(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10748 #if defined(VGP_x86_solaris)
10749 /* Add an x86-solaris specific wrapper to a syscall table. */
10750 #define PLAX_(sysno, name) \
10751 WRAPPER_ENTRY_X_(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10752 #define PLAXY(sysno, name) \
10753 WRAPPER_ENTRY_XY(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10755 #elif defined(VGP_amd64_solaris)
10756 /* Add an amd64-solaris specific wrapper to a syscall table. */
10757 #define PLAX_(sysno, name) \
10758 WRAPPER_ENTRY_X_(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10759 #define PLAXY(sysno, name) \
10760 WRAPPER_ENTRY_XY(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10762 #else
10763 # error "Unknown platform"
10764 #endif
10767 /* GEN : handlers are in syswrap-generic.c
10768    SOL : handlers are in this file
10769    X_  : PRE handler only
10770    XY  : PRE and POST handlers */
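/* For example, SOLXY(__NR_getcwd, sys_getcwd) registers both the PRE and
   POST wrappers defined earlier in this file, whereas a SOLX_ entry
   registers a PRE wrapper only. */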
10773 static SyscallTableEntry syscall_table[] = {
10774 SOLX_(__NR_exit, sys_exit), /* 1 */
10775 #if defined(SOLARIS_SPAWN_SYSCALL)
10776 SOLX_(__NR_spawn, sys_spawn), /* 2 */
10777 #endif /* SOLARIS_SPAWN_SYSCALL */
10778 GENXY(__NR_read, sys_read), /* 3 */
10779 GENX_(__NR_write, sys_write), /* 4 */
10780 #if defined(SOLARIS_OLD_SYSCALLS)
10781 SOLXY(__NR_open, sys_open), /* 5 */
10782 #endif /* SOLARIS_OLD_SYSCALLS */
10783 SOLXY(__NR_close, sys_close), /* 6 */
10784 SOLX_(__NR_linkat, sys_linkat), /* 7 */
10785 #if defined(SOLARIS_OLD_SYSCALLS)
10786 GENX_(__NR_link, sys_link), /* 9 */
10787 GENX_(__NR_unlink, sys_unlink), /* 10 */
10788 #endif /* SOLARIS_OLD_SYSCALLS */
10789 SOLX_(__NR_symlinkat, sys_symlinkat), /* 11 */
10790 GENX_(__NR_chdir, sys_chdir), /* 12 */
10791 SOLX_(__NR_time, sys_time), /* 13 */
10792 #if defined(SOLARIS_OLD_SYSCALLS)
10793 GENX_(__NR_chmod, sys_chmod), /* 15 */
10794 GENX_(__NR_chown, sys_chown), /* 16 */
10795 #endif /* SOLARIS_OLD_SYSCALLS */
10796 SOLX_(__NR_brk, sys_brk), /* 17 */
10797 #if defined(SOLARIS_OLD_SYSCALLS)
10798 SOLXY(__NR_stat, sys_stat), /* 18 */
10799 #endif /* SOLARIS_OLD_SYSCALLS */
10800 SOLX_(__NR_lseek, sys_lseek), /* 19 */
10801 GENX_(__NR_getpid, sys_getpid), /* 20 */
10802 SOLXY(__NR_mount, sys_mount), /* 21 */
10803 SOLXY(__NR_readlinkat, sys_readlinkat), /* 22 */
10804 GENX_(__NR_setuid, sys_setuid), /* 23 */
10805 GENX_(__NR_getuid, sys_getuid), /* 24 */
10806 SOLX_(__NR_stime, sys_stime), /* 25 */
10807 GENX_(__NR_alarm, sys_alarm), /* 27 */
10808 #if defined(SOLARIS_OLD_SYSCALLS)
10809 SOLXY(__NR_fstat, sys_fstat), /* 28 */
10810 #endif /* SOLARIS_OLD_SYSCALLS */
10811 GENX_(__NR_pause, sys_pause), /* 29 */
10812 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
10813 SOLXY(__NR_frealpathat, sys_frealpathat), /* 30 */
10814 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
10815 SOLX_(__NR_stty, sys_stty), /* 31 */
10816 SOLXY(__NR_gtty, sys_gtty), /* 32 */
10817 #if defined(SOLARIS_OLD_SYSCALLS)
10818 GENX_(__NR_access, sys_access), /* 33 */
10819 #endif /* SOLARIS_OLD_SYSCALLS */
10820 GENX_(__NR_kill, sys_kill), /* 37 */
10821 SOLX_(__NR_pgrpsys, sys_pgrpsys), /* 39 */
10822 SOLXY(__NR_pipe, sys_pipe), /* 42 */
10823 GENXY(__NR_times, sys_times), /* 43 */
10824 SOLX_(__NR_faccessat, sys_faccessat), /* 45 */
10825 GENX_(__NR_setgid, sys_setgid), /* 46 */
10826 GENX_(__NR_getgid, sys_getgid), /* 47 */
10827 SOLXY(__NR_mknodat, sys_mknodat), /* 48 */
10828 SOLXY(__NR_sysi86, sys_sysi86), /* 50 */
10829 SOLXY(__NR_shmsys, sys_shmsys), /* 52 */
10830 SOLXY(__NR_semsys, sys_semsys), /* 53 */
10831 SOLXY(__NR_ioctl, sys_ioctl), /* 54 */
10832 SOLX_(__NR_fchownat, sys_fchownat), /* 56 */
10833 SOLX_(__NR_fdsync, sys_fdsync), /* 58 */
10834 SOLX_(__NR_execve, sys_execve), /* 59 */
10835 GENX_(__NR_umask, sys_umask), /* 60 */
10836 GENX_(__NR_chroot, sys_chroot), /* 61 */
10837 SOLXY(__NR_fcntl, sys_fcntl), /* 62 */
10838 SOLX_(__NR_renameat, sys_renameat), /* 64 */
10839 SOLX_(__NR_unlinkat, sys_unlinkat), /* 65 */
10840 SOLXY(__NR_fstatat, sys_fstatat), /* 66 */
10841 #if defined(VGP_x86_solaris)
10842 PLAXY(__NR_fstatat64, sys_fstatat64), /* 67 */
10843 #endif /* VGP_x86_solaris */
10844 SOLXY(__NR_openat, sys_openat), /* 68 */
10845 #if defined(VGP_x86_solaris)
10846 PLAXY(__NR_openat64, sys_openat64), /* 69 */
10847 #endif /* VGP_x86_solaris */
10848 SOLXY(__NR_tasksys, sys_tasksys), /* 70 */
10849 SOLXY(__NR_getpagesizes, sys_getpagesizes), /* 73 */
10850 SOLXY(__NR_lwp_park, sys_lwp_park), /* 77 */
10851 SOLXY(__NR_sendfilev, sys_sendfilev), /* 78 */
10852 #if defined(SOLARIS_LWP_NAME_SYSCALL)
10853 SOLXY(__NR_lwp_name, sys_lwp_name), /* 79 */
10854 #endif /* SOLARIS_LWP_NAME_SYSCALL */
10855 #if defined(SOLARIS_OLD_SYSCALLS)
10856 GENX_(__NR_rmdir, sys_rmdir), /* 79 */
10857 GENX_(__NR_mkdir, sys_mkdir), /* 80 */
10858 #endif /* SOLARIS_OLD_SYSCALLS */
10859 GENXY(__NR_getdents, sys_getdents), /* 81 */
10860 SOLXY(__NR_privsys, sys_privsys), /* 82 */
10861 SOLXY(__NR_ucredsys, sys_ucredsys), /* 83 */
10862 SOLXY(__NR_sysfs, sys_sysfs), /* 84 */
10863 SOLXY(__NR_getmsg, sys_getmsg), /* 85 */
10864 SOLX_(__NR_putmsg, sys_putmsg), /* 86 */
10865 #if defined(SOLARIS_OLD_SYSCALLS)
10866 SOLXY(__NR_lstat, sys_lstat), /* 88 */
10867 GENX_(__NR_symlink, sys_symlink), /* 89 */
10868 GENX_(__NR_readlink, sys_readlink), /* 90 */
10869 #endif /* SOLARIS_OLD_SYSCALLS */
10870 GENX_(__NR_setgroups, sys_setgroups), /* 91 */
10871 GENXY(__NR_getgroups, sys_getgroups), /* 92 */
10872 #if defined(SOLARIS_OLD_SYSCALLS)
10873 GENX_(__NR_fchmod, sys_fchmod), /* 93 */
10874 GENX_(__NR_fchown, sys_fchown), /* 94 */
10875 #endif /* SOLARIS_OLD_SYSCALLS */
10876 SOLXY(__NR_sigprocmask, sys_sigprocmask), /* 95 */
10877 SOLX_(__NR_sigsuspend, sys_sigsuspend), /* 96 */
10878 GENXY(__NR_sigaltstack, sys_sigaltstack), /* 97 */
10879 SOLXY(__NR_sigaction, sys_sigaction), /* 98 */
10880 SOLXY(__NR_sigpending, sys_sigpending), /* 99 */
10881 SOLX_(__NR_context, sys_getsetcontext), /* 100 */
10882 SOLX_(__NR_fchmodat, sys_fchmodat), /* 101 */
10883 SOLX_(__NR_mkdirat, sys_mkdirat), /* 102 */
10884 SOLXY(__NR_statvfs, sys_statvfs), /* 103 */
10885 SOLXY(__NR_fstatvfs, sys_fstatvfs), /* 104 */
10886 SOLXY(__NR_nfssys, sys_nfssys), /* 106 */
10887 SOLXY(__NR_waitid, sys_waitid), /* 107 */
10888 SOLX_(__NR_sigsendsys, sys_sigsendsys), /* 108 */
10889 #if defined(SOLARIS_UTIMESYS_SYSCALL)
10890 SOLX_(__NR_utimesys, sys_utimesys), /* 110 */
10891 #endif /* SOLARIS_UTIMESYS_SYSCALL */
10892 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
10893 SOLX_(__NR_utimensat, sys_utimensat), /* 110 */
10894 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
10895 SOLXY(__NR_sigresend, sys_sigresend), /* 111 */
10896 SOLXY(__NR_priocntlsys, sys_priocntlsys), /* 112 */
10897 SOLX_(__NR_pathconf, sys_pathconf), /* 113 */
10898 SOLX_(__NR_mmap, sys_mmap), /* 115 */
10899 GENXY(__NR_mprotect, sys_mprotect), /* 116 */
10900 GENXY(__NR_munmap, sys_munmap), /* 117 */
10901 GENX_(__NR_fchdir, sys_fchdir), /* 120 */
10902 GENXY(__NR_readv, sys_readv), /* 121 */
10903 GENX_(__NR_writev, sys_writev), /* 122 */
10904 #if defined(SOLARIS_UUIDSYS_SYSCALL)
10905 SOLXY(__NR_uuidsys, sys_uuidsys), /* 124 */
10906 #endif /* SOLARIS_UUIDSYS_SYSCALL */
10907 SOLX_(__NR_mmapobj, sys_mmapobj), /* 127 */
10908 GENX_(__NR_setrlimit, sys_setrlimit), /* 128 */
10909 GENXY(__NR_getrlimit, sys_getrlimit), /* 129 */
10910 #if defined(SOLARIS_OLD_SYSCALLS)
10911 GENX_(__NR_lchown, sys_lchown), /* 130 */
10912 #endif /* SOLARIS_OLD_SYSCALLS */
10913 SOLX_(__NR_memcntl, sys_memcntl), /* 131 */
10914 SOLXY(__NR_getpmsg, sys_getpmsg), /* 132 */
10915 SOLX_(__NR_putpmsg, sys_putpmsg), /* 133 */
10916 #if defined(SOLARIS_OLD_SYSCALLS)
10917 SOLX_(__NR_rename, sys_rename), /* 134 */
10918 #endif /* SOLARIS_OLD_SYSCALLS */
10919 SOLXY(__NR_uname, sys_uname), /* 135 */
10920 SOLX_(__NR_setegid, sys_setegid), /* 136 */
10921 SOLX_(__NR_sysconfig, sys_sysconfig), /* 137 */
10922 SOLXY(__NR_systeminfo, sys_systeminfo), /* 139 */
10923 SOLX_(__NR_seteuid, sys_seteuid), /* 141 */
10924 SOLX_(__NR_forksys, sys_forksys), /* 142 */
10925 #if defined(SOLARIS_GETRANDOM_SYSCALL)
10926 SOLXY(__NR_getrandom, sys_getrandom), /* 143 */
10927 #endif /* SOLARIS_GETRANDOM_SYSCALL */
10928 SOLXY(__NR_sigtimedwait, sys_sigtimedwait), /* 144 */
10929 SOLX_(__NR_yield, sys_yield), /* 146 */
10930 SOLXY(__NR_lwp_sema_post, sys_lwp_sema_post), /* 148 */
10931 SOLXY(__NR_lwp_sema_trywait, sys_lwp_sema_trywait), /* 149 */
10932 SOLX_(__NR_lwp_detach, sys_lwp_detach), /* 150 */
10933 SOLXY(__NR_modctl, sys_modctl), /* 152 */
10934 SOLX_(__NR_fchroot, sys_fchroot), /* 153 */
10935 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
10936 SOLX_(__NR_system_stats, sys_system_stats), /* 154 */
10937 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
10938 SOLXY(__NR_gettimeofday, sys_gettimeofday), /* 156 */
10939 GENXY(__NR_getitimer, sys_getitimer), /* 157 */
10940 GENXY(__NR_setitimer, sys_setitimer), /* 158 */
10941 SOLX_(__NR_lwp_create, sys_lwp_create), /* 159 */
10942 SOLX_(__NR_lwp_exit, sys_lwp_exit), /* 160 */
10943 SOLX_(__NR_lwp_suspend, sys_lwp_suspend), /* 161 */
10944 SOLX_(__NR_lwp_continue, sys_lwp_continue), /* 162 */
10945 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
10946 SOLXY(__NR_lwp_sigqueue, sys_lwp_sigqueue), /* 163 */
10947 #else
10948 SOLXY(__NR_lwp_kill, sys_lwp_kill), /* 163 */
10949 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
10950 SOLX_(__NR_lwp_self, sys_lwp_self), /* 164 */
10951 SOLX_(__NR_lwp_sigmask, sys_lwp_sigmask), /* 165 */
10952 SOLX_(__NR_lwp_private, sys_lwp_private), /* 166 */
10953 SOLXY(__NR_lwp_wait, sys_lwp_wait), /* 167 */
10954 SOLXY(__NR_lwp_mutex_wakeup, sys_lwp_mutex_wakeup), /* 168 */
10955 SOLXY(__NR_lwp_cond_wait, sys_lwp_cond_wait), /* 170 */
10956 SOLXY(__NR_lwp_cond_signal, sys_lwp_cond_signal), /* 171 */
10957 SOLX_(__NR_lwp_cond_broadcast, sys_lwp_cond_broadcast), /* 172 */
10958 SOLXY(__NR_pread, sys_pread), /* 173 */
10959 SOLX_(__NR_pwrite, sys_pwrite), /* 174 */
10960 #if defined(VGP_x86_solaris)
10961 PLAX_(__NR_llseek, sys_llseek32), /* 175 */
10962 #endif /* VGP_x86_solaris */
10963 SOLXY(__NR_lgrpsys, sys_lgrpsys), /* 180 */
10964 SOLXY(__NR_rusagesys, sys_rusagesys), /* 181 */
10965 SOLXY(__NR_port, sys_port), /* 182 */
10966 SOLXY(__NR_pollsys, sys_pollsys), /* 183 */
10967 SOLXY(__NR_labelsys, sys_labelsys), /* 184 */
10968 SOLXY(__NR_acl, sys_acl), /* 185 */
10969 SOLXY(__NR_auditsys, sys_auditsys), /* 186 */
10970 SOLX_(__NR_p_online, sys_p_online), /* 189 */
10971 SOLX_(__NR_sigqueue, sys_sigqueue), /* 190 */
10972 SOLXY(__NR_clock_gettime, sys_clock_gettime), /* 191 */
10973 SOLX_(__NR_clock_settime, sys_clock_settime), /* 192 */
10974 SOLXY(__NR_clock_getres, sys_clock_getres), /* 193 */
10975 SOLXY(__NR_timer_create, sys_timer_create), /* 194 */
10976 SOLX_(__NR_timer_delete, sys_timer_delete), /* 195 */
10977 SOLXY(__NR_timer_settime, sys_timer_settime), /* 196 */
10978 SOLXY(__NR_timer_gettime, sys_timer_gettime), /* 197 */
10979 SOLX_(__NR_timer_getoverrun, sys_timer_getoverrun), /* 198 */
10980 GENXY(__NR_nanosleep, sys_nanosleep), /* 199 */
10981 SOLXY(__NR_facl, sys_facl), /* 200 */
10982 SOLXY(__NR_door, sys_door), /* 201 */
10983 GENX_(__NR_setreuid, sys_setreuid), /* 202 */
10984 GENX_(__NR_setregid, sys_setregid), /* 203 */
10985 SOLXY(__NR_schedctl, sys_schedctl), /* 206 */
10986 SOLXY(__NR_pset, sys_pset), /* 207 */
10987 SOLXY(__NR_resolvepath, sys_resolvepath), /* 209 */
10988 SOLXY(__NR_lwp_mutex_timedlock, sys_lwp_mutex_timedlock), /* 210 */
10989 SOLXY(__NR_lwp_sema_timedwait, sys_lwp_sema_timedwait), /* 211 */
10990 SOLXY(__NR_lwp_rwlock_sys, sys_lwp_rwlock_sys), /* 212 */
10991 #if defined(VGP_x86_solaris)
10992 GENXY(__NR_getdents64, sys_getdents64), /* 213 */
10993 PLAX_(__NR_mmap64, sys_mmap64), /* 214 */
10994 #if defined(SOLARIS_OLD_SYSCALLS)
10995 PLAXY(__NR_stat64, sys_stat64), /* 215 */
10996 PLAXY(__NR_lstat64, sys_lstat64), /* 216 */
10997 PLAXY(__NR_fstat64, sys_fstat64), /* 217 */
10998 #endif /* SOLARIS_OLD_SYSCALLS */
10999 PLAXY(__NR_statvfs64, sys_statvfs64), /* 218 */
11000 PLAXY(__NR_fstatvfs64, sys_fstatvfs64), /* 219 */
11001 #endif /* VGP_x86_solaris */
11002 #if defined(VGP_x86_solaris)
11003 PLAX_(__NR_setrlimit64, sys_setrlimit64), /* 220 */
11004 PLAXY(__NR_getrlimit64, sys_getrlimit64), /* 221 */
11005 PLAXY(__NR_pread64, sys_pread64), /* 222 */
11006 PLAX_(__NR_pwrite64, sys_pwrite64), /* 223 */
11007 #if defined(SOLARIS_OLD_SYSCALLS)
11008 PLAXY(__NR_open64, sys_open64), /* 225 */
11009 #endif /* SOLARIS_OLD_SYSCALLS */
11010 #endif /* VGP_x86_solaris */
11011 SOLXY(__NR_zone, sys_zone), /* 227 */
11012 SOLXY(__NR_getcwd, sys_getcwd), /* 229 */
11013 SOLXY(__NR_so_socket, sys_so_socket), /* 230 */
11014 SOLXY(__NR_so_socketpair, sys_so_socketpair), /* 231 */
11015 SOLX_(__NR_bind, sys_bind), /* 232 */
11016 SOLX_(__NR_listen, sys_listen), /* 233 */
11017 SOLXY(__NR_accept, sys_accept), /* 234 */
11018 SOLX_(__NR_connect, sys_connect), /* 235 */
11019 SOLX_(__NR_shutdown, sys_shutdown), /* 236 */
11020 SOLXY(__NR_recv, sys_recv), /* 237 */
11021 SOLXY(__NR_recvfrom, sys_recvfrom), /* 238 */
11022 SOLXY(__NR_recvmsg, sys_recvmsg), /* 239 */
11023 SOLX_(__NR_send, sys_send), /* 240 */
11024 SOLX_(__NR_sendmsg, sys_sendmsg), /* 241 */
11025 SOLX_(__NR_sendto, sys_sendto), /* 242 */
11026 SOLXY(__NR_getpeername, sys_getpeername), /* 243 */
11027 SOLXY(__NR_getsockname, sys_getsockname), /* 244 */
11028 SOLXY(__NR_getsockopt, sys_getsockopt), /* 245 */
11029 SOLX_(__NR_setsockopt, sys_setsockopt), /* 246 */
11030 SOLX_(__NR_lwp_mutex_register, sys_lwp_mutex_register), /* 252 */
11031 SOLXY(__NR_uucopy, sys_uucopy), /* 254 */
11032 SOLX_(__NR_umount2, sys_umount2) /* 255 */
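/* Fast traps form a separate class of syscall numbers and are dispatched
   through their own, much smaller table below. */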
11035 static SyscallTableEntry fasttrap_table[] = {
11036 SOLX_(__NR_gethrtime, fast_gethrtime), /* 3 */
11037 SOLX_(__NR_gethrvtime, fast_gethrvtime), /* 4 */
11038 SOLX_(__NR_gethrestime, fast_gethrestime), /* 5 */
11039 SOLX_(__NR_getlgrp, fast_getlgrp) /* 6 */
11040 #if defined(SOLARIS_GETHRT_FASTTRAP)
11042 SOLXY(__NR_gethrt, fast_gethrt) /* 7 */
11043 #endif /* SOLARIS_GETHRT_FASTTRAP */
11044 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
11046 SOLXY(__NR_getzoneoffset, fast_getzoneoffset) /* 8 */
11047 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
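/* Look up the wrapper entry for a syscall: classic syscalls and fast traps
   use separate tables; NULL is returned when no wrapper is registered. */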
11051 SyscallTableEntry *ML_(get_solaris_syscall_entry)(UInt sysno)
11053 const UInt syscall_table_size
11054 = sizeof(syscall_table) / sizeof(syscall_table[0]);
11055 const UInt fasttrap_table_size
11056 = sizeof(fasttrap_table) / sizeof(fasttrap_table[0]);
11058 SyscallTableEntry *table;
11059 Int size;
11061 switch (VG_SOLARIS_SYSNO_CLASS(sysno)) {
11062 case VG_SOLARIS_SYSCALL_CLASS_CLASSIC:
11063 table = syscall_table;
11064 size = syscall_table_size;
11065 break;
11066 case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP:
11067 table = fasttrap_table;
11068 size = fasttrap_table_size;
11069 break;
11070 default:
11071 vg_assert(0);
11072 break;
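/* Reduce the syscall number to a plain table index and bounds-check it. */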
11074 sysno = VG_SOLARIS_SYSNO_INDEX(sysno);
11075 if (sysno < size) {
11076 SyscallTableEntry *sys = &table[sysno];
11077 if (!sys->before)
11078 return NULL; /* no entry */
11079 return sys;
11082 /* Can't find a wrapper. */
11083 return NULL;
11086 #endif // defined(VGO_solaris)
11088 /*--------------------------------------------------------------------*/
11089 /*--- end ---*/
11090 /*--------------------------------------------------------------------*/