/*--------------------------------------------------------------------*/
/*--- Solaris-specific syscalls, etc.            syswrap-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2017 Petr Pavlu
      setup@dagobah.cz

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

/* Copyright 2013-2017, Ivo Raisr <ivosh@ivosh.net>. */

/* Copyright 2015-2017, Tomas Jedlicka <jedlickat@gmail.com>. */

/* Copyright 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
#if defined(VGO_solaris)

#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debuginfo.h"      // VG_(di_notify_*)
#include "pub_core_debuglog.h"
#include "pub_core_clientstate.h"
#include "pub_core_gdbserver.h"
#include "pub_core_inner.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"        // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_transtab.h"       // VG_(discard_translations)
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_ume.h"
#if defined(ENABLE_INNER_CLIENT_REQUEST)
#include "pub_core_clreq.h"
#endif

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-main.h"
#include "priv_syswrap-solaris.h"
/* Return the number of live (not yet dead) threads of the requested kind.
   count_daemon == True:  count daemon threads
   count_daemon == False: count non-daemon threads */
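/* Note: daemon threads do not keep the process alive; run_a_thread_NORETURN()
   below drives process shutdown once the last non-daemon thread exits,
   regardless of how many daemon threads are still running. */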
static UInt count_living_daemon_threads(Bool count_daemon)
{
   UInt count = 0;
   for (ThreadId tid = 1; tid < VG_N_THREADS; tid++)
      if (VG_(threads)[tid].status != VgTs_Empty &&
          VG_(threads)[tid].status != VgTs_Zombie &&
          VG_(threads)[tid].os_state.daemon_thread == count_daemon)
         count++;

   return count;
}
/* Note: The following functions (thread_wrapper, run_a_thread_NORETURN,
   ML_(start_thread_NORETURN), ML_(allocstack) and
   VG_(main_thread_wrapper_NORETURN)) are based on the code in
   syswrap-linux.c.  Keep them synchronized! */

/* Run a thread from beginning to end and return the thread's
   scheduler-return-code. */
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
{
   VgSchedReturnCode ret;
   ThreadId tid = (ThreadId)tidW;
   ThreadState *tst = VG_(get_ThreadState)(tid);

   VG_(debugLog)(1, "syswrap-solaris",
                 "thread_wrapper(tid=%u): entry\n",
                 tid);

   vg_assert(tst->status == VgTs_Init);

   /* Make sure we get the CPU lock before doing anything significant. */
   VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");

   if (0)
      VG_(printf)("thread tid %u started: stack = %p\n", tid, (void *)&tid);

   /* Make sure error reporting is enabled in the new thread. */
   tst->err_disablement_level = 0;

   if (tid == 1)
      VG_TRACK(pre_thread_first_insn, tid);
   else {
      /* For newly created threads, VG_TRACK(pre_thread_first_insn, tid) is
         invoked later from PRE(sys_getsetcontext)() when setucontext()
         called from _thrp_setup() concludes new thread setup.  Invoking it
         here would be way too early - new thread has no stack, yet. */
   }

   tst->os_state.lwpid = VG_(gettid)();
   tst->os_state.threadgroup = VG_(getpid)();

   /* Thread created with all signals blocked; scheduler will set the
      appropriate mask. */

   ret = VG_(scheduler)(tid);

   vg_assert(VG_(is_exiting)(tid));

   vg_assert(tst->status == VgTs_Runnable);
   vg_assert(VG_(is_running_thread)(tid));

   VG_(debugLog)(1, "syswrap-solaris",
                 "thread_wrapper(tid=%u): exit, schedreturncode %s\n",
                 tid, VG_(name_of_VgSchedReturnCode)(ret));

   /* Return to caller, still holding the lock. */
   return ret;
}
/* Run a thread all the way to the end, then do appropriate exit actions
   (this is the last-one-out-turn-off-the-lights bit). */
static void run_a_thread_NORETURN(Word tidW)
{
   ThreadId tid = (ThreadId)tidW;
   VgSchedReturnCode src;
   Int c;
   ThreadState *tst;
#ifdef ENABLE_INNER_CLIENT_REQUEST
   Int registered_vgstack_id;
#endif

   VG_(debugLog)(1, "syswrap-solaris",
                 "run_a_thread_NORETURN(tid=%u): pre-thread_wrapper\n",
                 tid);

   tst = VG_(get_ThreadState)(tid);
   vg_assert(tst);

   /* A thread has two stacks:
      * the simulated stack (used by the synthetic cpu.  Guest process
        is using this stack).
      * the valgrind stack (used by the real cpu.  Valgrind code is running
        on this stack).
      When Valgrind runs as an inner, it must signal that its (real) stack
      is the stack to use by the outer to e.g. do stacktraces.
   */
   INNER_REQUEST
      (registered_vgstack_id
       = VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
                                 tst->os_state.valgrind_stack_init_SP));

   /* Run the thread all the way through. */
   src = thread_wrapper(tid);

   VG_(debugLog)(1, "syswrap-solaris",
                 "run_a_thread_NORETURN(tid=%u): post-thread_wrapper\n",
                 tid);

   c = count_living_daemon_threads(False);
   vg_assert(c >= 1); /* Stay sane. */

   /* Tell the tool that schedctl data belonging to this thread are gone. */
   Addr a = tst->os_state.schedctl_data;
   if (a != 0)
      VG_TRACK(die_mem_munmap, a, sizeof(struct vki_sc_shared));

   /* Deregister thread's stack. */
   if (tst->os_state.stk_id != NULL_STK_ID)
      VG_(deregister_stack)(tst->os_state.stk_id);

   /* Tell the tool this thread is exiting. */
   VG_TRACK(pre_thread_ll_exit, tid);

   /* If the thread is exiting with errors disabled, complain loudly;
      doing so is bad (does the user know this has happened?)  Also, in all
      cases, be paranoid and clear the flag anyway so that the thread slot is
      safe in this respect if later reallocated.  This should be unnecessary
      since the flag should be cleared when the slot is reallocated, in
      thread_wrapper(). */
   if (tst->err_disablement_level > 0) {
      VG_(umsg)(
         "WARNING: exiting thread has error reporting disabled.\n"
         "WARNING: possibly as a result of some mistake in the use\n"
         "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
      );
      VG_(debugLog)(
         1, "syswrap-solaris",
         "run_a_thread_NORETURN(tid=%u): "
         "WARNING: exiting thread has err_disablement_level = %u\n",
         tid, tst->err_disablement_level
      );
   }
   tst->err_disablement_level = 0;

   if (c == 1) {
      UInt daemon_threads = count_living_daemon_threads(True);
      if (daemon_threads == 0)
         VG_(debugLog)(1, "syswrap-solaris",
                       "run_a_thread_NORETURN(tid=%u): "
                       "last one standing\n",
                       tid);
      else
         VG_(debugLog)(1, "syswrap-solaris",
                       "run_a_thread_NORETURN(tid=%u): "
                       "last non-daemon thread standing "
                       "[daemon threads=%u]\n",
                       tid, daemon_threads);

      /* We are the last non-daemon thread standing. Keep hold of the lock and
         carry on to show final tool results, then exit the entire system.
         Use the continuation pointer set at startup in m_main. */
      if ((src == VgSrc_ExitThread) && (daemon_threads > 0))
         src = VgSrc_ExitProcess;
      (*VG_(address_of_m_main_shutdown_actions_NORETURN))(tid, src);
   }
   else {
      VG_(debugLog)(1, "syswrap-solaris",
                    "run_a_thread_NORETURN(tid=%u): "
                    "not last one standing\n",
                    tid);

      /* OK, thread is dead, but others still exist.  Just exit. */

      /* This releases the run lock. */
      VG_(exit_thread)(tid);
      vg_assert(tst->status == VgTs_Zombie);
      vg_assert(sizeof(tst->status) == 4);

      INNER_REQUEST(VALGRIND_STACK_DEREGISTER(registered_vgstack_id));

      /* We have to use this sequence to terminate the thread to
         prevent a subtle race.  If VG_(exit_thread)() had left the
         ThreadState as Empty, then it could have been reallocated, reusing
         the stack while we're doing these last cleanups.  Instead,
         VG_(exit_thread) leaves it as Zombie to prevent reallocation.  We
         need to make sure we don't touch the stack between marking it Empty
         and exiting.  Hence the assembler. */
#if defined(VGP_x86_solaris)
      /* Luckily lwp_exit doesn't take any arguments so we don't have to mess
         with the stack. */
      __asm__ __volatile__ (
         "movl %[EMPTY], %[status]\n"  /* set tst->status = VgTs_Empty */
         "movl $"VG_STRINGIFY(__NR_lwp_exit)", %%eax\n"
         "int $0x91\n"                 /* lwp_exit() */
         : [status] "=m" (tst->status)
         : [EMPTY] "n" (VgTs_Empty)
         : "eax", "edx", "cc", "memory");
#elif defined(VGP_amd64_solaris)
      __asm__ __volatile__ (
         "movl %[EMPTY], %[status]\n"  /* set tst->status = VgTs_Empty */
         "movq $"VG_STRINGIFY(__NR_lwp_exit)", %%rax\n"
         "syscall\n"                   /* lwp_exit() */
         : [status] "=m" (tst->status)
         : [EMPTY] "n" (VgTs_Empty)
         : "rax", "rdx", "cc", "memory");
#else
#  error "Unknown platform"
#endif

      VG_(core_panic)("Thread exit failed?\n");
   }

   /*NOTREACHED*/
   vg_assert(0);
}
Word ML_(start_thread_NORETURN)(void *arg)
{
   ThreadState *tst = (ThreadState*)arg;
   ThreadId tid = tst->tid;

   run_a_thread_NORETURN((Word)tid);
   /*NOTREACHED*/
   vg_assert(0);
}
/* Allocate a stack for this thread, if it doesn't already have one.
   They're allocated lazily, and never freed.  Returns the initial stack
   pointer value to use, or 0 if allocation failed. */
Addr ML_(allocstack)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   VgStack *stack;
   Addr initial_SP;

   /* Either the stack_base and stack_init_SP are both zero (in which
      case a stack hasn't been allocated) or they are both non-zero,
      in which case it has. */

   if (tst->os_state.valgrind_stack_base == 0)
      vg_assert(tst->os_state.valgrind_stack_init_SP == 0);

   if (tst->os_state.valgrind_stack_base != 0)
      vg_assert(tst->os_state.valgrind_stack_init_SP != 0);

   /* If no stack is present, allocate one. */

   if (tst->os_state.valgrind_stack_base == 0) {
      stack = VG_(am_alloc_VgStack)( &initial_SP );
      if (stack) {
         tst->os_state.valgrind_stack_base    = (Addr)stack;
         tst->os_state.valgrind_stack_init_SP = initial_SP;
      }
   }

   if (0)
      VG_(printf)("stack for tid %u at %p; init_SP=%p\n",
                  tid,
                  (void*)tst->os_state.valgrind_stack_base,
                  (void*)tst->os_state.valgrind_stack_init_SP);

   return tst->os_state.valgrind_stack_init_SP;
}
/* Allocate a stack for the main thread, and run it all the way to the
   end.  Although we already have a working VgStack (VG_(interim_stack)) it's
   better to allocate a new one, so that overflow detection works uniformly
   for all threads.  Also initialize the GDT (for normal threads, this is done
   in the PRE wrapper of lwp_create). */
void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
{
   Addr sp;

   VG_(debugLog)(1, "syswrap-solaris",
                 "entering VG_(main_thread_wrapper_NORETURN)\n");

   sp = ML_(allocstack)(tid);
#if defined(ENABLE_INNER_CLIENT_REQUEST)
   {
      // we must register the main thread stack before the call
      // to ML_(call_on_new_stack_0_1), otherwise the outer valgrind
      // reports 'write error' on the non registered stack.
      ThreadState *tst = VG_(get_ThreadState)(tid);
      INNER_REQUEST
         ((void)
          VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
                                  tst->os_state.valgrind_stack_init_SP));
   }
#endif

#if defined(VGP_x86_solaris)
   {
      ThreadState *tst = VG_(get_ThreadState)(tid);
      ML_(setup_gdt)(&tst->arch.vex);
      ML_(update_gdt_lwpgs)(tid);
   }
#elif defined(VGP_amd64_solaris)
   /* Nothing to do. */
#else
#  error "Unknown platform"
#endif

   /* If we can't even allocate the first thread's stack, we're hosed.
      Give up. */
   vg_assert2(sp != 0, "Cannot allocate main thread's stack.");

   /* Shouldn't be any other threads around yet. */
   vg_assert(VG_(count_living_threads)() == 1);

   ML_(call_on_new_stack_0_1)(
      (Addr)sp,               /* stack */
      0,                      /* bogus return address */
      run_a_thread_NORETURN,  /* fn to call */
      (Word)tid               /* arg to give it */
   );

   /*NOTREACHED*/
   vg_assert(0);
}
/* Deallocate the GDT for a thread. */
void VG_(cleanup_thread)(ThreadArchState *arch)
{
#if defined(VGP_x86_solaris)
   ML_(cleanup_gdt)(&arch->vex);
#elif defined(VGP_amd64_solaris)
   /* Nothing to do. */
#else
#  error "Unknown platform"
#endif
}
/*
 * Notify core about spring cleaning of schedctl data pages for all threads
 * in child post-fork handler.  Libc will issue new schedctl syscalls for
 * threads in the child when needs arise.
 *
 * See also POST(schedctl) and run_a_thread_NORETURN() when a thread exits.
 */
static void clean_schedctl_data(ThreadId tid)
{
   UInt i;
   for (i = 0; i < VG_N_THREADS; i++) {
      ThreadState *tst = &VG_(threads)[i];
      if (tst->status != VgTs_Empty) {
         Addr a = tst->os_state.schedctl_data;
         if (a != 0) {
            tst->os_state.schedctl_data = 0;
            a = VG_PGROUNDDN(a);
            if (VG_(am_find_anon_segment)(a))
               VG_(am_notify_munmap)(a, VKI_PAGE_SIZE);
         }
      }
   }
}

void VG_(syswrap_init)(void)
{
   VG_(atfork)(NULL, NULL, clean_schedctl_data);
}
/* Changes ownership of a memory mapping shared between kernel and the client
   process.  This mapping should have already been pre-arranged during process
   address space initialization happening in the kernel.  Valgrind on startup
   created a segment for this mapping categorized as Valgrind's owned
   anonymous.  Size of this mapping typically varies among Solaris versions
   but should be page aligned.
   If 'once_only' is 'True', it is expected this function is called once only
   and the mapping ownership has not been changed, yet [useful during
   initialization].  If 'False', this function can be called many times but
   does change ownership only upon the first invocation [useful in syscall
   wrappers].
*/
void VG_(change_mapping_ownership)(Addr addr, Bool once_only)
{
   const NSegment *seg = VG_(am_find_anon_segment)(addr);
   vg_assert(seg != NULL);
   vg_assert(seg->start == addr);
   vg_assert(VG_IS_PAGE_ALIGNED(seg->start));
   vg_assert(VG_IS_PAGE_ALIGNED(seg->end + 1));
   SizeT size = seg->end - seg->start + 1;
   vg_assert(size > 0);

   Bool do_change = False;
   if (once_only) {
      vg_assert(VG_(am_is_valid_for_valgrind)(addr, size, VKI_PROT_READ));
      do_change = True;
   } else {
      if (!VG_(am_is_valid_for_client)(addr, size, VKI_PROT_READ))
         do_change = True;
   }

   if (do_change) {
      Bool change_ownership_OK = VG_(am_change_ownership_v_to_c)(addr, size);
      vg_assert(change_ownership_OK);

      /* Tell the tool about just discovered mapping. */
      VG_TRACK(new_mem_startup,
               addr, size,
               True  /* readable? */,
               False /* writable? */,
               False /* executable? */,
               0     /* di_handle */);
   }
}
/* Calculate the Fletcher-32 checksum of a given buffer. */
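/* (Operates on 'blocks' 16-bit words; both running sums are reduced modulo
   0xffff, and the result packs sum2 into the upper and sum1 into the lower
   16 bits.) */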
UInt ML_(fletcher32)(UShort *buf, SizeT blocks)
{
   UInt sum1 = 0;
   UInt sum2 = 0;
   SizeT i;

   for (i = 0; i < blocks; i++) {
      sum1 = (sum1 + buf[i]) % 0xffff;
      sum2 = (sum2 + sum1) % 0xffff;
   }

   return (sum2 << 16) | sum1;
}
/* Calculate the Fletcher-64 checksum of a given buffer. */
ULong ML_(fletcher64)(UInt *buf, SizeT blocks)
{
   ULong sum1 = 0;
   ULong sum2 = 0;
   SizeT i;

   for (i = 0; i < blocks; i++) {
      sum1 = (sum1 + buf[i]) % 0xffffffff;
      sum2 = (sum2 + sum1) % 0xffffffff;
   }
   return (sum2 << 32) | sum1;
}
/* Save a complete context (VCPU state, sigmask) of a given client thread
   into the vki_ucontext_t structure.  This structure is supposed to be
   allocated in the client memory, a caller must make sure that the memory
   can be dereferenced.  The active tool is informed about the save. */
void VG_(save_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   VG_TRACK(pre_mem_write, part, tid, "save_context(uc)", (Addr)uc,
            sizeof(*uc));
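
   /* Mark all context parts (sigmask, stack, machine state) as valid; they
      are all filled in below. */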
   uc->uc_flags = VKI_UC_ALL;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_flags,
            sizeof(uc->uc_flags));

   /* Old context */
   uc->uc_link = tst->os_state.oldcontext;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_link,
            sizeof(uc->uc_link));

   /* Clear uc->vki_uc_signo.  This slot is used by the signal machinery to
      store a signal number. */
   VKI_UC_SIGNO(uc) = 0;

   /* Sigmask */
   uc->uc_sigmask = tst->sig_mask;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_sigmask,
            sizeof(uc->uc_sigmask));

   /* Stack */
   {
      if (tst->os_state.ustack
          && ML_(safe_to_deref)(tst->os_state.ustack, sizeof(vki_stack_t))
          && tst->os_state.ustack->ss_size) {
         /* If ustack points to a valid stack copy it to ucontext. */
         uc->uc_stack = *tst->os_state.ustack;
      }
      else {
         /* Ustack is not valid.  A correct stack has to be figured out
            manually. */
         SysRes res;
         vki_stack_t altstack;

         /* Get information about alternate stack. */
         res = VG_(do_sys_sigaltstack)(tid, NULL, &altstack);
         vg_assert(!sr_isError(res));

         if (altstack.ss_flags == VKI_SS_ONSTACK) {
            /* If the alternate stack is active copy it to ucontext. */
            uc->uc_stack = altstack;
         }
         else {
            /* No information about stack is present, save information about
               current main stack to ucontext.  This branch should be reached
               only by the main thread. */
            ThreadState *tst2 = VG_(get_ThreadState)(1);
            uc->uc_stack.ss_sp = (void*)(tst2->client_stack_highest_byte + 1
                                         - tst2->client_stack_szB);
            uc->uc_stack.ss_size = tst2->client_stack_szB;
            uc->uc_stack.ss_flags = 0;
         }
      }

      VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_stack,
               sizeof(uc->uc_stack));
   }

   /* Save the architecture-specific part of the context. */
   ML_(save_machine_context)(tid, uc, part);
}
/* Set a complete context (VCPU state, sigmask) of a given client thread
   according to values passed in the vki_ucontext_t structure.  This structure
   is supposed to be allocated in the client memory, a caller must make sure
   that the memory can be dereferenced.  The active tool is informed about
   what parts of the structure are read.

   This function is a counterpart to VG_(save_context)(). */
void VG_(restore_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part,
                          Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr old_esp = VG_(get_SP)(tid);

   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_flags)",
            (Addr)&uc->uc_flags, sizeof(uc->uc_flags));

   /* Old context */
   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_link)",
            (Addr)&uc->uc_link, sizeof(uc->uc_link));
   tst->os_state.oldcontext = uc->uc_link;

   /* Sigmask */
   if (uc->uc_flags & VKI_UC_SIGMASK) {
      SysRes res;

      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_sigmask)",
               (Addr)&uc->uc_sigmask, sizeof(uc->uc_sigmask));
      res = VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, &uc->uc_sigmask,
                                    NULL);
      /* Setting signal mask should never fail. */
      vg_assert(!sr_isError(res));
   }

   /* Stack */
   if (uc->uc_flags & VKI_UC_STACK) {
      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_stack)",
               (Addr)&uc->uc_stack, sizeof(uc->uc_stack));

      if (uc->uc_stack.ss_flags == VKI_SS_ONSTACK) {
         /* This seems to be a little bit dangerous but it is what the kernel
            does. */
         if (VG_(clo_trace_signals))
            VG_(dmsg)("restore_context, sigaltstack: tid %u, "
                      "ss %p{%p,sz=%lu,flags=%#x}\n",
                      tid, &uc->uc_stack, uc->uc_stack.ss_sp,
                      (SizeT)uc->uc_stack.ss_size, uc->uc_stack.ss_flags);

         tst->altstack.ss_sp = uc->uc_stack.ss_sp;
         tst->altstack.ss_size = uc->uc_stack.ss_size;
         /* Do not copy ss_flags, they are calculated dynamically by
            Valgrind. */
      }

      /* Copyout the new stack. */
      if (tst->os_state.ustack
          && VG_(am_is_valid_for_client)((Addr)tst->os_state.ustack,
                                         sizeof(*tst->os_state.ustack),
                                         VKI_PROT_WRITE))
         *tst->os_state.ustack = uc->uc_stack;
      VG_TRACK(post_mem_write, part, tid, (Addr)&tst->os_state.ustack,
               sizeof(tst->os_state.ustack));
   }

   /* Restore the architecture-specific part of the context. */
   ML_(restore_machine_context)(tid, uc, part, esp_is_thrptr);

   /* If the thread stack is already known, kill the deallocated stack area.
      This is important when returning from a signal handler. */
   if (tst->client_stack_highest_byte && tst->client_stack_szB) {
      Addr end = tst->client_stack_highest_byte;
      Addr start = end + 1 - tst->client_stack_szB;
      Addr new_esp = VG_(get_SP)(tid);

      /* Make sure that the old and new stack pointer are on the same (active)
         stack.  Alternate stack is currently never affected by this code. */
      if (start <= old_esp && old_esp <= end
          && start <= new_esp && new_esp <= end
          && new_esp > old_esp)
         VG_TRACK(die_mem_stack, old_esp - VG_STACK_REDZONE_SZB,
                  (new_esp - old_esp) + VG_STACK_REDZONE_SZB);
   }
}
/* Set a client stack associated with a given thread id according to values
   passed in the vki_stack_t structure. */
static void set_stack(ThreadId tid, vki_stack_t *st)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr new_start, new_end;
   SizeT new_size;
   Addr cur_start;
   SizeT cur_size;

   VG_(debugLog)(2, "syswrap-solaris",
                 "set stack: sp=%#lx, size=%#lx.\n",
                 (Addr)st->ss_sp, (SizeT)st->ss_size);

   /* Stay sane. */
   vg_assert(st->ss_flags == 0);

   new_start = (Addr)st->ss_sp;
   new_end = new_start + st->ss_size - 1;
   new_size = st->ss_size;
   cur_start = tst->client_stack_highest_byte + 1
               - tst->client_stack_szB;
   cur_size = tst->client_stack_szB;

   if (new_start == cur_start && new_size == cur_size) {
      /* No change is requested, bail out. */
      return;
   }

   if (tid == 1 && (new_size == 0 || new_size > VG_(clstk_max_size))) {
      /* The main thread requests to use a stack without any size checking, or
         too big stack.  Fallback to the maximum allocated client stack. */

      /* TODO I think it is possible to give up on setting main stack anyway.
         Valgrind knows where it is located and it is already registered as
         VG_(clstk_id). */

      new_size = VG_(clstk_max_size);
      new_end = tst->client_stack_highest_byte;
      new_start = new_end + 1 - new_size;
   }

   if (tst->os_state.stk_id == NULL_STK_ID) {
      /* This thread doesn't have a stack set yet. */
      VG_(debugLog)(2, "syswrap-solaris",
                    "Stack set to %#lx-%#lx (new) for thread %u.\n",
                    new_start, new_end, tid);
      tst->os_state.stk_id = VG_(register_stack)(new_start, new_end);
   } else {
      /* Change a thread stack. */
      VG_(debugLog)(2, "syswrap-solaris",
                    "Stack set to %#lx-%#lx (change) for thread %u.\n",
                    new_start, new_end, tid);
      VG_(change_stack)(tst->os_state.stk_id, new_start, new_end);
   }
   tst->client_stack_highest_byte = new_end;
   tst->client_stack_szB = new_size;
}
/* ---------------------------------------------------------------------
   Door tracking.  Used mainly for server side where door_return()
   parameters alone do not contain sufficient information.
   Also used on client side when new door descriptors are passed via
   door_call() in desc_ptr.  Not used for tracking door descriptors
   explicitly open()'ed [generic fd tracking is used in that case].
   ------------------------------------------------------------------ */

/* One of these is allocated for each created door. */
typedef struct OpenDoor
{
   Bool server; /* TRUE = server door, FALSE = client door */
   Int fd;      /* The file descriptor. */
   union {
      /* Server side. */
      struct {
         Addr server_procedure; /* The door server procedure. */
         HChar *pathname;       /* NULL if unknown. */
      };
      /* Client side. */
      struct {
         /* Hook called during PRE door_call()
            to check contents of params->data_ptr. */
         void (*pre_mem_hook)(ThreadId tid, Int fd,
                              void *data_ptr, SizeT data_size);
         /* Hook called during POST door_call()
            to define contents of params->rbuf. */
         void (*post_mem_hook)(ThreadId tid, Int fd,
                               void *rbuf, SizeT rsize);
      };
   };
   struct OpenDoor *next, *prev;
} OpenDoor;

/* List of allocated door fds. */
static OpenDoor *doors_recorded = NULL;
static UInt nr_doors_recorded = 0;
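
/* Allocates a new door record and links it at the head of the doors_recorded
   list.  The caller fills in the server/client specific fields. */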
static OpenDoor *door_record_create(void)
{
   OpenDoor *d = VG_(malloc)("syswrap.door_record_create.1", sizeof(OpenDoor));
   d->prev = NULL;
   d->next = doors_recorded;
   if (doors_recorded != NULL)
      doors_recorded->prev = d;
   doors_recorded = d;
   nr_doors_recorded += 1;

   return d;
}
/* Records a server door. */
static void door_record_server(ThreadId tid, Addr server_procedure, Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == TRUE) && (d->server_procedure == server_procedure)) {
         if (d->pathname) {
            VG_(free)(d->pathname);
         }
         break;
      }
      d = d->next;
   }

   if (d == NULL)
      d = door_record_create();
   vg_assert(d != NULL);

   d->server = TRUE;
   d->fd = fd;
   d->server_procedure = server_procedure;
   d->pathname = NULL;
}
/* Records a client door. */
static void door_record_client(ThreadId tid, Int fd,
   void (*pre_mem_hook)(ThreadId tid, Int fd, void *data_ptr, SizeT data_size),
   void (*post_mem_hook)(ThreadId tid, Int fd, void *rbuf, SizeT rsize))
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == FALSE) && (d->fd == fd))
         break;
      d = d->next;
   }

   if (d == NULL)
      d = door_record_create();
   vg_assert(d != NULL);

   d->server = FALSE;
   d->fd = fd;
   d->pre_mem_hook = pre_mem_hook;
   d->post_mem_hook = post_mem_hook;
}
/* Revokes an open door, be it server side or client side. */
static void door_record_revoke(ThreadId tid, Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if (d->fd == fd) {
         if (d->prev != NULL)
            d->prev->next = d->next;
         else
            doors_recorded = d->next;
         if (d->next != NULL)
            d->next->prev = d->prev;

         if ((d->server == TRUE) && (d->pathname != NULL))
            VG_(free)(d->pathname);
         VG_(free)(d);
         nr_doors_recorded -= 1;
         return;
      }
      d = d->next;
   }
}
/* Attaches a server door to a filename. */
static void door_record_server_fattach(Int fd, HChar *pathname)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if (d->fd == fd) {
         vg_assert(d->server == TRUE);

         if (d->pathname != NULL)
            VG_(free)(d->pathname);
         d->pathname = VG_(strdup)("syswrap.door_server_fattach.1", pathname);
         return;
      }
      d = d->next;
   }
}
/* Finds a server door based on server procedure. */
static const OpenDoor *door_find_by_proc(Addr server_procedure)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server) && (d->server_procedure == server_procedure))
         return d;
      d = d->next;
   }

   return NULL;
}
/* Finds a client door based on fd. */
static const OpenDoor *door_find_by_fd(Int fd)
{
   OpenDoor *d = doors_recorded;

   while (d != NULL) {
      if ((d->server == FALSE) && (d->fd == fd))
         return d;
      d = d->next;
   }

   return NULL;
}
/* ---------------------------------------------------------------------
   PRE/POST wrappers for Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(solaris, name)

/* prototypes */
DECL_TEMPLATE(solaris, sys_exit);
#if defined(SOLARIS_SPAWN_SYSCALL)
DECL_TEMPLATE(solaris, sys_spawn);
#endif /* SOLARIS_SPAWN_SYSCALL */
#if defined(SOLARIS_OLD_SYSCALLS)
DECL_TEMPLATE(solaris, sys_open);
#endif /* SOLARIS_OLD_SYSCALLS */
DECL_TEMPLATE(solaris, sys_close);
DECL_TEMPLATE(solaris, sys_linkat);
DECL_TEMPLATE(solaris, sys_symlinkat);
DECL_TEMPLATE(solaris, sys_time);
DECL_TEMPLATE(solaris, sys_brk);
DECL_TEMPLATE(solaris, sys_stat);
DECL_TEMPLATE(solaris, sys_lseek);
DECL_TEMPLATE(solaris, sys_mount);
DECL_TEMPLATE(solaris, sys_readlinkat);
DECL_TEMPLATE(solaris, sys_stime);
DECL_TEMPLATE(solaris, sys_fstat);
#if defined(SOLARIS_FREALPATHAT_SYSCALL)
DECL_TEMPLATE(solaris, sys_frealpathat);
#endif /* SOLARIS_FREALPATHAT_SYSCALL */
DECL_TEMPLATE(solaris, sys_stty);
DECL_TEMPLATE(solaris, sys_gtty);
DECL_TEMPLATE(solaris, sys_pgrpsys);
DECL_TEMPLATE(solaris, sys_pipe);
DECL_TEMPLATE(solaris, sys_faccessat);
DECL_TEMPLATE(solaris, sys_mknodat);
DECL_TEMPLATE(solaris, sys_sysi86);
DECL_TEMPLATE(solaris, sys_shmsys);
DECL_TEMPLATE(solaris, sys_semsys);
DECL_TEMPLATE(solaris, sys_ioctl);
DECL_TEMPLATE(solaris, sys_fchownat);
DECL_TEMPLATE(solaris, sys_fdsync);
DECL_TEMPLATE(solaris, sys_execve);
DECL_TEMPLATE(solaris, sys_fcntl);
DECL_TEMPLATE(solaris, sys_renameat);
DECL_TEMPLATE(solaris, sys_unlinkat);
DECL_TEMPLATE(solaris, sys_fstatat);
DECL_TEMPLATE(solaris, sys_openat);
DECL_TEMPLATE(solaris, sys_tasksys);
DECL_TEMPLATE(solaris, sys_getpagesizes);
DECL_TEMPLATE(solaris, sys_lwp_park);
DECL_TEMPLATE(solaris, sys_sendfilev);
#if defined(SOLARIS_LWP_NAME_SYSCALL)
DECL_TEMPLATE(solaris, sys_lwp_name);
#endif /* SOLARIS_LWP_NAME_SYSCALL */
DECL_TEMPLATE(solaris, sys_privsys);
DECL_TEMPLATE(solaris, sys_ucredsys);
DECL_TEMPLATE(solaris, sys_sysfs);
DECL_TEMPLATE(solaris, sys_getmsg);
DECL_TEMPLATE(solaris, sys_putmsg);
DECL_TEMPLATE(solaris, sys_lstat);
DECL_TEMPLATE(solaris, sys_sigprocmask);
DECL_TEMPLATE(solaris, sys_sigsuspend);
DECL_TEMPLATE(solaris, sys_sigaction);
DECL_TEMPLATE(solaris, sys_sigpending);
DECL_TEMPLATE(solaris, sys_getsetcontext);
DECL_TEMPLATE(solaris, sys_fchmodat);
DECL_TEMPLATE(solaris, sys_mkdirat);
DECL_TEMPLATE(solaris, sys_statvfs);
DECL_TEMPLATE(solaris, sys_fstatvfs);
DECL_TEMPLATE(solaris, sys_nfssys);
DECL_TEMPLATE(solaris, sys_waitid);
DECL_TEMPLATE(solaris, sys_sigsendsys);
#if defined(SOLARIS_UTIMESYS_SYSCALL)
DECL_TEMPLATE(solaris, sys_utimesys);
#endif /* SOLARIS_UTIMESYS_SYSCALL */
#if defined(SOLARIS_UTIMENSAT_SYSCALL)
DECL_TEMPLATE(solaris, sys_utimensat);
#endif /* SOLARIS_UTIMENSAT_SYSCALL */
DECL_TEMPLATE(solaris, sys_sigresend);
DECL_TEMPLATE(solaris, sys_priocntlsys);
DECL_TEMPLATE(solaris, sys_pathconf);
DECL_TEMPLATE(solaris, sys_mmap);
#if defined(SOLARIS_UUIDSYS_SYSCALL)
DECL_TEMPLATE(solaris, sys_uuidsys);
#endif /* SOLARIS_UUIDSYS_SYSCALL */
DECL_TEMPLATE(solaris, sys_mmapobj);
DECL_TEMPLATE(solaris, sys_memcntl);
DECL_TEMPLATE(solaris, sys_getpmsg);
DECL_TEMPLATE(solaris, sys_putpmsg);
#if defined(SOLARIS_OLD_SYSCALLS)
DECL_TEMPLATE(solaris, sys_rename);
#endif /* SOLARIS_OLD_SYSCALLS */
DECL_TEMPLATE(solaris, sys_uname);
DECL_TEMPLATE(solaris, sys_setegid);
DECL_TEMPLATE(solaris, sys_sysconfig);
DECL_TEMPLATE(solaris, sys_systeminfo);
DECL_TEMPLATE(solaris, sys_seteuid);
DECL_TEMPLATE(solaris, sys_forksys);
#if defined(SOLARIS_GETRANDOM_SYSCALL)
DECL_TEMPLATE(solaris, sys_getrandom);
#endif /* SOLARIS_GETRANDOM_SYSCALL */
DECL_TEMPLATE(solaris, sys_sigtimedwait);
DECL_TEMPLATE(solaris, sys_yield);
DECL_TEMPLATE(solaris, sys_lwp_sema_post);
DECL_TEMPLATE(solaris, sys_lwp_sema_trywait);
DECL_TEMPLATE(solaris, sys_lwp_detach);
DECL_TEMPLATE(solaris, sys_modctl);
DECL_TEMPLATE(solaris, sys_fchroot);
#if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
DECL_TEMPLATE(solaris, sys_system_stats);
#endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
DECL_TEMPLATE(solaris, sys_gettimeofday);
DECL_TEMPLATE(solaris, sys_lwp_create);
DECL_TEMPLATE(solaris, sys_lwp_exit);
DECL_TEMPLATE(solaris, sys_lwp_suspend);
DECL_TEMPLATE(solaris, sys_lwp_continue);
#if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
DECL_TEMPLATE(solaris, sys_lwp_sigqueue);
#else
DECL_TEMPLATE(solaris, sys_lwp_kill);
#endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
DECL_TEMPLATE(solaris, sys_lwp_self);
DECL_TEMPLATE(solaris, sys_lwp_sigmask);
DECL_TEMPLATE(solaris, sys_lwp_private);
DECL_TEMPLATE(solaris, sys_lwp_wait);
DECL_TEMPLATE(solaris, sys_lwp_mutex_wakeup);
DECL_TEMPLATE(solaris, sys_lwp_cond_wait);
DECL_TEMPLATE(solaris, sys_lwp_cond_signal);
DECL_TEMPLATE(solaris, sys_lwp_cond_broadcast);
DECL_TEMPLATE(solaris, sys_pread);
DECL_TEMPLATE(solaris, sys_pwrite);
DECL_TEMPLATE(solaris, sys_lgrpsys);
DECL_TEMPLATE(solaris, sys_rusagesys);
DECL_TEMPLATE(solaris, sys_port);
DECL_TEMPLATE(solaris, sys_pollsys);
DECL_TEMPLATE(solaris, sys_labelsys);
DECL_TEMPLATE(solaris, sys_acl);
DECL_TEMPLATE(solaris, sys_auditsys);
DECL_TEMPLATE(solaris, sys_p_online);
DECL_TEMPLATE(solaris, sys_sigqueue);
DECL_TEMPLATE(solaris, sys_clock_gettime);
DECL_TEMPLATE(solaris, sys_clock_settime);
DECL_TEMPLATE(solaris, sys_clock_getres);
DECL_TEMPLATE(solaris, sys_timer_create);
DECL_TEMPLATE(solaris, sys_timer_delete);
DECL_TEMPLATE(solaris, sys_timer_settime);
DECL_TEMPLATE(solaris, sys_timer_gettime);
DECL_TEMPLATE(solaris, sys_timer_getoverrun);
DECL_TEMPLATE(solaris, sys_facl);
DECL_TEMPLATE(solaris, sys_door);
DECL_TEMPLATE(solaris, sys_schedctl);
DECL_TEMPLATE(solaris, sys_pset);
DECL_TEMPLATE(solaris, sys_resolvepath);
DECL_TEMPLATE(solaris, sys_lwp_mutex_timedlock);
DECL_TEMPLATE(solaris, sys_lwp_rwlock_sys);
DECL_TEMPLATE(solaris, sys_lwp_sema_timedwait);
DECL_TEMPLATE(solaris, sys_zone);
DECL_TEMPLATE(solaris, sys_getcwd);
DECL_TEMPLATE(solaris, sys_so_socket);
DECL_TEMPLATE(solaris, sys_so_socketpair);
DECL_TEMPLATE(solaris, sys_bind);
DECL_TEMPLATE(solaris, sys_listen);
DECL_TEMPLATE(solaris, sys_accept);
DECL_TEMPLATE(solaris, sys_connect);
DECL_TEMPLATE(solaris, sys_shutdown);
DECL_TEMPLATE(solaris, sys_recv);
DECL_TEMPLATE(solaris, sys_recvfrom);
DECL_TEMPLATE(solaris, sys_recvmsg);
DECL_TEMPLATE(solaris, sys_send);
DECL_TEMPLATE(solaris, sys_sendmsg);
DECL_TEMPLATE(solaris, sys_sendto);
DECL_TEMPLATE(solaris, sys_getpeername);
DECL_TEMPLATE(solaris, sys_getsockname);
DECL_TEMPLATE(solaris, sys_getsockopt);
DECL_TEMPLATE(solaris, sys_setsockopt);
DECL_TEMPLATE(solaris, sys_lwp_mutex_register);
DECL_TEMPLATE(solaris, sys_uucopy);
DECL_TEMPLATE(solaris, sys_umount2);

DECL_TEMPLATE(solaris, fast_gethrtime);
DECL_TEMPLATE(solaris, fast_gethrvtime);
DECL_TEMPLATE(solaris, fast_gethrestime);
DECL_TEMPLATE(solaris, fast_getlgrp);
#if defined(SOLARIS_GETHRT_FASTTRAP)
DECL_TEMPLATE(solaris, fast_gethrt);
#endif /* SOLARIS_GETHRT_FASTTRAP */
#if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
DECL_TEMPLATE(solaris, fast_getzoneoffset);
#endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
/* implementation */
PRE(sys_exit)
{
   /* void exit(int status); */
   ThreadId t;

   PRINT("sys_exit( %ld )", SARG1);
   PRE_REG_READ1(void, "exit", int, status);

   for (t = 1; t < VG_N_THREADS; t++) {
      if (VG_(threads)[t].status == VgTs_Empty)
         continue;

      /* Assign the exit code, VG_(nuke_all_threads_except) will assign
         the exitreason. */
      VG_(threads)[t].os_state.exitcode = ARG1;
   }

   /* Indicate in all other threads that the process is exiting.
      Then wait using VG_(reap_threads) for these threads to disappear.
      See comments in syswrap-linux.c, PRE(sys_exit_group) wrapper,
      for reasoning why this cannot give a deadlock. */
   VG_(nuke_all_threads_except)(tid, VgSrc_ExitProcess);
   VG_(reap_threads)(tid);
   VG_(threads)[tid].exitreason = VgSrc_ExitThread;
   /* We do assign VgSrc_ExitThread and not VgSrc_ExitProcess, as this thread
      is the thread calling exit_group and so its registers must be considered
      as not reachable.  See pub_tool_machine.h VG_(apply_to_GP_regs). */

   /* We have to claim the syscall already succeeded. */
   SET_STATUS_Success(0);
}
#if defined(SOLARIS_SPAWN_SYSCALL)
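/* Checks a single file action (kfile_attr) passed to spawn().  Returns False
   (with the syscall status already set to a failure) if the referenced file
   descriptor must not be touched by the client, True otherwise. */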
static Bool spawn_pre_check_kfa(ThreadId tid, SyscallStatus *status,
                                vki_kfile_attr_t *kfa)
{
   PRE_FIELD_READ("spawn(attrs->kfa_size)", kfa->kfa_size);
   PRE_FIELD_READ("spawn(attrs->kfa_type)", kfa->kfa_type);

   if (ML_(safe_to_deref)(kfa, kfa->kfa_size)) {
      switch (kfa->kfa_type) {
      case VKI_FA_DUP2:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         PRE_FIELD_READ("spawn(attrs->kfa_newfiledes)", kfa->kfa_newfiledes);
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(dup2)", tid, False) ||
             !ML_(fd_allowed)(kfa->kfa_newfiledes, "spawn(dup2)", tid, False)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         break;
      case VKI_FA_CLOSE:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         /* If doing -d style logging (which is to fd = 2 = stderr),
            don't allow that filedes to be closed.  See ML_(fd_allowed)(). */
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(close)", tid, False) ||
             (kfa->kfa_filedes == 2 && VG_(debugLog_getLevel)() > 0)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         break;
      case VKI_FA_CLOSEFROM:
         /* :TODO: All file descriptors greater than or equal to
            kfa->kfa_filedes would have to be checked. */
         VG_(unimplemented)("Support for spawn() with file attribute type "
                            "FA_CLOSEFROM.");
         break;
      case VKI_FA_OPEN:
         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
         PRE_FIELD_READ("spawn(attrs->kfa_oflag)", kfa->kfa_oflag);
         PRE_FIELD_READ("spawn(attrs->kfa_mode)", kfa->kfa_mode);
         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(open)", tid, False)) {
            SET_STATUS_Failure(VKI_EBADF);
            return False;
         }
         /* fallthrough */
      case VKI_FA_CHDIR:
         PRE_FIELD_READ("spawn(attrs->kfa_pathsize)", kfa->kfa_pathsize);
         if (kfa->kfa_pathsize != 0) {
            PRE_MEM_RASCIIZ("spawn(attrs->kfa_data)", (Addr) kfa->kfa_data);
         }
         break;
      default:
         VG_(unimplemented)("Support for spawn() with file attribute type %u.",
                            kfa->kfa_type);
      }
   }

   return True;
}
PRE(sys_spawn)
{
   /* int spawn(char *path, void *attrs, size_t attrsize,
                char *argenv, size_t aesize); */
   PRINT("sys_spawn ( %#lx(%s), %#lx, %lu, %#lx, %lu )",
         ARG1, (HChar *) ARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "spawn", const char *, path, void *, attrs,
                 size_t, attrsize, char *, argenv, size_t, aesize);

   /* First check input arguments. */
   PRE_MEM_RASCIIZ("spawn(path)", ARG1);
   if (ARG3 > 0) {
      /*  --- vki_kspawn_attr_t --
         | ksa_version          |
         | ksa_size             |
         | ksa_attr_off         | -----| (only if != 0)
         | ksa_attr_size        |      |
         | ksa_path_off         | =====|====| (only if != 0)
         | ksa_path_size        |      |    |
         | ksa_shell_off        | -----|----|----| (only if != 0)
         | ksa_shell_size       |      |    |    |
         | ksa_data[0]          |      |    |    |
          ------------------------     |    |    |
         | vki_spawn_attr_t     | <----|    |    |
          ------------------------          |    |
         | path                 | <---------|    |
          ------------------------               |
         | shell                | <--------------|
          ------------------------
         | file actions         | (not included in ksa_size, only in ARG3)
          ------------------------

         ksa_size = sizeof(vki_kspawn_attr_t) + ksa_attr_size + ksa_path_size
                    + ksa_shell_size
         attrs_size (ARG3) = ksa_size + file actions size */

      vki_kspawn_attr_t *attrs = (vki_kspawn_attr_t *) ARG2;
      PRE_FIELD_READ("spawn(attrs->ksa_version)", attrs->ksa_version);
      PRE_FIELD_READ("spawn(attrs->ksa_size)", attrs->ksa_size);
      PRE_FIELD_READ("spawn(attrs->ksa_attr_off)", attrs->ksa_attr_off);
      PRE_FIELD_READ("spawn(attrs->ksa_path_off)", attrs->ksa_path_off);
      PRE_FIELD_READ("spawn(attrs->ksa_shell_off)", attrs->ksa_shell_off);

      if (ML_(safe_to_deref)(attrs, sizeof(vki_kspawn_attr_t))) {
         if (attrs->ksa_version != VKI_SPAWN_VERSION) {
            VG_(unimplemented)("Support for spawn() with attributes "
                               "version %u.", attrs->ksa_version);
         }

         if (attrs->ksa_attr_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_attr_size)",
                           attrs->ksa_attr_size);
            vki_spawn_attr_t *sap =
               (vki_spawn_attr_t *) ((Addr) attrs + attrs->ksa_attr_off);
            PRE_MEM_READ("spawn(attrs->ksa_attr)",
                         (Addr) sap, attrs->ksa_attr_size);
            if (ML_(safe_to_deref)(sap, sizeof(vki_spawn_attr_t))) {
               if (sap->sa_psflags & VKI_POSIX_SPAWN_SETVAMASK_NP) {
                  VG_(unimplemented)("Support for spawn() with attributes flag "
                                     "including POSIX_SPAWN_SETVAMASK_NP.");
               }
               /* paranoia */
               Int rem = sap->sa_psflags & ~(
                  VKI_POSIX_SPAWN_RESETIDS | VKI_POSIX_SPAWN_SETPGROUP |
                  VKI_POSIX_SPAWN_SETSIGDEF | VKI_POSIX_SPAWN_SETSIGMASK |
                  VKI_POSIX_SPAWN_SETSCHEDPARAM | VKI_POSIX_SPAWN_SETSCHEDULER |
                  VKI_POSIX_SPAWN_SETSID_NP | VKI_POSIX_SPAWN_SETVAMASK_NP |
                  VKI_POSIX_SPAWN_SETSIGIGN_NP | VKI_POSIX_SPAWN_NOSIGCHLD_NP |
                  VKI_POSIX_SPAWN_WAITPID_NP | VKI_POSIX_SPAWN_NOEXECERR_NP);
               if (rem != 0) {
                  VG_(unimplemented)("Support for spawn() with attributes flag "
                                     "%#x.", sap->sa_psflags);
               }
            }
         }

         if (attrs->ksa_path_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_path_size)",
                           attrs->ksa_path_size);
            PRE_MEM_RASCIIZ("spawn(attrs->ksa_path)",
                            (Addr) attrs + attrs->ksa_path_off);
         }

         if (attrs->ksa_shell_off != 0) {
            PRE_FIELD_READ("spawn(attrs->ksa_shell_size)",
                           attrs->ksa_shell_size);
            PRE_MEM_RASCIIZ("spawn(attrs->ksa_shell)",
                            (Addr) attrs + attrs->ksa_shell_off);
         }

         vki_kfile_attr_t *kfa = (vki_kfile_attr_t *) (ARG2 + attrs->ksa_size);
         while ((Addr) kfa < ARG2 + ARG3) {
            if (spawn_pre_check_kfa(tid, status, kfa) == False) {
               return;
            }
            kfa = (vki_kfile_attr_t *) ((Addr) kfa + kfa->kfa_size);
         }
      }
   }
   PRE_MEM_READ("spawn(argenv)", ARG4, ARG5);

   /* Check that the name at least begins in client-accessible storage. */
   if ((ARG1 == 0) || !ML_(safe_to_deref)((HChar *) ARG1, 1)) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }

   /* Check that attrs reside in client-accessible storage. */
   if (ARG2 != 0) {
      if (!VG_(am_is_valid_for_client)(ARG2, ARG3, VKI_PROT_READ)) {
         SET_STATUS_Failure(VKI_EFAULT);
         return;
      }
   }

   /* Check that the argenv resides in client-accessible storage.
      Solaris does not allow spawn() to be performed without any arguments
      and environment variables specified. */
   if ((ARG4 == 0) /* obviously bogus */ ||
       !VG_(am_is_valid_for_client)(ARG4, ARG5, VKI_PROT_READ)) {
      SET_STATUS_Failure(VKI_EFAULT);
      return;
   }

   /* Copy existing attrs or create empty minimal ones. */
   vki_kspawn_attr_t *attrs;
   SizeT attrs_size;
   if (ARG2 == 0) {
      /* minimalistic kspawn_attr_t + spawn_attr_t */
      attrs_size = sizeof(vki_kspawn_attr_t) + sizeof(vki_spawn_attr_t);
      attrs = VG_(calloc)("syswrap.spawn.1", 1, attrs_size);
      attrs->ksa_version = VKI_SPAWN_VERSION;
      attrs->ksa_size = attrs_size;
      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
   } else if (((vki_kspawn_attr_t *) ARG2)->ksa_attr_off == 0) {
      /* existing kspawn_attr_t but missing spawn_attr_t */
      attrs_size = ARG3 + sizeof(vki_spawn_attr_t);
      attrs = VG_(calloc)("syswrap.spawn.2", 1, attrs_size);
      VG_(memcpy)(attrs, (void *) ARG2, sizeof(vki_kspawn_attr_t));
      SizeT file_actions_size = ARG3 - attrs->ksa_size;
      attrs->ksa_size += sizeof(vki_spawn_attr_t);
      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
      if (attrs->ksa_path_off != 0) {
         VG_(memcpy)((HChar *) attrs + attrs->ksa_path_off +
                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
                     attrs->ksa_path_off, attrs->ksa_path_size);
         attrs->ksa_path_off += sizeof(vki_spawn_attr_t);
      }
      if (attrs->ksa_shell_off != 0) {
         VG_(memcpy)((HChar *) attrs + attrs->ksa_shell_off +
                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
                     attrs->ksa_shell_off, attrs->ksa_shell_size);
         attrs->ksa_shell_off += sizeof(vki_spawn_attr_t);
      }
      if (file_actions_size > 0) {
         VG_(memcpy)((HChar *) attrs + attrs_size - file_actions_size,
                     (HChar *) ARG2 + ARG3 - file_actions_size,
                     file_actions_size);
      }
   } else {
      /* existing kspawn_attr_t + spawn_attr_t */
      attrs_size = ARG3;
      attrs = VG_(malloc)("syswrap.spawn.3", attrs_size);
      VG_(memcpy)(attrs, (void *) ARG2, attrs_size);
   }
   vki_spawn_attr_t *spa = (vki_spawn_attr_t *) ((HChar *) attrs +
                                                 attrs->ksa_attr_off);

   /* Convert argv and envp parts of argenv into their separate XArray's.
      Duplicate strings because argv and envp will be then modified. */
   XArray *argv = VG_(newXA)(VG_(malloc), "syswrap.spawn.4",
                             VG_(free), sizeof(HChar *));
   XArray *envp = VG_(newXA)(VG_(malloc), "syswrap.spawn.5",
                             VG_(free), sizeof(HChar *));
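
   /* The argenv blob has the format produced below by the COPY_*_TO_ARGENV
      macros: each string is prefixed with '\1', the argv part is terminated
      by a single '\0', and the envp part is terminated by another '\0'. */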
   HChar *argenv = (HChar *) ARG4;
   XArray *current_xa = argv;
   while ((Addr) argenv < ARG4 + ARG5) {
      if (*argenv == '\0') {
         argenv += 1;
         if (current_xa == argv) {
            current_xa = envp;
            if ((*argenv == '\0') && ((Addr) argenv == ARG4 + ARG5 - 1)) {
               /* envp part is empty, it contained only {NULL}. */
               break;
            }
         } else {
            if ((Addr) argenv != ARG4 + ARG5) {
               if (VG_(clo_trace_syscalls))
                  VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
               SET_STATUS_Failure(VKI_EINVAL);
               goto exit;
            }
            break;
         }
      }

      if (*argenv != '\1') {
         if (VG_(clo_trace_syscalls))
            VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
         SET_STATUS_Failure(VKI_EINVAL);
         goto exit;
      }
      argenv += 1;

      HChar *duplicate = VG_(strdup)("syswrap.spawn.6", argenv);
      VG_(addToXA)(current_xa, &duplicate);
      argenv += VG_(strlen)(argenv) + 1;
   }

   /* Debug-only printing. */
   if (0) {
      VG_(printf)("\nARG1 = %#lx(%s)\n", ARG1, (HChar *) ARG1);
      VG_(printf)("ARG4 (argv) = ");
      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
      }

      VG_(printf)("\nARG4 (envp) = ");
      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
      }
      VG_(printf)("\n");
   }

   /* Decide whether or not we want to trace the spawned child.
      Omit the executable name itself from child_argv. */
   const HChar **child_argv = VG_(malloc)("syswrap.spawn.7",
      (VG_(sizeXA)(argv) - 1) * sizeof(HChar *));
   for (Word i = 1; i < VG_(sizeXA)(argv); i++) {
      child_argv[i - 1] = *(HChar **) VG_(indexXA)(argv, i);
   }
   Bool trace_this_child = VG_(should_we_trace_this_child)((HChar *) ARG1,
                                                           child_argv);
   VG_(free)(child_argv);

   /* If we're tracing the child, and the launcher name looks bogus (possibly
      because launcher.c couldn't figure it out, see comments therein) then we
      have no option but to fail. */
   if (trace_this_child &&
       (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
      SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
      goto exit;
   }

   /* Set up the child's exe path. */
   const HChar *path = (const HChar *) ARG1;
   const HChar *launcher_basename = NULL;
   if (trace_this_child) {
      /* We want to exec the launcher. */
      path = VG_(name_of_launcher);
      vg_assert(path != NULL);

      launcher_basename = VG_(strrchr)(path, '/');
      if ((launcher_basename == NULL) || (launcher_basename[1] == '\0')) {
         launcher_basename = path; /* hmm, tres dubious */
      } else {
         launcher_basename++;
      }
   }

   /* Set up the child's environment.

      Remove the valgrind-specific stuff from the environment so the child
      doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc.  This is done
      unconditionally, since if we are tracing the child, the child valgrind
      will set up the appropriate client environment.

      Then, if tracing the child, set VALGRIND_LIB for it. */
   HChar **child_envp = VG_(calloc)("syswrap.spawn.8",
                                    VG_(sizeXA)(envp) + 1, sizeof(HChar *));
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      child_envp[i] = *(HChar **) VG_(indexXA)(envp, i);
   }
   VG_(env_remove_valgrind_env_stuff)(child_envp, /* ro_strings */ False,
                                      VG_(free));

   /* Stuff was removed from child_envp, reflect that in envp XArray. */
   VG_(dropTailXA)(envp, VG_(sizeXA)(envp));
   for (UInt i = 0; child_envp[i] != NULL; i++) {
      VG_(addToXA)(envp, &child_envp[i]);
   }
   VG_(free)(child_envp);

   if (trace_this_child) {
      /* Set VALGRIND_LIB in envp. */
      SizeT len = VG_(strlen)(VALGRIND_LIB) + VG_(strlen)(VG_(libdir)) + 2;
      HChar *valstr = VG_(malloc)("syswrap.spawn.9", len);
      VG_(sprintf)(valstr, "%s=%s", VALGRIND_LIB, VG_(libdir));
      VG_(addToXA)(envp, &valstr);
   }

   /* Set up the child's args.  If not tracing it, they are left untouched.
      Otherwise, they are:

      [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG4[1..],

      except that the first VG_(args_for_valgrind_noexecpass) args are
      omitted. */
   if (trace_this_child) {
      vg_assert(VG_(args_for_valgrind) != NULL);
      vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
      vg_assert(VG_(args_for_valgrind_noexecpass)
                <= VG_(sizeXA)(VG_(args_for_valgrind)));

      /* So what args will there be?  Bear with me... */
      /* ... launcher basename, ... */
      HChar *duplicate = VG_(strdup)("syswrap.spawn.10", launcher_basename);
      VG_(insertIndexXA)(argv, 0, &duplicate);

      /* ... Valgrind's args, ... */
      UInt v_args = VG_(sizeXA)(VG_(args_for_valgrind));
      v_args -= VG_(args_for_valgrind_noexecpass);
      for (Word i = VG_(args_for_valgrind_noexecpass);
           i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
         duplicate = VG_(strdup)("syswrap.spawn.11",
                        *(HChar **) VG_(indexXA)(VG_(args_for_valgrind), i));
         VG_(insertIndexXA)(argv, 1 + i, &duplicate);
      }

      /* ... name of client executable, ... */
      duplicate = VG_(strdup)("syswrap.spawn.12", (HChar *) ARG1);
      VG_(insertIndexXA)(argv, 1 + v_args, &duplicate);

      /* ... and args for client executable (without [0]). */
      duplicate = *(HChar **) VG_(indexXA)(argv, 1 + v_args + 1);
      VG_(free)(duplicate);
      VG_(removeIndexXA)(argv, 1 + v_args + 1);
   }

   /* Debug-only printing. */
   if (0) {
      VG_(printf)("\npath = %s\n", path);
      VG_(printf)("argv = ");
      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
      }

      VG_(printf)("\nenvp = ");
      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
      }
      VG_(printf)("\n");
   }

   /* Set the signal state up for the spawned child.

      Signals set to be caught are equivalent to signals set to the default
      action, from the child's perspective.

      Therefore query SCSS and prepare default (DFL) and ignore (IGN) signal
      sets.  Then combine these sets with those passed from the client, if
      flags POSIX_SPAWN_SETSIGDEF or POSIX_SPAWN_SETSIGIGN_NP have been
      specified. */
   vki_sigset_t sig_default;
   vki_sigset_t sig_ignore;
   VG_(sigemptyset)(&sig_default);
   VG_(sigemptyset)(&sig_ignore);
   for (Int i = 1; i < VG_(max_signal); i++) {
      vki_sigaction_fromK_t sa;
      VG_(do_sys_sigaction)(i, NULL, &sa); /* query SCSS */
      if (sa.sa_handler == VKI_SIG_IGN) {
         VG_(sigaddset)(&sig_ignore, i);
      } else {
         VG_(sigaddset)(&sig_default, i);
      }
   }

   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGDEF) {
      VG_(sigaddset_from_set)(&spa->sa_sigdefault, &sig_default);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGDEF;
      spa->sa_sigdefault = sig_default;
   }

   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGIGN_NP) {
      VG_(sigaddset_from_set)(&spa->sa_sigignore, &sig_ignore);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGIGN_NP;
      spa->sa_sigignore = sig_ignore;
   }

   /* Set the signal mask for the spawned child.

      Analogous to signal handlers: query SCSS for blocked signals mask
      and combine this mask with that passed from the client, if flag
      POSIX_SPAWN_SETSIGMASK has been specified. */
   vki_sigset_t *sigmask = &VG_(get_ThreadState)(tid)->sig_mask;
   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGMASK) {
      VG_(sigaddset_from_set)(&spa->sa_sigmask, sigmask);
   } else {
      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGMASK;
      spa->sa_sigmask = *sigmask;
   }

   /* Lastly, reconstruct argenv from argv + envp. */
   SizeT argenv_size = 1 + 1;
   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(argv, i)) + 2;
   }
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(envp, i)) + 2;
   }

   argenv = VG_(malloc)("syswrap.spawn.13", argenv_size);
   HChar *current = argenv;
#define COPY_CHAR_TO_ARGENV(dst, character) \
   do {                                     \
      *(dst) = character;                   \
      (dst) += 1;                           \
   } while (0)
#define COPY_STRING_TO_ARGENV(dst, src)        \
   do {                                        \
      COPY_CHAR_TO_ARGENV(dst, '\1');          \
      SizeT src_len = VG_(strlen)((src)) + 1;  \
      VG_(memcpy)((dst), (src), src_len);      \
      (dst) += src_len;                        \
   } while (0)

   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(argv, i));
   }
   COPY_CHAR_TO_ARGENV(current, '\0');
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(envp, i));
   }
   COPY_CHAR_TO_ARGENV(current, '\0');
   vg_assert(current == argenv + argenv_size);
#undef COPY_CHAR_TO_ARGENV
#undef COPY_STRING_TO_ARGENV

   /* Actual spawn() syscall. */
   SysRes res = VG_(do_syscall5)(__NR_spawn, (UWord) path, (UWord) attrs,
                                 attrs_size, (UWord) argenv, argenv_size);
   SET_STATUS_from_SysRes(res);
   VG_(free)(argenv);

   if (SUCCESS) {
      PRINT(" spawn: process %d spawned child %ld\n", VG_(getpid)(), RES);
   }

exit:
   VG_(free)(attrs);
   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
      VG_(free)(*(HChar **) VG_(indexXA)(argv, i));
   }
   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
      VG_(free)(*(HChar **) VG_(indexXA)(envp, i));
   }
   VG_(deleteXA)(argv);
   VG_(deleteXA)(envp);
}
#endif /* SOLARIS_SPAWN_SYSCALL */
1632 /* Handles the case where the open is of /proc/self/psinfo or
1633 /proc/<pid>/psinfo. Fetch fresh contents into psinfo_t,
1634 fake fname, psargs, argc and argv. Write the structure to the fake
1635 file we cooked up at startup (in m_main) and give out a copy of this
1636 fd. Also seek the cloned fd back to the start. */
1637 static Bool handle_psinfo_open(SyscallStatus *status,
1638 Bool use_openat,
1639 const HChar *filename,
1640 Int arg1, UWord arg3, UWord arg4)
1642 if (!ML_(safe_to_deref)((const void *) filename, 1))
1643 return False;
1645 HChar name[VKI_PATH_MAX]; // large enough
1646 VG_(sprintf)(name, "/proc/%d/psinfo", VG_(getpid)());
1648 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/psinfo"))
1649 return False;
1651 /* Use original arguments to open() or openat(). */
1652 SysRes sres;
1653 #if defined(SOLARIS_OLD_SYSCALLS)
1654 if (use_openat)
1655 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1656 arg3, arg4);
1657 else
1658 sres = VG_(do_syscall3)(SYS_open, (UWord) filename, arg3, arg4);
1659 #else
1660 vg_assert(use_openat == True);
1661 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1662 arg3, arg4);
1663 #endif /* SOLARIS_OLD_SYSCALLS */
1665 if (sr_isError(sres)) {
1666 SET_STATUS_from_SysRes(sres);
1667 return True;
1669 Int fd = sr_Res(sres);
1671 vki_psinfo_t psinfo;
1672 sres = VG_(do_syscall3)(SYS_read, fd, (UWord) &psinfo, sizeof(psinfo));
1673 if (sr_isError(sres)) {
1674 SET_STATUS_from_SysRes(sres);
1675 VG_(close)(fd);
1676 return True;
1678 if (sr_Res(sres) != sizeof(psinfo)) {
1679 SET_STATUS_Failure(VKI_ENODATA);
1680 VG_(close)(fd);
1681 return True;
1684 VG_(close)(fd);
1686 VG_(client_fname)(psinfo.pr_fname, sizeof(psinfo.pr_fname), True);
1687 VG_(client_cmd_and_args)(psinfo.pr_psargs, sizeof(psinfo.pr_psargs));
1689 Addr *ptr = (Addr *) VG_(get_initial_client_SP)();
1690 psinfo.pr_argc = *ptr++;
1691 psinfo.pr_argv = (Addr) ptr;
1693 sres = VG_(do_syscall4)(SYS_pwrite, VG_(cl_psinfo_fd),
1694 (UWord) &psinfo, sizeof(psinfo), 0);
1695 if (sr_isError(sres)) {
1696 SET_STATUS_from_SysRes(sres);
1697 return True;
1700 sres = VG_(dup)(VG_(cl_psinfo_fd));
1701 SET_STATUS_from_SysRes(sres);
1702 if (!sr_isError(sres)) {
1703 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1704 if (off < 0)
1705 SET_STATUS_Failure(VKI_EMFILE);
1708 return True;
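/* For reference, a minimal sketch of the client-side pattern that the
   wrapper above serves (hypothetical client code, not Valgrind code):

     int fd = open("/proc/self/psinfo", O_RDONLY);
     psinfo_t ps;
     read(fd, &ps, sizeof(ps));   // sees the faked pr_fname/pr_psargs/argv
     close(fd);

   The open() ends up with a dup of VG_(cl_psinfo_fd) rewound to offset 0,
   so the subsequent read() returns the psinfo_t written out just above
   rather than the kernel's view of the Valgrind tool process. */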
1711 #if defined(SOLARIS_PROC_CMDLINE)
1712 /* Handles the case where the open is of /proc/self/cmdline or
1713 /proc/<pid>/cmdline. Just give it a copy of VG_(cl_cmdline_fd) for the
1714 fake file we cooked up at startup (in m_main). Also, seek the
1715 cloned fd back to the start. */
1716 static Bool handle_cmdline_open(SyscallStatus *status, const HChar *filename)
1718 if (!ML_(safe_to_deref)((const void *) filename, 1))
1719 return False;
1721 HChar name[VKI_PATH_MAX]; // large enough
1722 VG_(sprintf)(name, "/proc/%d/cmdline", VG_(getpid)());
1724 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/cmdline"))
1725 return False;
1727 SysRes sres = VG_(dup)(VG_(cl_cmdline_fd));
1728 SET_STATUS_from_SysRes(sres);
1729 if (!sr_isError(sres)) {
1730 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1731 if (off < 0)
1732 SET_STATUS_Failure(VKI_EMFILE);
1735 return True;
1737 #endif /* SOLARIS_PROC_CMDLINE */
1740 #if defined(SOLARIS_OLD_SYSCALLS)
1741 PRE(sys_open)
1743 /* int open(const char *filename, int flags);
1744 int open(const char *filename, int flags, mode_t mode); */
1746 if (ARG2 & VKI_O_CREAT) {
1747 /* 3-arg version */
1748 PRINT("sys_open ( %#lx(%s), %ld, %ld )", ARG1, (HChar *) ARG1,
1749 SARG2, ARG3);
1750 PRE_REG_READ3(long, "open", const char *, filename,
1751 int, flags, vki_mode_t, mode);
1752 } else {
1753 /* 2-arg version */
1754 PRINT("sys_open ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
1755 PRE_REG_READ2(long, "open", const char *, filename, int, flags);
1758 PRE_MEM_RASCIIZ("open(filename)", ARG1);
1760 if (ML_(handle_auxv_open)(status, (const HChar*)ARG1, ARG2))
1761 return;
1763 if (handle_psinfo_open(status, False /*use_openat*/, (const HChar*)ARG1, 0,
1764 ARG2, ARG3))
1765 return;
1767 *flags |= SfMayBlock;
1770 POST(sys_open)
1772 if (!ML_(fd_allowed)(RES, "open", tid, True)) {
1773 VG_(close)(RES);
1774 SET_STATUS_Failure(VKI_EMFILE);
1775 } else if (VG_(clo_track_fds))
1776 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG1);
1778 #endif /* SOLARIS_OLD_SYSCALLS */
1780 PRE(sys_close)
1782 WRAPPER_PRE_NAME(generic, sys_close)(tid, layout, arrghs, status,
1783 flags);
1786 POST(sys_close)
1788 WRAPPER_POST_NAME(generic, sys_close)(tid, arrghs, status);
1789 door_record_revoke(tid, ARG1);
1790 /* Possibly an explicitly open'ed client door fd was just closed.
1791 Generic sys_close wrapper calls this only if VG_(clo_track_fds) = True. */
1792 if (!VG_(clo_track_fds))
1793 ML_(record_fd_close)(ARG1);
1796 PRE(sys_linkat)
1798 /* int linkat(int fd1, const char *path1, int fd2,
1799 const char *path2, int flag); */
1802 /* Interpret the first and third arguments as 32-bit values even on 64-bit
1803 architecture. This is different from Linux, for example, where glibc
1804 sign-extends them. */
1805 Int fd1 = (Int) ARG1;
1806 Int fd2 = (Int) ARG3;
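/* Illustration (hypothetical register value): if ARG1 held
   0xdeadbeef00000003, the Solaris kernel would act on fd 3 only, so the
   wrapper must do the same -- hence the (Int) casts above instead of using
   the full 64-bit ARG1/ARG3 values in the checks below. */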
1808 PRINT("sys_linkat ( %d, %#lx(%s), %d, %#lx(%s), %ld )",
1809 fd1, ARG2, (HChar *) ARG2, fd2, ARG4, (HChar *) ARG4, SARG5);
1810 PRE_REG_READ5(long, "linkat", int, fd1, const char *, path1,
1811 int, fd2, const char *, path2, int, flags);
1812 PRE_MEM_RASCIIZ("linkat(path1)", ARG2);
1813 PRE_MEM_RASCIIZ("linkat(path2)", ARG4);
1815 /* Be strict but ignore fd1/fd2 for absolute path1/path2. */
1816 if (fd1 != VKI_AT_FDCWD
1817 && ML_(safe_to_deref)((void *) ARG2, 1)
1818 && ((HChar *) ARG2)[0] != '/'
1819 && !ML_(fd_allowed)(fd1, "linkat", tid, False)) {
1820 SET_STATUS_Failure(VKI_EBADF);
1822 if (fd2 != VKI_AT_FDCWD
1823 && ML_(safe_to_deref)((void *) ARG4, 1)
1824 && ((HChar *) ARG4)[0] != '/'
1825 && !ML_(fd_allowed)(fd2, "linkat", tid, False)) {
1826 SET_STATUS_Failure(VKI_EBADF);
1829 *flags |= SfMayBlock;
1832 PRE(sys_symlinkat)
1834 /* int symlinkat(const char *path1, int fd, const char *path2); */
1836 /* Interpret the second argument as 32-bit value even on 64-bit architecture.
1837 This is different from Linux, for example, where glibc sign-extends it. */
1838 Int fd = (Int) ARG2;
1840 PRINT("sys_symlinkat ( %#lx(%s), %d, %#lx(%s) )",
1841 ARG1, (HChar *) ARG1, fd, ARG3, (HChar *) ARG3);
1842 PRE_REG_READ3(long, "symlinkat", const char *, path1, int, fd,
1843 const char *, path2);
1844 PRE_MEM_RASCIIZ("symlinkat(path1)", ARG1);
1845 PRE_MEM_RASCIIZ("symlinkat(path2)", ARG3);
1847 /* Be strict but ignore fd for absolute path2. */
1848 if (fd != VKI_AT_FDCWD
1849 && ML_(safe_to_deref)((void *) ARG3, 1)
1850 && ((HChar *) ARG3)[0] != '/'
1851 && !ML_(fd_allowed)(fd, "symlinkat", tid, False))
1852 SET_STATUS_Failure(VKI_EBADF);
1854 *flags |= SfMayBlock;
1857 PRE(sys_time)
1859 /* time_t time(); */
1860 PRINT("sys_time ( )");
1861 PRE_REG_READ0(long, "time");
1864 /* Data segment for brk (heap). It is an expandable anonymous mapping
1865 abutting a 1-page reservation. The data segment starts at VG_(brk_base)
1866 and runs up to VG_(brk_limit). Neither of these two values has to be
1867 page-aligned.
1868 Initial data segment is established (see initimg-solaris.c for rationale):
1869 - directly during client program image initialization,
1870 - or on demand when the executed program is the runtime linker itself,
1871 after it has loaded its target dynamic executable (see PRE(sys_mmapobj)),
1872 or when the first brk() syscall is made.
1874 Notable facts:
1875 - VG_(brk_base) is not page aligned; does not move
1876 - VG_(brk_limit) moves between [VG_(brk_base), data segment end]
1877 - data segment end is always page aligned
1878 - right after data segment end is 1-page reservation
1880        |        heap         | 1 page
1881 +------+------+--------------+-------+
1882 | BSS  | anon |     anon     | resvn |
1883 +------+------+--------------+-------+
1885        ^      ^       ^      ^
1886        |      |       |      |
1887        |      |       |      data segment end
1888        |      |       VG_(brk_limit) -- no alignment constraint
1889        |      brk_base_pgup -- page aligned
1890        VG_(brk_base) -- not page aligned -- does not move
1892 Because VG_(brk_base) is not page-aligned and is initially located within
1893 pre-established BSS (data) segment, special care has to be taken in the code
1894 below to handle this feature.
1896 The reservation segment is used to prevent the data segment from merging
1897 with a pre-existing segment. This is normally not an issue because the
1898 address space manager satisfies requests for client address space from the
1899 highest available addresses. However, when memory is low, the data segment
1900 can run into mmap'ed objects, and the reservation segment keeps them apart.
1901 The page that contains VG_(brk_base) is already allocated by the program's
1902 loaded data segment. The brk syscall wrapper handles this special case. */
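/* A small worked example of the quantities above (hypothetical addresses,
   assuming 4 KiB pages):

     VG_(brk_base)    = 0x08071234    end of loaded data/BSS, not aligned
     brk_base_pgup    = 0x08072000    VG_PGROUNDUP(VG_(brk_base))
     VG_(brk_limit)   = 0x08074f00    current break, not aligned
     data segment end = 0x08172000    always page aligned
     reservation      = 0x08172000 - 0x08172fff (exactly one page)

   The bytes between 0x08071234 and 0x08072000 live in the page that the
   loaded data segment already maps; everything from 0x08072000 up to the
   data segment end is the anonymous mapping created by
   VG_(setup_client_dataseg)() below. */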
1904 static Bool brk_segment_established = False;
1906 /* Establishes initial data segment for brk (heap). */
1907 Bool VG_(setup_client_dataseg)(void)
1909 /* Segment size is initially at least 1 MB and at most 8 MB. */
1910 SizeT m1 = 1024 * 1024;
1911 SizeT m8 = 8 * m1;
1912 SizeT initial_size = VG_(client_rlimit_data).rlim_cur;
1913 VG_(debugLog)(1, "syswrap-solaris", "Setup client data (brk) segment "
1914 "at %#lx\n", VG_(brk_base));
1915 if (initial_size < m1)
1916 initial_size = m1;
1917 if (initial_size > m8)
1918 initial_size = m8;
1919 initial_size = VG_PGROUNDUP(initial_size);
1921 Addr anon_start = VG_PGROUNDUP(VG_(brk_base));
1922 SizeT anon_size = VG_PGROUNDUP(initial_size);
1923 Addr resvn_start = anon_start + anon_size;
1924 SizeT resvn_size = VKI_PAGE_SIZE;
1926 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
1927 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
1928 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
1929 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
1930 vg_assert(VG_(brk_base) == VG_(brk_limit));
1932 /* Find the loaded data segment and remember its protection. */
1933 const NSegment *seg = VG_(am_find_nsegment)(VG_(brk_base) - 1);
1934 vg_assert(seg != NULL);
1935 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
1936 | (seg->hasW ? VKI_PROT_WRITE : 0)
1937 | (seg->hasX ? VKI_PROT_EXEC : 0);
1939 /* Try to create the data segment and associated reservation where
1940 VG_(brk_base) says. */
1941 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
1942 anon_size);
1943 if (!ok) {
1944 /* That didn't work, we're hosed. */
1945 return False;
1948 /* Map the data segment. */
1949 SysRes sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
1950 vg_assert(!sr_isError(sres));
1951 vg_assert(sr_Res(sres) == anon_start);
1953 brk_segment_established = True;
1954 return True;
1957 /* Tell the tool about the client data segment and then kill it, which will
1958 make it initially inaccessible/unaddressable. */
1959 void VG_(track_client_dataseg)(ThreadId tid)
1961 const NSegment *seg = VG_(am_find_nsegment)(VG_PGROUNDUP(VG_(brk_base)));
1962 vg_assert(seg != NULL);
1963 vg_assert(seg->kind == SkAnonC);
1965 VG_TRACK(new_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base), tid);
1966 VG_TRACK(die_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base));
1969 static void PRINTF_CHECK(1, 2)
1970 possibly_complain_brk(const HChar *format, ...)
1972 static Bool alreadyComplained = False;
1973 if (!alreadyComplained) {
1974 alreadyComplained = True;
1975 if (VG_(clo_verbosity) > 0) {
1976 va_list vargs;
1977 va_start(vargs, format);
1978 VG_(vmessage)(Vg_UserMsg, format, vargs);
1979 va_end(vargs);
1980 VG_(umsg)("(See section Limitations in the user manual.)\n");
1981 VG_(umsg)("NOTE: further instances of this message will not be "
1982 "shown.\n");
1987 PRE(sys_brk)
1989 /* unsigned long brk(caddr_t end_data_segment); */
1990 /* The Solaris kernel returns 0 on success.
1991 In addition to this, brk(0) returns the current data segment end. This is
1992 very different from the Linux kernel, for example. */
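/* Sketch of what this enables on the client side (hypothetical code built
   directly on the raw syscall, not necessarily how libc does it):

     uintptr_t cur = syscall(SYS_brk, 0);       // query the current break
     if (syscall(SYS_brk, cur + incr) == 0)     // 0 means success
        ptr = (void *) cur;                     // sbrk()-style behaviour

   which is why a zero argument gets the special early-return treatment
   below. */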
1994 Addr old_brk_limit = VG_(brk_limit);
1995 /* If VG_(brk_base) is page-aligned then old_brk_base_pgup is equal to
1996 VG_(brk_base). */
1997 Addr old_brk_base_pgup = VG_PGROUNDUP(VG_(brk_base));
1998 Addr new_brk = ARG1;
1999 const NSegment *seg, *seg2;
2001 PRINT("sys_brk ( %#lx )", ARG1);
2002 PRE_REG_READ1(unsigned long, "brk", vki_caddr_t, end_data_segment);
2004 if (new_brk == 0) {
2005 /* brk(0) - specific to Solaris 11 only. */
2006 SET_STATUS_Success(old_brk_limit);
2007 return;
2010 /* Handle some trivial cases. */
2011 if (new_brk == old_brk_limit) {
2012 SET_STATUS_Success(0);
2013 return;
2015 if (new_brk < VG_(brk_base)) {
2016 /* Clearly impossible. */
2017 SET_STATUS_Failure(VKI_ENOMEM);
2018 return;
2020 if (new_brk - VG_(brk_base) > VG_(client_rlimit_data).rlim_cur) {
2021 SET_STATUS_Failure(VKI_ENOMEM);
2022 return;
2025 /* The brk base and limit must have been already set. */
2026 vg_assert(VG_(brk_base) != -1);
2027 vg_assert(VG_(brk_limit) != -1);
2029 if (!brk_segment_established) {
2030 /* Stay sane (because there should have been no brk activity yet). */
2031 vg_assert(VG_(brk_base) == VG_(brk_limit));
2033 if (!VG_(setup_client_dataseg)()) {
2034 possibly_complain_brk("Cannot map memory to initialize brk segment in "
2035 "thread #%d at %#lx\n", tid, VG_(brk_base));
2036 SET_STATUS_Failure(VKI_ENOMEM);
2037 return;
2040 VG_(track_client_dataseg)(tid);
2043 if (new_brk < old_brk_limit) {
2044 /* Shrinking the data segment. Be lazy and don't munmap the excess
2045 area. */
2046 if (old_brk_limit > old_brk_base_pgup) {
2047 /* Calculate new local brk (=MAX(new_brk, old_brk_base_pgup)). */
2048 Addr new_brk_local;
2049 if (new_brk < old_brk_base_pgup)
2050 new_brk_local = old_brk_base_pgup;
2051 else
2052 new_brk_local = new_brk;
2054 /* Find a segment at the beginning and at the end of the shrunk
2055 range. */
2056 seg = VG_(am_find_nsegment)(new_brk_local);
2057 seg2 = VG_(am_find_nsegment)(old_brk_limit - 1);
2058 vg_assert(seg);
2059 vg_assert(seg->kind == SkAnonC);
2060 vg_assert(seg2);
2061 vg_assert(seg == seg2);
2063 /* Discard any translations and zero-out the area. */
2064 if (seg->hasT)
2065 VG_(discard_translations)(new_brk_local,
2066 old_brk_limit - new_brk_local,
2067 "do_brk(shrink)");
2068 /* Since we're being lazy and not unmapping pages, we have to zero out
2069 the area, so that if the area later comes back into circulation, it
2070 will be filled with zeroes, as if it really had been unmapped and
2071 later remapped. Be a bit paranoid and try hard to ensure we're not
2072 going to segfault by doing the write - check that segment is
2073 writable. */
2074 if (seg->hasW)
2075 VG_(memset)((void*)new_brk_local, 0, old_brk_limit - new_brk_local);
2078 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2079 if (new_brk < old_brk_base_pgup) {
2080 /* Calculate old local brk (=MIN(old_brk_limit, old_brk_base_pgup)). */
2081 Addr old_brk_local;
2082 if (old_brk_limit < old_brk_base_pgup)
2083 old_brk_local = old_brk_limit;
2084 else
2085 old_brk_local = old_brk_base_pgup;
2087 /* Find a segment at the beginning and at the end of the shrunk
2088 range. */
2089 seg = VG_(am_find_nsegment)(new_brk);
2090 seg2 = VG_(am_find_nsegment)(old_brk_local - 1);
2091 vg_assert(seg);
2092 vg_assert(seg2);
2093 vg_assert(seg == seg2);
2095 /* Discard any translations and zero-out the area. */
2096 if (seg->hasT)
2097 VG_(discard_translations)(new_brk, old_brk_local - new_brk,
2098 "do_brk(shrink)");
2099 if (seg->hasW)
2100 VG_(memset)((void*)new_brk, 0, old_brk_local - new_brk);
2103 /* We are done, update VG_(brk_limit), tell the tool about the changes,
2104 and leave. */
2105 VG_(brk_limit) = new_brk;
2106 VG_TRACK(die_mem_brk, new_brk, old_brk_limit - new_brk);
2107 SET_STATUS_Success(0);
2108 return;
2111 /* We are expanding the brk segment. */
2113 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2114 if (old_brk_limit < old_brk_base_pgup) {
2115 /* Calculate new local brk (=MIN(new_brk, old_brk_base_pgup)). */
2116 Addr new_brk_local;
2117 if (new_brk < old_brk_base_pgup)
2118 new_brk_local = new_brk;
2119 else
2120 new_brk_local = old_brk_base_pgup;
2122 /* Find a segment at the beginning and at the end of the expanded
2123 range. */
2124 seg = VG_(am_find_nsegment)(old_brk_limit);
2125 seg2 = VG_(am_find_nsegment)(new_brk_local - 1);
2126 vg_assert(seg);
2127 vg_assert(seg2);
2128 vg_assert(seg == seg2);
2130 /* Nothing else to do. */
2133 if (new_brk > old_brk_base_pgup) {
2134 /* Calculate old local brk (=MAX(old_brk_limit, old_brk_base_pgup)). */
2135 Addr old_brk_local;
2136 if (old_brk_limit < old_brk_base_pgup)
2137 old_brk_local = old_brk_base_pgup;
2138 else
2139 old_brk_local = old_brk_limit;
2141 /* Find a segment at the beginning of the expanded range. */
2142 if (old_brk_local > old_brk_base_pgup)
2143 seg = VG_(am_find_nsegment)(old_brk_local - 1);
2144 else
2145 seg = VG_(am_find_nsegment)(old_brk_local);
2146 vg_assert(seg);
2147 vg_assert(seg->kind == SkAnonC);
2149 /* Find the 1-page reservation segment. */
2150 seg2 = VG_(am_next_nsegment)(seg, True/*forwards*/);
2151 vg_assert(seg2);
2152 vg_assert(seg2->kind == SkResvn);
2153 vg_assert(seg->end + 1 == seg2->start);
2154 vg_assert(seg2->end - seg2->start + 1 == VKI_PAGE_SIZE);
2156 if (new_brk <= seg2->start) {
2157 /* Still fits within the existing anon segment, nothing to do. */
2158 } else {
2159 /* Data segment limit was already checked. */
2160 Addr anon_start = seg->end + 1;
2161 Addr resvn_start = VG_PGROUNDUP(new_brk);
2162 SizeT anon_size = resvn_start - anon_start;
2163 SizeT resvn_size = VKI_PAGE_SIZE;
2164 SysRes sres;
2166 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
2167 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
2168 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
2169 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
2170 vg_assert(anon_size > 0);
2172 /* The address space manager checks for free address space for us;
2173 the reservation would not otherwise be created. */
2174 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
2175 anon_size);
2176 if (!ok) {
2177 possibly_complain_brk("brk segment overflow in thread #%d: can not "
2178 "grow to %#lx\n", tid, new_brk);
2179 SET_STATUS_Failure(VKI_ENOMEM);
2180 return;
2183 /* Derive the protection bits from the existing segment. */
2184 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
2185 | (seg->hasW ? VKI_PROT_WRITE : 0)
2186 | (seg->hasX ? VKI_PROT_EXEC : 0);
2188 /* Address space manager will merge old and new data segments. */
2189 sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
2190 if (sr_isError(sres)) {
2191 possibly_complain_brk("Cannot map memory to grow brk segment in "
2192 "thread #%d to %#lx\n", tid, new_brk);
2193 SET_STATUS_Failure(VKI_ENOMEM);
2194 return;
2196 vg_assert(sr_Res(sres) == anon_start);
2198 seg = VG_(am_find_nsegment)(old_brk_base_pgup);
2199 seg2 = VG_(am_find_nsegment)(VG_PGROUNDUP(new_brk) - 1);
2200 vg_assert(seg);
2201 vg_assert(seg2);
2202 vg_assert(seg == seg2);
2203 vg_assert(new_brk <= seg->end + 1);
2207 /* We are done, update VG_(brk_limit), tell the tool about the changes, and
2208 leave. */
2209 VG_(brk_limit) = new_brk;
2210 VG_TRACK(new_mem_brk, old_brk_limit, new_brk - old_brk_limit, tid);
2211 SET_STATUS_Success(0);
2214 PRE(sys_stat)
2216 /* int stat(const char *path, struct stat *buf); */
2217 /* Note: We could use the generic sys_newstat wrapper here, but the 'new'
2218 in its name is rather confusing in the Solaris context, thus we provide
2219 our own wrapper. */
2220 PRINT("sys_stat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
2221 PRE_REG_READ2(long, "stat", const char *, path, struct stat *, buf);
2223 PRE_MEM_RASCIIZ("stat(path)", ARG1);
2224 PRE_MEM_WRITE("stat(buf)", ARG2, sizeof(struct vki_stat));
2227 POST(sys_stat)
2229 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2232 PRE(sys_lseek)
2234 /* off_t lseek(int fildes, off_t offset, int whence); */
2235 PRINT("sys_lseek ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2236 PRE_REG_READ3(long, "lseek", int, fildes, vki_off_t, offset, int, whence);
2238 /* Stay sane. */
2239 if (!ML_(fd_allowed)(ARG1, "lseek", tid, False))
2240 SET_STATUS_Failure(VKI_EBADF);
2243 PRE(sys_mount)
2245 /* int mount(const char *spec, const char *dir, int mflag, char *fstype,
2246 char *dataptr, int datalen, char *optptr, int optlen); */
2247 *flags |= SfMayBlock;
2248 if (ARG3 & VKI_MS_OPTIONSTR) {
2249 /* 8-argument mount */
2250 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld, "
2251 "%#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3,
2252 ARG4, (HChar *) ARG4, ARG5, ARG6, ARG7, (HChar *) ARG7, SARG8);
2253 PRE_REG_READ8(long, "mount", const char *, spec, const char *, dir,
2254 int, mflag, char *, fstype, char *, dataptr, int, datalen,
2255 char *, optptr, int, optlen);
2257 else if (ARG3 & VKI_MS_DATA) {
2258 /* 6-argument mount */
2259 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld )",
2260 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4,
2261 (HChar *) ARG4, ARG5, SARG6);
2262 PRE_REG_READ6(long, "mount", const char *, spec, const char *, dir,
2263 int, mflag, char *, fstype, char *, dataptr,
2264 int, datalen);
2266 else {
2267 /* 4-argument mount */
2268 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s) )", ARG1,
2269 (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4, (HChar *) ARG4);
2270 PRE_REG_READ4(long, "mount", const char *, spec, const char *, dir,
2271 int, mflag, char *, fstype);
2273 if (ARG1)
2274 PRE_MEM_RASCIIZ("mount(spec)", ARG1);
2275 PRE_MEM_RASCIIZ("mount(dir)", ARG2);
2276 if (ARG4 && ARG4 >= 256) {
2277 /* If ARG4 < 256, then it's an index to a fs table in the kernel. */
2278 PRE_MEM_RASCIIZ("mount(fstype)", ARG4);
2280 if (ARG3 & (VKI_MS_DATA | VKI_MS_OPTIONSTR)) {
2281 if (ARG5)
2282 PRE_MEM_READ("mount(dataptr)", ARG5, ARG6);
2283 if ((ARG3 & VKI_MS_OPTIONSTR) && ARG7) {
2284 /* in/out buffer */
2285 PRE_MEM_RASCIIZ("mount(optptr)", ARG7);
2286 PRE_MEM_WRITE("mount(optptr)", ARG7, ARG8);
2291 POST(sys_mount)
2293 if (ARG3 & VKI_MS_OPTIONSTR) {
2294 POST_MEM_WRITE(ARG7, VG_(strlen)((HChar*)ARG7) + 1);
2295 } else if (ARG3 & VKI_MS_DATA) {
2296 if ((ARG2) &&
2297 (ARG3 & VKI_MS_NOMNTTAB) &&
2298 (VG_STREQ((HChar *) ARG4, "namefs")) &&
2299 (ARG6 == sizeof(struct vki_namefd)) &&
2300 ML_(safe_to_deref)((void *) ARG5, ARG6)) {
2301 /* Most likely an fattach() call for a door file descriptor. */
2302 door_record_server_fattach(((struct vki_namefd *) ARG5)->fd,
2303 (HChar *) ARG2);
2308 PRE(sys_readlinkat)
2310 /* ssize_t readlinkat(int dfd, const char *path, char *buf,
2311 size_t bufsiz); */
2312 HChar name[30]; // large enough
2313 Word saved = SYSNO;
2315 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2316 This is different from Linux, for example, where glibc sign-extends it. */
2317 Int dfd = (Int) ARG1;
2319 PRINT("sys_readlinkat ( %d, %#lx(%s), %#lx, %ld )", dfd, ARG2,
2320 (HChar *) ARG2, ARG3, SARG4);
2321 PRE_REG_READ4(long, "readlinkat", int, dfd, const char *, path,
2322 char *, buf, int, bufsiz);
2323 PRE_MEM_RASCIIZ("readlinkat(path)", ARG2);
2324 PRE_MEM_WRITE("readlinkat(buf)", ARG3, ARG4);
2326 /* Be strict but ignore dfd for absolute path. */
2327 if (dfd != VKI_AT_FDCWD
2328 && ML_(safe_to_deref)((void *) ARG2, 1)
2329 && ((HChar *) ARG2)[0] != '/'
2330 && !ML_(fd_allowed)(dfd, "readlinkat", tid, False)) {
2331 SET_STATUS_Failure(VKI_EBADF);
2332 return;
2335 /* Handle the case where readlinkat is looking at /proc/self/path/a.out or
2336 /proc/<pid>/path/a.out. */
2337 VG_(sprintf)(name, "/proc/%d/path/a.out", VG_(getpid)());
2338 if (ML_(safe_to_deref)((void*)ARG2, 1) &&
2339 (!VG_(strcmp)((HChar*)ARG2, name) ||
2340 !VG_(strcmp)((HChar*)ARG2, "/proc/self/path/a.out"))) {
2341 VG_(sprintf)(name, "/proc/self/path/%d", VG_(cl_exec_fd));
2342 SET_STATUS_from_SysRes(VG_(do_syscall4)(saved, dfd, (UWord)name, ARG3,
2343 ARG4));
2347 POST(sys_readlinkat)
2349 POST_MEM_WRITE(ARG3, RES);
2352 PRE(sys_stime)
2354 /* Kernel: int stime(time_t time); */
2355 PRINT("sys_stime ( %ld )", SARG1);
2356 PRE_REG_READ1(long, "stime", vki_time_t, time);
2359 PRE(sys_fstat)
2361 /* int fstat(int fildes, struct stat *buf); */
2362 /* Note: We could use the generic sys_newfstat wrapper here, but the 'new'
2363 in its name is rather confusing in the Solaris context, thus we provide
2364 our own wrapper. */
2365 PRINT("sys_fstat ( %ld, %#lx )", SARG1, ARG2);
2366 PRE_REG_READ2(long, "fstat", int, fildes, struct stat *, buf);
2367 PRE_MEM_WRITE("fstat(buf)", ARG2, sizeof(struct vki_stat));
2369 /* Be strict. */
2370 if (!ML_(fd_allowed)(ARG1, "fstat", tid, False))
2371 SET_STATUS_Failure(VKI_EBADF);
2374 POST(sys_fstat)
2376 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2379 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
2380 PRE(sys_frealpathat)
2382 /* int frealpathat(int fd, char *path, char *buf, size_t buflen); */
2384 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2385 This is different from Linux, for example, where glibc sign-extends it. */
2386 Int fd = (Int) ARG1;
2388 PRINT("sys_frealpathat ( %d, %#lx(%s), %#lx, %lu )",
2389 fd, ARG2, (HChar *) ARG2, ARG3, ARG4);
2390 PRE_REG_READ4(long, "frealpathat", int, fd, char *, path,
2391 char *, buf, vki_size_t, buflen);
2392 PRE_MEM_RASCIIZ("frealpathat(path)", ARG2);
2393 PRE_MEM_WRITE("frealpathat(buf)", ARG3, ARG4);
2395 /* Be strict but ignore fd for absolute path. */
2396 if (fd != VKI_AT_FDCWD
2397 && ML_(safe_to_deref)((void *) ARG2, 1)
2398 && ((HChar *) ARG2)[0] != '/'
2399 && !ML_(fd_allowed)(fd, "frealpathat", tid, False))
2400 SET_STATUS_Failure(VKI_EBADF);
2403 POST(sys_frealpathat)
2405 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
2407 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
2409 PRE(sys_stty)
2411 /* int stty(int fd, const struct sgttyb *tty); */
2412 PRINT("sys_stty ( %ld, %#lx )", SARG1, ARG2);
2413 PRE_REG_READ2(long, "stty", int, fd,
2414 const struct vki_sgttyb *, tty);
2415 PRE_MEM_READ("stty(tty)", ARG2, sizeof(struct vki_sgttyb));
2417 /* Be strict. */
2418 if (!ML_(fd_allowed)(ARG1, "stty", tid, False))
2419 SET_STATUS_Failure(VKI_EBADF);
2422 PRE(sys_gtty)
2424 /* int gtty(int fd, struct sgttyb *tty); */
2425 PRINT("sys_gtty ( %ld, %#lx )", SARG1, ARG2);
2426 PRE_REG_READ2(long, "gtty", int, fd, struct vki_sgttyb *, tty);
2427 PRE_MEM_WRITE("gtty(tty)", ARG2, sizeof(struct vki_sgttyb));
2429 /* Be strict. */
2430 if (!ML_(fd_allowed)(ARG1, "gtty", tid, False))
2431 SET_STATUS_Failure(VKI_EBADF);
2434 POST(sys_gtty)
2436 POST_MEM_WRITE(ARG2, sizeof(struct vki_sgttyb));
2439 PRE(sys_pgrpsys)
2441 /* Kernel: int setpgrp(int flag, int pid, int pgid); */
2442 switch (ARG1 /*flag*/) {
2443 case 0:
2444 /* Libc: pid_t getpgrp(void); */
2445 PRINT("sys_pgrpsys ( %ld )", SARG1);
2446 PRE_REG_READ1(long, SC2("pgrpsys", "getpgrp"), int, flag);
2447 break;
2448 case 1:
2449 /* Libc: pid_t setpgrp(void); */
2450 PRINT("sys_pgrpsys ( %ld )", SARG1);
2451 PRE_REG_READ1(long, SC2("pgrpsys", "setpgrp"), int, flag);
2452 break;
2453 case 2:
2454 /* Libc: pid_t getsid(pid_t pid); */
2455 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2456 PRE_REG_READ2(long, SC2("pgrpsys", "getsid"), int, flag,
2457 vki_pid_t, pid);
2458 break;
2459 case 3:
2460 /* Libc: pid_t setsid(void); */
2461 PRINT("sys_pgrpsys ( %ld )", SARG1);
2462 PRE_REG_READ1(long, SC2("pgrpsys", "setsid"), int, flag);
2463 break;
2464 case 4:
2465 /* Libc: pid_t getpgid(pid_t pid); */
2466 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2467 PRE_REG_READ2(long, SC2("pgrpsys", "getpgid"), int, flag,
2468 vki_pid_t, pid);
2469 break;
2470 case 5:
2471 /* Libc: int setpgid(pid_t pid, pid_t pgid); */
2472 PRINT("sys_pgrpsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2473 PRE_REG_READ3(long, SC2("pgrpsys", "setpgid"), int, flag,
2474 vki_pid_t, pid, vki_pid_t, pgid);
2475 break;
2476 default:
2477 VG_(unimplemented)("Syswrap of the pgrpsys call with flag %ld.", SARG1);
2478 /*NOTREACHED*/
2479 break;
2483 PRE(sys_pipe)
2485 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2486 /* int pipe(int fildes[2], int flags); */
2487 PRINT("sys_pipe ( %#lx, %ld )", ARG1, SARG2);
2488 PRE_REG_READ2(long, "pipe", int *, fildes, int, flags);
2489 PRE_MEM_WRITE("pipe(fildes)", ARG1, 2 * sizeof(int));
2490 #else
2491 /* longlong_t pipe(); */
2492 PRINT("sys_pipe ( )");
2493 PRE_REG_READ0(long, "pipe");
2494 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
2497 POST(sys_pipe)
2499 Int p0, p1;
2501 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2502 int *fds = (int*)ARG1;
2503 p0 = fds[0];
2504 p1 = fds[1];
2505 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
2506 #else
2507 p0 = RES;
2508 p1 = RESHI;
2509 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
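/* In the old calling convention the kernel hands back both descriptors in
   the two return-value registers (hence the "longlong_t pipe()" prototype
   in the PRE wrapper above); Valgrind sees them as RES and RESHI, and
   libc's pipe() wrapper is then expected to store them into the caller's
   fildes[] array. */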
2511 if (!ML_(fd_allowed)(p0, "pipe", tid, True) ||
2512 !ML_(fd_allowed)(p1, "pipe", tid, True)) {
2513 VG_(close)(p0);
2514 VG_(close)(p1);
2515 SET_STATUS_Failure(VKI_EMFILE);
2517 else if (VG_(clo_track_fds)) {
2518 ML_(record_fd_open_nameless)(tid, p0);
2519 ML_(record_fd_open_nameless)(tid, p1);
2523 PRE(sys_faccessat)
2525 /* int faccessat(int fd, const char *path, int amode, int flag); */
2527 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2528 This is different from Linux, for example, where glibc sign-extends it. */
2529 Int fd = (Int) ARG1;
2531 PRINT("sys_faccessat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2532 (HChar *) ARG2, SARG3, SARG4);
2533 PRE_REG_READ4(long, "faccessat", int, fd, const char *, path,
2534 int, amode, int, flag);
2535 PRE_MEM_RASCIIZ("faccessat(path)", ARG2);
2537 /* Be strict but ignore fd for absolute path. */
2538 if (fd != VKI_AT_FDCWD
2539 && ML_(safe_to_deref)((void *) ARG2, 1)
2540 && ((HChar *) ARG2)[0] != '/'
2541 && !ML_(fd_allowed)(fd, "faccessat", tid, False))
2542 SET_STATUS_Failure(VKI_EBADF);
2545 PRE(sys_mknodat)
2547 /* int mknodat(int fd, char *fname, mode_t fmode, dev_t dev); */
2549 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2550 This is different from Linux, for example, where glibc sign-extends it. */
2551 Int fd = (Int) ARG1;
2553 PRINT("sys_mknodat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2554 (HChar *) ARG2, SARG3, SARG4);
2555 PRE_REG_READ4(long, "mknodat", int, fd, const char *, fname,
2556 vki_mode_t, fmode, vki_dev_t, dev);
2557 PRE_MEM_RASCIIZ("mknodat(fname)", ARG2);
2559 /* Be strict but ignore fd for absolute path. */
2560 if (fd != VKI_AT_FDCWD
2561 && ML_(safe_to_deref)((void *) ARG2, 1)
2562 && ((HChar *) ARG2)[0] != '/'
2563 && !ML_(fd_allowed)(fd, "mknodat", tid, False))
2564 SET_STATUS_Failure(VKI_EBADF);
2566 *flags |= SfMayBlock;
2569 POST(sys_mknodat)
2571 if (!ML_(fd_allowed)(RES, "mknodat", tid, True)) {
2572 VG_(close)(RES);
2573 SET_STATUS_Failure(VKI_EMFILE);
2574 } else if (VG_(clo_track_fds))
2575 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG2);
2578 PRE(sys_sysi86)
2580 /* int sysi86(int cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); */
2581 PRINT("sys_sysi86 ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
2582 PRE_REG_READ4(long, "sysi86", int, cmd, uintptr_t, arg1, uintptr_t, arg2,
2583 uintptr_t, arg3);
2585 switch (ARG1 /*cmd*/) {
2586 case VKI_SI86FPSTART:
2587 PRE_MEM_WRITE("sysi86(fp_hw)", ARG2, sizeof(vki_uint_t));
2588 /* ARG3 is a desired x87 FCW value, ARG4 is a desired SSE MXCSR value.
2589 They are passed to the kernel but V will change them later anyway
2590 (this is a general Valgrind limitation described in the official
2591 documentation). */
2592 break;
2593 default:
2594 VG_(unimplemented)("Syswrap of the sysi86 call with cmd %ld.", SARG1);
2595 /*NOTREACHED*/
2596 break;
2600 POST(sys_sysi86)
2602 switch (ARG1 /*cmd*/) {
2603 case VKI_SI86FPSTART:
2604 POST_MEM_WRITE(ARG2, sizeof(vki_uint_t));
2605 break;
2606 default:
2607 vg_assert(0);
2608 break;
2612 PRE(sys_shmsys)
2614 /* Kernel: uintptr_t shmsys(int opcode, uintptr_t a0, uintptr_t a1,
2615 uintptr_t a2, uintptr_t a3); */
2617 *flags |= SfMayBlock;
2619 switch (ARG1 /*opcode*/) {
2620 case VKI_SHMAT:
2621 /* Libc: void *shmat(int shmid, const void *shmaddr, int shmflg); */
2622 PRINT("sys_shmsys ( %ld, %ld, %#lx, %ld )",
2623 SARG1, SARG2, ARG3, SARG4);
2624 PRE_REG_READ4(long, SC2("shmsys", "shmat"), int, opcode,
2625 int, shmid, const void *, shmaddr, int, shmflg);
2627 UWord addr = ML_(generic_PRE_sys_shmat)(tid, ARG2, ARG3, ARG4);
2628 if (addr == 0)
2629 SET_STATUS_Failure(VKI_EINVAL);
2630 else
2631 ARG3 = addr;
2632 break;
2634 case VKI_SHMCTL:
2635 /* Libc: int shmctl(int shmid, int cmd, struct shmid_ds *buf); */
2636 switch (ARG3 /* cmd */) {
2637 case VKI_SHM_LOCK:
2638 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2639 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "lock"),
2640 int, opcode, int, shmid, int, cmd);
2641 break;
2642 case VKI_SHM_UNLOCK:
2643 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2644 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "unlock"),
2645 int, opcode, int, shmid, int, cmd);
2646 break;
2647 case VKI_IPC_RMID:
2648 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2649 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "rmid"),
2650 int, opcode, int, shmid, int, cmd);
2651 break;
2652 case VKI_IPC_SET:
2653 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2654 SARG1, SARG2, SARG3, ARG4);
2655 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set"),
2656 int, opcode, int, shmid, int, cmd,
2657 struct vki_shmid_ds *, buf);
2659 struct vki_shmid_ds *buf = (struct vki_shmid_ds *) ARG4;
2660 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.uid)",
2661 buf->shm_perm.uid);
2662 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.gid)",
2663 buf->shm_perm.gid);
2664 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.mode)",
2665 buf->shm_perm.mode);
2666 break;
2667 case VKI_IPC_STAT:
2668 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2669 SARG1, SARG2, SARG3, ARG4);
2670 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat"),
2671 int, opcode, int, shmid, int, cmd,
2672 struct vki_shmid_ds *, buf);
2673 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat, buf)", ARG4,
2674 sizeof(struct vki_shmid_ds));
2675 break;
2676 case VKI_IPC_SET64:
2677 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2678 SARG1, SARG2, SARG3, ARG4);
2679 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set64"),
2680 int, opcode, int, shmid, int, cmd,
2681 struct vki_shmid_ds64 *, buf);
2683 struct vki_shmid_ds64 *buf64 = (struct vki_shmid_ds64 *) ARG4;
2684 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2685 "buf->shmx_perm.ipcx_uid)",
2686 buf64->shmx_perm.ipcx_uid);
2687 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2688 "buf->shmx_perm.ipcx_gid)",
2689 buf64->shmx_perm.ipcx_gid);
2690 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2691 "buf->shmx_perm.ipcx_mode)",
2692 buf64->shmx_perm.ipcx_mode);
2693 break;
2694 case VKI_IPC_STAT64:
2695 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2696 SARG1, SARG2, SARG3, ARG4);
2697 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat64"),
2698 int, opcode, int, shmid, int, cmd,
2699 struct vki_shmid_ds64 *, buf);
2700 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat64, buf)", ARG4,
2701 sizeof(struct vki_shmid_ds64));
2702 break;
2703 #if defined(SOLARIS_SHM_NEW)
2704 case VKI_IPC_XSTAT64:
2705 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2706 SARG1, SARG2, SARG3, ARG4);
2707 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "xstat64"),
2708 int, opcode, int, shmid, int, cmd,
2709 struct vki_shmid_ds64 *, buf);
2710 PRE_MEM_WRITE("shmsys(shmctl, ipc_xstat64, buf)", ARG4,
2711 sizeof(struct vki_shmid_xds64));
2712 break;
2713 #endif /* SOLARIS_SHM_NEW */
2714 default:
2715 VG_(unimplemented)("Syswrap of the shmsys(shmctl) call with "
2716 "cmd %ld.", SARG3);
2717 /*NOTREACHED*/
2718 break;
2720 break;
2722 case VKI_SHMDT:
2723 /* Libc: int shmdt(const void *shmaddr); */
2724 PRINT("sys_shmsys ( %ld, %#lx )", SARG1, ARG2);
2725 PRE_REG_READ2(long, SC2("shmsys", "shmdt"), int, opcode,
2726 const void *, shmaddr);
2728 if (!ML_(generic_PRE_sys_shmdt)(tid, ARG2))
2729 SET_STATUS_Failure(VKI_EINVAL);
2730 break;
2732 case VKI_SHMGET:
2733 /* Libc: int shmget(key_t key, size_t size, int shmflg); */
2734 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld )",
2735 SARG1, SARG2, ARG3, ARG4);
2736 PRE_REG_READ4(long, SC2("shmsys", "shmget"), int, opcode,
2737 vki_key_t, key, vki_size_t, size, int, shmflg);
2738 break;
2740 case VKI_SHMIDS:
2741 /* Libc: int shmids(int *buf, uint_t nids, uint_t *pnids); */
2742 PRINT("sys_shmsys ( %ld, %#lx, %lu, %#lx )",
2743 SARG1, ARG2, ARG3, ARG4);
2744 PRE_REG_READ4(long, SC2("shmsys", "shmids"), int, opcode,
2745 int *, buf, vki_uint_t, nids, vki_uint_t *, pnids);
2747 PRE_MEM_WRITE("shmsys(shmids, buf)", ARG2, ARG3 * sizeof(int *));
2748 PRE_MEM_WRITE("shmsys(shmids, pnids)", ARG4, sizeof(vki_uint_t));
2749 break;
2751 #if defined(SOLARIS_SHM_NEW)
2752 case VKI_SHMADV:
2753 /* Libc: int shmadv(int shmid, uint_t cmd, uint_t *advice); */
2754 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld )",
2755 SARG1, SARG2, ARG3, ARG4);
2756 PRE_REG_READ4(long, SC2("shmsys", "shmadv"), int, opcode,
2757 int, shmid, vki_uint_t, cmd, vki_uint_t *, advice);
2759 switch (ARG3 /*cmd*/) {
2760 case VKI_SHM_ADV_GET:
2761 PRE_MEM_WRITE("shmsys(shmadv, advice)", ARG4,
2762 sizeof(vki_uint_t));
2763 break;
2764 case VKI_SHM_ADV_SET:
2765 PRE_MEM_READ("shmsys(shmadv, advice)", ARG4,
2766 sizeof(vki_uint_t));
2767 break;
2768 default:
2769 VG_(unimplemented)("Syswrap of the shmsys(shmadv) call with "
2770 "cmd %lu.", ARG3);
2771 /*NOTREACHED*/
2772 break;
2774 break;
2776 case VKI_SHMGET_OSM:
2777 /* Libc: int shmget_osm(key_t key, size_t size, int shmflg,
2778 size_t granule_sz); */
2780 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld, %lu )",
2781 SARG1, SARG2, ARG3, SARG4, ARG5);
2782 PRE_REG_READ5(long, SC2("shmsys", "shmget_osm"), int, opcode,
2783 vki_key_t, key, vki_size_t, size, int, shmflg,
2784 vki_size_t, granule_sz);
2785 break;
2786 #endif /* SOLARIS_SHM_NEW */
2788 default:
2789 VG_(unimplemented)("Syswrap of the shmsys call with opcode %ld.",
2790 SARG1);
2791 /*NOTREACHED*/
2792 break;
2796 POST(sys_shmsys)
2798 switch (ARG1 /*opcode*/) {
2799 case VKI_SHMAT:
2800 ML_(generic_POST_sys_shmat)(tid, RES, ARG2, ARG3, ARG4);
2801 break;
2803 case VKI_SHMCTL:
2804 switch (ARG3 /*cmd*/) {
2805 case VKI_SHM_LOCK:
2806 case VKI_SHM_UNLOCK:
2807 case VKI_IPC_RMID:
2808 case VKI_IPC_SET:
2809 break;
2810 case VKI_IPC_STAT:
2811 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds));
2812 break;
2813 case VKI_IPC_SET64:
2814 break;
2815 case VKI_IPC_STAT64:
2816 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds64));
2817 break;
2818 #if defined(SOLARIS_SHM_NEW)
2819 case VKI_IPC_XSTAT64:
2820 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_xds64));
2821 break;
2822 #endif /* SOLARIS_SHM_NEW */
2823 default:
2824 vg_assert(0);
2825 break;
2827 break;
2829 case VKI_SHMDT:
2830 ML_(generic_POST_sys_shmdt)(tid, RES, ARG2);
2831 break;
2833 case VKI_SHMGET:
2834 break;
2836 case VKI_SHMIDS:
2838 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2840 vki_uint_t *pnids = (vki_uint_t *) ARG4;
2841 if (*pnids <= ARG3)
2842 POST_MEM_WRITE(ARG2, *pnids * sizeof(int *));
2844 break;
2846 #if defined(SOLARIS_SHM_NEW)
2847 case VKI_SHMADV:
2848 switch (ARG3 /*cmd*/) {
2849 case VKI_SHM_ADV_GET:
2850 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2851 break;
2852 case VKI_SHM_ADV_SET:
2853 break;
2854 default:
2855 vg_assert(0);
2856 break;
2858 break;
2860 case VKI_SHMGET_OSM:
2861 break;
2862 #endif /* SOLARIS_SHM_NEW */
2864 default:
2865 vg_assert(0);
2866 break;
2870 PRE(sys_semsys)
2872 /* Kernel: int semsys(int opcode, uintptr_t a1, uintptr_t a2, uintptr_t a3,
2873 uintptr_t a4); */
2875 *flags |= SfMayBlock;
2877 switch (ARG1 /*opcode*/) {
2878 case VKI_SEMCTL:
2879 /* Libc: int semctl(int semid, int semnum, int cmd...); */
2880 switch (ARG4) {
2881 case VKI_IPC_STAT:
2882 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2883 SARG1, SARG2, SARG3, SARG4, ARG5);
2884 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat"), int, opcode,
2885 int, semid, int, semnum, int, cmd,
2886 struct vki_semid_ds *, arg);
2887 break;
2888 case VKI_IPC_SET:
2889 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2890 SARG1, SARG2, SARG3, SARG4, ARG5);
2891 PRE_REG_READ5(long, SC3("semsys", "semctl", "set"), int, opcode,
2892 int, semid, int, semnum, int, cmd,
2893 struct vki_semid_ds *, arg);
2894 break;
2895 case VKI_IPC_STAT64:
2896 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2897 SARG1, SARG2, SARG3, SARG4, ARG5);
2898 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat64"), int, opcode,
2899 int, semid, int, semnum, int, cmd,
2900 struct vki_semid64_ds *, arg);
2901 break;
2902 case VKI_IPC_SET64:
2903 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2904 SARG1, SARG2, SARG3, SARG4, ARG5);
2905 PRE_REG_READ5(long, SC3("semsys", "semctl", "set64"), int, opcode,
2906 int, semid, int, semnum, int, cmd,
2907 struct vki_semid64_ds *, arg);
2908 break;
2909 case VKI_IPC_RMID:
2910 PRINT("sys_semsys ( %ld, %ld, %ld )", SARG1, SARG3, SARG4);
2911 PRE_REG_READ3(long, SC3("semsys", "semctl", "rmid"), int, opcode,
2912 int, semid, int, cmd);
2913 break;
2914 case VKI_GETALL:
2915 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2916 SARG1, SARG2, SARG4, ARG5);
2917 PRE_REG_READ4(long, SC3("semsys", "semctl", "getall"), int, opcode,
2918 int, semid, int, cmd, ushort_t *, arg);
2919 break;
2920 case VKI_SETALL:
2921 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2922 SARG1, SARG2, SARG4, ARG5);
2923 PRE_REG_READ4(long, SC3("semsys", "semctl", "setall"), int, opcode,
2924 int, semid, int, cmd, ushort_t *, arg);
2925 break;
2926 case VKI_GETVAL:
2927 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2928 SARG1, SARG2, SARG3, SARG4);
2929 PRE_REG_READ4(long, SC3("semsys", "semctl", "getval"), int, opcode,
2930 int, semid, int, semnum, int, cmd);
2931 break;
2932 case VKI_SETVAL:
2933 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2934 SARG1, SARG2, SARG3, SARG4, ARG5);
2935 PRE_REG_READ5(long, SC3("semsys", "semctl", "setval"), int, opcode,
2936 int, semid, int, semnum, int, cmd,
2937 union vki_semun *, arg);
2938 break;
2939 case VKI_GETPID:
2940 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2941 SARG1, SARG2, SARG3, SARG4);
2942 PRE_REG_READ4(long, SC3("semsys", "semctl", "getpid"), int, opcode,
2943 int, semid, int, semnum, int, cmd);
2944 break;
2945 case VKI_GETNCNT:
2946 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2947 SARG1, SARG2, SARG3, SARG4);
2948 PRE_REG_READ4(long, SC3("semsys", "semctl", "getncnt"),
2949 int, opcode, int, semid, int, semnum, int, cmd);
2950 break;
2951 case VKI_GETZCNT:
2952 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2953 SARG1, SARG2, SARG3, SARG4);
2954 PRE_REG_READ4(long, SC3("semsys", "semctl", "getzcnt"),
2955 int, opcode, int, semid, int, semnum, int, cmd);
2956 break;
2957 default:
2958 VG_(unimplemented)("Syswrap of the semsys(semctl) call "
2959 "with cmd %ld.", SARG4);
2960 /*NOTREACHED*/
2961 break;
2963 ML_(generic_PRE_sys_semctl)(tid, ARG2, ARG3, ARG4, ARG5);
2964 break;
2965 case VKI_SEMGET:
2966 /* Libc: int semget(key_t key, int nsems, int semflg); */
2967 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )", SARG1, SARG2, SARG3, SARG4);
2968 PRE_REG_READ4(long, SC2("semsys", "semget"), int, opcode,
2969 vki_key_t, key, int, nsems, int, semflg);
2970 break;
2971 case VKI_SEMOP:
2972 /* Libc: int semop(int semid, struct sembuf *sops, size_t nsops); */
2973 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
2974 PRE_REG_READ4(long, SC2("semsys", "semop"), int, opcode, int, semid,
2975 struct vki_sembuf *, sops, vki_size_t, nsops);
2976 ML_(generic_PRE_sys_semop)(tid, ARG2, ARG3, ARG4);
2977 break;
2978 case VKI_SEMIDS:
2979 /* Libc: int semids(int *buf, uint_t nids, uint_t *pnids); */
2980 PRINT("sys_semsys ( %ld, %#lx, %lu, %#lx )", SARG1, ARG2, ARG3, ARG4);
2981 PRE_REG_READ4(long, SC2("semsys", "semids"), int, opcode, int *, buf,
2982 vki_uint_t, nids, vki_uint_t *, pnids);
2984 PRE_MEM_WRITE("semsys(semids, buf)", ARG2, ARG3 * sizeof(int *));
2985 PRE_MEM_WRITE("semsys(semids, pnids)", ARG4, sizeof(vki_uint_t));
2986 break;
2987 case VKI_SEMTIMEDOP:
2988 /* Libc: int semtimedop(int semid, struct sembuf *sops, size_t nsops,
2989 const struct timespec *timeout); */
2991 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu, %#lx )", SARG1, SARG2, ARG3,
2992 ARG4, ARG5);
2993 PRE_REG_READ5(long, SC2("semsys", "semtimedop"), int, opcode,
2994 int, semid, struct vki_sembuf *, sops, vki_size_t, nsops,
2995 struct vki_timespec *, timeout);
2996 ML_(generic_PRE_sys_semtimedop)(tid, ARG2, ARG3, ARG4, ARG5);
2997 break;
2998 default:
2999 VG_(unimplemented)("Syswrap of the semsys call with opcode %ld.", SARG1);
3000 /*NOTREACHED*/
3001 break;
3005 POST(sys_semsys)
3007 switch (ARG1 /*opcode*/) {
3008 case VKI_SEMCTL:
3009 ML_(generic_POST_sys_semctl)(tid, RES, ARG2, ARG3, ARG4, ARG5);
3010 break;
3011 case VKI_SEMGET:
3012 case VKI_SEMOP:
3013 break;
3014 case VKI_SEMIDS:
3016 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
3018 vki_uint_t *pnids = (vki_uint_t *) ARG4;
3019 if (*pnids <= ARG3)
3020 POST_MEM_WRITE(ARG2, *pnids * sizeof(int *));
3022 break;
3023 case VKI_SEMTIMEDOP:
3024 break;
3025 default:
3026 vg_assert(0);
3027 break;
3031 /* ---------------------------------------------------------------------
3032 ioctl wrappers
3033 ------------------------------------------------------------------ */
3035 PRE(sys_ioctl)
3037 /* int ioctl(int fildes, int request, ...); */
3038 *flags |= SfMayBlock;
3040 /* Prevent sign-extending the switch case values to 64 bits on 64-bit
3041 architectures. */
3042 Int cmd = (Int) ARG2;
3044 switch (cmd /*request*/) {
3045 /* Handle 2-arg ioctls specially here (they do not use ARG3 at all). */
3046 case VKI_DINFOIDENT:
3047 case VKI_TIOCNOTTY:
3048 case VKI_TIOCSCTTY:
3049 PRINT("sys_ioctl ( %ld, %#lx )", SARG1, ARG2);
3050 PRE_REG_READ2(long, "ioctl", int, fd, int, request);
3051 break;
3052 /* And now come the 3-arg ones. */
3053 default:
3054 PRINT("sys_ioctl ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
3055 PRE_REG_READ3(long, "ioctl", int, fd, int, request, intptr_t, arg);
3056 break;
3059 switch (cmd /*request*/) {
3060 /* pools */
3061 case VKI_POOL_STATUSQ:
3062 PRE_MEM_WRITE("ioctl(POOL_STATUSQ)", ARG3, sizeof(vki_pool_status_t));
3063 break;
3065 /* mntio */
3066 case VKI_MNTIOC_GETEXTMNTENT:
3068 PRE_MEM_READ("ioctl(MNTIOC_GETEXTMNTENT)",
3069 ARG3, sizeof(struct vki_mntentbuf));
3071 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3072 if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
3073 PRE_MEM_WRITE("ioctl(MNTIOC_GETEXTMNTENT, embuf->mbuf_emp)",
3074 (Addr) embuf->mbuf_emp, sizeof(struct vki_extmnttab));
3075 PRE_MEM_WRITE("ioctl(MNTIOC_GETEXTMNTENT, embuf->mbuf_buf)",
3076 (Addr) embuf->mbuf_buf, embuf->mbuf_bufsize);
3079 break;
3081 case VKI_MNTIOC_GETMNTANY:
3083 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY)",
3084 ARG3, sizeof(struct vki_mntentbuf));
3086 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3087 if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
3088 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_emp)",
3089 (Addr) embuf->mbuf_emp, sizeof(struct vki_mnttab));
3090 PRE_MEM_WRITE("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_buf)",
3091 (Addr) embuf->mbuf_buf, embuf->mbuf_bufsize);
3093 struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
3094 if (ML_(safe_to_deref(mnt, sizeof(struct vki_mnttab)))) {
3095 if (mnt->mnt_special != NULL)
3096 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_special)",
3097 (Addr) mnt->mnt_special);
3098 if (mnt->mnt_mountp != NULL)
3099 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mountp)",
3100 (Addr) mnt->mnt_mountp);
3101 if (mnt->mnt_fstype != NULL)
3102 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_fstype)",
3103 (Addr) mnt->mnt_fstype);
3104 if (mnt->mnt_mntopts != NULL)
3105 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mntopts)",
3106 (Addr) mnt->mnt_mntopts);
3107 if (mnt->mnt_time != NULL)
3108 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_time)",
3109 (Addr) mnt->mnt_time);
3113 break;
3115 /* termio/termios */
3116 case VKI_TCGETA:
3117 PRE_MEM_WRITE("ioctl(TCGETA)", ARG3, sizeof(struct vki_termio));
3118 break;
3119 case VKI_TCGETS:
3120 PRE_MEM_WRITE("ioctl(TCGETS)", ARG3, sizeof(struct vki_termios));
3121 break;
3122 case VKI_TCSETS:
3123 PRE_MEM_READ("ioctl(TCSETS)", ARG3, sizeof(struct vki_termios));
3124 break;
3125 case VKI_TCSETSW:
3126 PRE_MEM_READ("ioctl(TCSETSW)", ARG3, sizeof(struct vki_termios));
3127 break;
3128 case VKI_TCSETSF:
3129 PRE_MEM_READ("ioctl(TCSETSF)", ARG3, sizeof(struct vki_termios));
3130 break;
3131 case VKI_TIOCGWINSZ:
3132 PRE_MEM_WRITE("ioctl(TIOCGWINSZ)", ARG3, sizeof(struct vki_winsize));
3133 break;
3134 case VKI_TIOCSWINSZ:
3135 PRE_MEM_READ("ioctl(TIOCSWINSZ)", ARG3, sizeof(struct vki_winsize));
3136 break;
3137 case VKI_TIOCGPGRP:
3138 PRE_MEM_WRITE("ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t));
3139 break;
3140 case VKI_TIOCSPGRP:
3141 PRE_MEM_READ("ioctl(TIOCSPGRP)", ARG3, sizeof(vki_pid_t));
3142 break;
3143 case VKI_TIOCGSID:
3144 PRE_MEM_WRITE("ioctl(TIOCGSID)", ARG3, sizeof(vki_pid_t));
3145 break;
3146 case VKI_TIOCNOTTY:
3147 case VKI_TIOCSCTTY:
3148 break;
3150 /* STREAMS */
3151 case VKI_I_PUSH:
3152 PRE_MEM_RASCIIZ("ioctl(I_PUSH)", ARG3);
3153 break;
3154 case VKI_I_FLUSH:
3155 break;
3156 case VKI_I_STR:
3158 PRE_MEM_READ("ioctl(I_STR)", ARG3, sizeof(struct vki_strioctl));
3160 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3161 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3162 if ((p->ic_dp != NULL) && (p->ic_len > 0)) {
3163 PRE_MEM_READ("ioctl(I_STR, strioctl->ic_dp)",
3164 (Addr) p->ic_dp, p->ic_len);
3168 break;
3169 case VKI_I_FIND:
3170 PRE_MEM_RASCIIZ("ioctl(I_FIND)", ARG3);
3171 break;
3172 case VKI_I_PEEK:
3174 /* Try hard not to mark strpeek->*buf.len members as being read. */
3175 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3177 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.maxlen)",
3178 p->ctlbuf.maxlen);
3179 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.len)",
3180 p->ctlbuf.len);
3181 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3182 p->ctlbuf.buf);
3183 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.maxlen)",
3184 p->databuf.maxlen);
3185 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->databuf.len)",
3186 p->databuf.len);
3187 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.buf)",
3188 p->databuf.buf);
3189 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->flags)", p->flags);
3190 /*PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->flags)", p->flags);*/
3192 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3193 if (p->ctlbuf.buf && p->ctlbuf.maxlen > 0)
3194 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3195 (Addr)p->ctlbuf.buf, p->ctlbuf.maxlen);
3196 if (p->databuf.buf && p->databuf.maxlen > 0)
3197 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->databuf.buf)",
3198 (Addr)p->databuf.buf, p->databuf.maxlen);
3201 break;
3202 case VKI_I_CANPUT:
3203 break;
3205 /* sockio */
3206 case VKI_SIOCGIFCONF:
3208 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3209 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_len)", p->ifc_len);
3210 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_buf)", p->ifc_buf);
3211 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3212 if ((p->ifc_buf != NULL) && (p->ifc_len > 0))
3213 PRE_MEM_WRITE("ioctl(SIOCGIFCONF, ifconf->ifc_buf)",
3214 (Addr) p->ifc_buf, p->ifc_len);
3216 /* ifc_len is also written to during the SIOCGIFCONF ioctl. */
3218 break;
3219 case VKI_SIOCGIFFLAGS:
3221 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3222 PRE_FIELD_READ("ioctl(SIOCGIFFLAGS, ifreq->ifr_name)", p->ifr_name);
3223 PRE_FIELD_WRITE("ioctl(SIOCGIFFLAGS, ifreq->ifr_flags)", p->ifr_flags);
3225 break;
3226 case VKI_SIOCGIFNETMASK:
3228 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3229 PRE_FIELD_READ("ioctl(SIOCGIFNETMASK, ifreq->ifr_name)", p->ifr_name);
3230 PRE_FIELD_WRITE("ioctl(SIOCGIFNETMASK, ifreq->ifr_addr)", p->ifr_addr);
3232 break;
3233 case VKI_SIOCGIFNUM:
3234 PRE_MEM_WRITE("ioctl(SIOCGIFNUM)", ARG3, sizeof(int));
3235 break;
3236 case VKI_SIOCGLIFBRDADDR:
3238 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3239 PRE_FIELD_READ("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_name)",
3240 p->lifr_name);
3241 PRE_FIELD_WRITE("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_addr)",
3242 p->lifr_addr);
3244 break;
3245 case VKI_SIOCGLIFCONF:
3247 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3248 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_len)", p->lifc_len);
3249 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)", p->lifc_buf);
3250 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_family)",
3251 p->lifc_family);
3252 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_flags)",
3253 p->lifc_flags);
3254 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3255 if ((p->lifc_buf != NULL) && (p->lifc_len > 0))
3256 PRE_MEM_WRITE("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)",
3257 (Addr) p->lifc_buf, p->lifc_len);
3259 /* lifc_len is also written to during the SIOCGLIFCONF ioctl. */
3261 break;
3262 case VKI_SIOCGLIFFLAGS:
3264 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3265 PRE_FIELD_READ("ioctl(SIOCGLIFFLAGS, lifreq->lifr_name)",
3266 p->lifr_name);
3267 PRE_FIELD_WRITE("ioctl(SIOCGLIFFLAGS, lifreq->lifr_flags)",
3268 p->lifr_flags);
3270 break;
3271 case VKI_SIOCGLIFNETMASK:
3273 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3274 PRE_FIELD_READ("ioctl(SIOCGLIFNETMASK, lifreq->lifr_name)",
3275 p->lifr_name);
3276 PRE_FIELD_WRITE("ioctl(SIOCGLIFNETMASK, lifreq->lifr_addr)",
3277 p->lifr_addr);
3279 break;
3280 case VKI_SIOCGLIFNUM:
3282 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3283 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_family)",
3284 p->lifn_family);
3285 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_flags)",
3286 p->lifn_flags);
3287 PRE_FIELD_WRITE("ioctl(SIOCGLIFNUM, lifn->lifn_count)",
3288 p->lifn_count);
3290 break;
3292 /* filio */
3293 case VKI_FIOSETOWN:
3294 PRE_MEM_READ("ioctl(FIOSETOWN)", ARG3, sizeof(vki_pid_t));
3295 break;
3296 case VKI_FIOGETOWN:
3297 PRE_MEM_WRITE("ioctl(FIOGETOWN)", ARG3, sizeof(vki_pid_t));
3298 break;
3300 /* CRYPTO */
3301 case VKI_CRYPTO_GET_PROVIDER_LIST:
3303 vki_crypto_get_provider_list_t *pl =
3304 (vki_crypto_get_provider_list_t *) ARG3;
3305 PRE_FIELD_READ("ioctl(CRYPTO_GET_PROVIDER_LIST, pl->pl_count)",
3306 pl->pl_count);
3308 if (ML_(safe_to_deref)(pl, sizeof(*pl))) {
3309 PRE_MEM_WRITE("ioctl(CRYPTO_GET_PROVIDER_LIST)", ARG3,
3310 MAX(1, pl->pl_count) *
3311 sizeof(vki_crypto_get_provider_list_t));
3313 /* Save the requested count to unused ARG4 below,
3314 when we know the pre-handler succeeded. */
3317 break;
3319 /* dtrace */
3320 case VKI_DTRACEHIOC_REMOVE:
3321 break;
3322 case VKI_DTRACEHIOC_ADDDOF:
3324 vki_dof_helper_t *dh = (vki_dof_helper_t *) ARG3;
3325 PRE_MEM_RASCIIZ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_mod)",
3326 (Addr) dh->dofhp_mod);
3327 PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_addr)",
3328 dh->dofhp_addr);
3329 PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_dof)",
3330 dh->dofhp_dof);
3332 break;
3334 /* devinfo */
3335 case VKI_DINFOUSRLD:
3336 /* We should do PRE_MEM_WRITE here, but it is unclear how many bytes get written. */
3337 break;
3338 case VKI_DINFOIDENT:
3339 break;
3341 default:
3342 ML_(PRE_unknown_ioctl)(tid, ARG2, ARG3);
3343 break;
3346 /* Be strict. */
3347 if (!ML_(fd_allowed)(ARG1, "ioctl", tid, False)) {
3348 SET_STATUS_Failure(VKI_EBADF);
3349 } else if (ARG2 == VKI_CRYPTO_GET_PROVIDER_LIST) {
3350 /* Save the requested count to unused ARG4 now. */
3351 ARG4 = ARG3;
3355 POST(sys_ioctl)
3357 /* Prevent sign-extending the switch case values to 64 bits on 64-bit
3358 architectures. */
3359 Int cmd = (Int) ARG2;
3361 switch (cmd /*request*/) {
3362 /* pools */
3363 case VKI_POOL_STATUSQ:
3364 POST_MEM_WRITE(ARG3, sizeof(vki_pool_status_t));
3365 break;
3367 /* mntio */
3368 case VKI_MNTIOC_GETEXTMNTENT:
3370 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3371 struct vki_extmnttab *mnt = (struct vki_extmnttab *) embuf->mbuf_emp;
3373 POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_extmnttab));
3374 if (mnt != NULL) {
3375 if (mnt->mnt_special != NULL)
3376 POST_MEM_WRITE((Addr) mnt->mnt_special,
3377 VG_(strlen)(mnt->mnt_special) + 1);
3378 if (mnt->mnt_mountp != NULL)
3379 POST_MEM_WRITE((Addr) mnt->mnt_mountp,
3380 VG_(strlen)(mnt->mnt_mountp) + 1);
3381 if (mnt->mnt_fstype != NULL)
3382 POST_MEM_WRITE((Addr) mnt->mnt_fstype,
3383 VG_(strlen)(mnt->mnt_fstype) + 1);
3384 if (mnt->mnt_mntopts != NULL)
3385 POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
3386 VG_(strlen)(mnt->mnt_mntopts) + 1);
3387 if (mnt->mnt_time != NULL)
3388 POST_MEM_WRITE((Addr) mnt->mnt_time,
3389 VG_(strlen)(mnt->mnt_time) + 1);
3392 break;
3394 case VKI_MNTIOC_GETMNTANY:
3396 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3397 struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
3399 POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_mnttab));
3400 if (mnt != NULL) {
3401 if (mnt->mnt_special != NULL)
3402 POST_MEM_WRITE((Addr) mnt->mnt_special,
3403 VG_(strlen)(mnt->mnt_special) + 1);
3404 if (mnt->mnt_mountp != NULL)
3405 POST_MEM_WRITE((Addr) mnt->mnt_mountp,
3406 VG_(strlen)(mnt->mnt_mountp) + 1);
3407 if (mnt->mnt_fstype != NULL)
3408 POST_MEM_WRITE((Addr) mnt->mnt_fstype,
3409 VG_(strlen)(mnt->mnt_fstype) + 1);
3410 if (mnt->mnt_mntopts != NULL)
3411 POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
3412 VG_(strlen)(mnt->mnt_mntopts) + 1);
3413 if (mnt->mnt_time != NULL)
3414 POST_MEM_WRITE((Addr) mnt->mnt_time,
3415 VG_(strlen)(mnt->mnt_time) + 1);
3418 break;
3420 /* termio/termios */
3421 case VKI_TCGETA:
3422 POST_MEM_WRITE(ARG3, sizeof(struct vki_termio));
3423 break;
3424 case VKI_TCGETS:
3425 POST_MEM_WRITE(ARG3, sizeof(struct vki_termios));
3426 break;
3427 case VKI_TCSETS:
3428 break;
3429 case VKI_TCSETSW:
3430 break;
3431 case VKI_TCSETSF:
3432 break;
3433 case VKI_TIOCGWINSZ:
3434 POST_MEM_WRITE(ARG3, sizeof(struct vki_winsize));
3435 break;
3436 case VKI_TIOCSWINSZ:
3437 break;
3438 case VKI_TIOCGPGRP:
3439 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3440 break;
3441 case VKI_TIOCSPGRP:
3442 break;
3443 case VKI_TIOCGSID:
3444 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3445 break;
3446 case VKI_TIOCNOTTY:
3447 case VKI_TIOCSCTTY:
3448 break;
3450 /* STREAMS */
3451 case VKI_I_PUSH:
3452 break;
3453 case VKI_I_FLUSH:
3454 break;
3455 case VKI_I_STR:
3457 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3459 POST_FIELD_WRITE(p->ic_len);
3460 if ((p->ic_dp != NULL) && (p->ic_len > 0))
3461 POST_MEM_WRITE((Addr) p->ic_dp, p->ic_len);
3463 break;
3464 case VKI_I_FIND:
3465 break;
3466 case VKI_I_PEEK:
3468 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3470 POST_FIELD_WRITE(p->ctlbuf.len);
3471 POST_FIELD_WRITE(p->databuf.len);
3472 POST_FIELD_WRITE(p->flags);
3474 if (p->ctlbuf.buf && p->ctlbuf.len > 0)
3475 POST_MEM_WRITE((Addr)p->ctlbuf.buf, p->ctlbuf.len);
3476 if (p->databuf.buf && p->databuf.len > 0)
3477 POST_MEM_WRITE((Addr)p->databuf.buf, p->databuf.len);
3479 break;
3480 case VKI_I_CANPUT:
3481 break;
3483 /* sockio */
3484 case VKI_SIOCGIFCONF:
3486 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3487 POST_FIELD_WRITE(p->ifc_len);
3488 POST_FIELD_WRITE(p->ifc_req);
3489 if ((p->ifc_req != NULL) && (p->ifc_len > 0))
3490 POST_MEM_WRITE((Addr) p->ifc_req, p->ifc_len);
3492 break;
3493 case VKI_SIOCGIFFLAGS:
3495 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3496 POST_FIELD_WRITE(p->ifr_flags);
3498 break;
3499 case VKI_SIOCGIFNETMASK:
3501 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3502 POST_FIELD_WRITE(p->ifr_addr);
3504 break;
3505 case VKI_SIOCGIFNUM:
3506 POST_MEM_WRITE(ARG3, sizeof(int));
3507 break;
3508 case VKI_SIOCGLIFBRDADDR:
3510 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3511 POST_FIELD_WRITE(p->lifr_addr);
3513 break;
3514 case VKI_SIOCGLIFCONF:
3516 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3517 POST_FIELD_WRITE(p->lifc_len);
3518 POST_FIELD_WRITE(p->lifc_req);
3519 if ((p->lifc_req != NULL) && (p->lifc_len > 0))
3520 POST_MEM_WRITE((Addr) p->lifc_req, p->lifc_len);
3522 break;
3523 case VKI_SIOCGLIFFLAGS:
3525 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3526 POST_FIELD_WRITE(p->lifr_flags);
3528 break;
3529 case VKI_SIOCGLIFNETMASK:
3531 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3532 POST_FIELD_WRITE(p->lifr_addr);
3534 break;
3535 case VKI_SIOCGLIFNUM:
3537 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3538 POST_FIELD_WRITE(p->lifn_count);
3540 break;
3542 /* filio */
3543 case VKI_FIOSETOWN:
3544 break;
3545 case VKI_FIOGETOWN:
3546 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3547 break;
3549 /* CRYPTO */
3550 case VKI_CRYPTO_GET_PROVIDER_LIST:
3552 vki_crypto_get_provider_list_t *pl =
3553 (vki_crypto_get_provider_list_t *) ARG3;
3555 POST_FIELD_WRITE(pl->pl_count);
3556 POST_FIELD_WRITE(pl->pl_return_value);
3558 if ((ARG4 > 0) && (pl->pl_return_value == VKI_CRYPTO_SUCCESS))
3559 POST_MEM_WRITE((Addr) pl->pl_list, pl->pl_count *
3560 sizeof(vki_crypto_provider_entry_t));
3562 break;
3564 /* dtrace */
3565 case VKI_DTRACEHIOC_REMOVE:
3566 case VKI_DTRACEHIOC_ADDDOF:
3567 break;
3569 /* devinfo */
3570 case VKI_DINFOUSRLD:
3571 POST_MEM_WRITE(ARG3, RES);
3572 break;
3573 case VKI_DINFOIDENT:
3574 break;
3576 default:
3577 /* Not really anything to do since ioctl direction hints are hardly used
3578 on Solaris. */
3579 break;
3583 PRE(sys_fchownat)
3585 /* int fchownat(int fd, const char *path, uid_t owner, gid_t group,
3586 int flag); */
3588 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
3589 This is different from Linux, for example, where glibc sign-extends it. */
3590 Int fd = (Int) ARG1;
3592 PRINT("sys_fchownat ( %d, %#lx(%s), %ld, %ld, %ld )", fd,
3593 ARG2, (HChar *) ARG2, SARG3, SARG4, ARG5);
3594 PRE_REG_READ5(long, "fchownat", int, fd, const char *, path,
3595 vki_uid_t, owner, vki_gid_t, group, int, flag);
3597 if (ARG2)
3598 PRE_MEM_RASCIIZ("fchownat(path)", ARG2);
3600 /* Be strict but ignore fd for absolute path. */
3601 if (fd != VKI_AT_FDCWD
3602 && ML_(safe_to_deref)((void *) ARG2, 1)
3603 && ((HChar *) ARG2)[0] != '/'
3604 && !ML_(fd_allowed)(fd, "fchownat", tid, False))
3605 SET_STATUS_Failure(VKI_EBADF);
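/* Illustrative client-side calls (a minimal sketch, not part of Valgrind;
   fd 7 and the uid/gid values are hypothetical). Only the last form is
   checked against fd_allowed(): a relative path with AT_FDCWD and an
   absolute path with any fd both bypass the descriptor entirely.

      fchownat(AT_FDCWD, "data/file.txt", 100, 100, 0);
      fchownat(7, "/etc/motd", 100, 100, 0);
      fchownat(7, "data/file.txt", 100, 100, 0);
*/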
3608 PRE(sys_fdsync)
3610 /* int fdsync(int fd, int flag); */
3611 PRINT("sys_fdsync ( %ld, %ld )", SARG1, SARG2);
3612 PRE_REG_READ2(long, "fdsync", int, fd, int, flag);
3614 /* Be strict. */
3615 if (!ML_(fd_allowed)(ARG1, "fdsync", tid, False))
3616 SET_STATUS_Failure(VKI_EBADF);
3619 PRE(sys_execve)
3621 Int i, j;
3622 /* This is a Solaris specific version of the generic pre-execve wrapper. */
3624 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3625 /* int execve(uintptr_t file, const char **argv, const char **envp,
3626 int flags); */
3627 PRINT("sys_execve ( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
3628 PRE_REG_READ4(long, "execve", uintptr_t, file, const char **, argv,
3629 const char **, envp, int, flags);
3631 #else
3633 /* int execve(const char *fname, const char **argv, const char **envp); */
3634 PRINT("sys_execve ( %#lx(%s), %#lx, %#lx )",
3635 ARG1, (HChar *) ARG1, ARG2, ARG3);
3636 PRE_REG_READ3(long, "execve", const char *, file, const char **, argv,
3637 const char **, envp);
3638 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3640 Bool ARG1_is_fd = False;
3641 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3642 if (ARG4 & VKI_EXEC_DESCRIPTOR) {
3643 ARG1_is_fd = True;
3645 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3647 if (ARG1_is_fd == False)
3648 PRE_MEM_RASCIIZ("execve(filename)", ARG1);
3649 if (ARG2)
3650 ML_(pre_argv_envp)(ARG2, tid, "execve(argv)", "execve(argv[i])");
3651 if (ARG3)
3652 ML_(pre_argv_envp)(ARG3, tid, "execve(envp)", "execve(envp[i])");
3654 /* Erk. If the exec fails, then the following will have made a mess of
3655 things which makes it hard for us to continue. The right thing to do is
3656 piece everything together again in POST(execve), but that's close to
3657 impossible. Instead, we make an effort to check that the execve will
3658 work before actually doing it. */
3660 const HChar *fname = (const HChar *) ARG1;
3661 if (ARG1_is_fd) {
3662 if (!ML_(fd_allowed)(ARG1, "execve", tid, False)) {
3663 SET_STATUS_Failure(VKI_EBADF);
3664 return;
3667 if (VG_(resolve_filename)(ARG1, &fname) == False) {
3668 SET_STATUS_Failure(VKI_EBADF);
3669 return;
3672 struct vg_stat stats;
3673 if (VG_(fstat)(ARG1, &stats) != 0) {
3674 SET_STATUS_Failure(VKI_EBADF);
3675 return;
3678 if (stats.nlink > 1)
3679 VG_(unimplemented)("Syswrap of execve where fd points to a hardlink.");
3682 /* Check that the name at least begins in client-accessible storage. */
3683 if (ARG1_is_fd == False) {
3684 if ((fname == NULL) || !ML_(safe_to_deref)(fname, 1)) {
3685 SET_STATUS_Failure(VKI_EFAULT);
3686 return;
3690 /* Check that the args at least begin in client-accessible storage.
3691 Solaris disallows performing the exec without any arguments specified.
3693 if (!ARG2 /* obviously bogus */ ||
3694 !VG_(am_is_valid_for_client)(ARG2, 1, VKI_PROT_READ)) {
3695 SET_STATUS_Failure(VKI_EFAULT);
3696 return;
3699 /* Debug-only printing. */
3700 if (0) {
3701 VG_(printf)("ARG1 = %#lx(%s)\n", ARG1, fname);
3702 if (ARG2) {
3703 Int q;
3704 HChar** vec = (HChar**)ARG2;
3706 VG_(printf)("ARG2 = ");
3707 for (q = 0; vec[q]; q++)
3708 VG_(printf)("%p(%s) ", vec[q], vec[q]);
3709 VG_(printf)("\n");
3711 else
3712 VG_(printf)("ARG2 = null\n");
3715 /* Decide whether or not we want to follow along. */
3716 /* Make 'child_argv' be a pointer to the child's arg vector (skipping the
3717 exe name) */
3718 const HChar **child_argv = (const HChar **) ARG2;
3719 if (child_argv[0] == NULL)
3720 child_argv = NULL;
3721 Bool trace_this_child = VG_(should_we_trace_this_child)(fname, child_argv);
3723 /* Do the important checks: it is a file, is executable, permissions are
3724 ok, etc. We allow setuid executables to run only when we are not
3725 simulating them, that is, when they are to be run natively. */
3726 Bool setuid_allowed = trace_this_child ? False : True;
3727 SysRes res = VG_(pre_exec_check)(fname, NULL, setuid_allowed);
3728 if (sr_isError(res)) {
3729 SET_STATUS_Failure(sr_Err(res));
3730 return;
3733 /* If we're tracing the child, and the launcher name looks bogus (possibly
3734 because launcher.c couldn't figure it out, see comments therein) then we
3735 have no option but to fail. */
3736 if (trace_this_child &&
3737 (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
3738 SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
3739 return;
3742 /* After this point, we can't recover if the execve fails. */
3743 VG_(debugLog)(1, "syswrap", "Exec of %s\n", fname);
3745 /* Terminate gdbserver if it is active. */
3746 if (VG_(clo_vgdb) != Vg_VgdbNo) {
3747 /* If the child will not be traced, we need to terminate gdbserver to
3748 clean up the gdbserver resources (e.g. the FIFO files). If the child
3749 will be traced, we also terminate gdbserver: the new Valgrind will
3750 start a fresh gdbserver after exec. */
3751 VG_(gdbserver)(0);
3754 /* Resistance is futile. Nuke all other threads. POSIX mandates this.
3755 (Really, nuke them all, since the new process will make its own new
3756 thread.) */
3757 VG_(nuke_all_threads_except)(tid, VgSrc_ExitThread);
3758 VG_(reap_threads)(tid);
3760 /* Set up the child's exe path. */
3761 const HChar *path = fname;
3762 const HChar *launcher_basename = NULL;
3763 if (trace_this_child) {
3764 /* We want to exec the launcher. Get its pre-remembered path. */
3765 path = VG_(name_of_launcher);
3766 /* VG_(name_of_launcher) should have been acquired by m_main at
3767 startup. */
3768 vg_assert(path);
3770 launcher_basename = VG_(strrchr)(path, '/');
3771 if (!launcher_basename || launcher_basename[1] == '\0')
3772 launcher_basename = path; /* hmm, very dubious */
3773 else
3774 launcher_basename++;
3777 /* Set up the child's environment.
3779 Remove the valgrind-specific stuff from the environment so the child
3780 doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc. This is done
3781 unconditionally, since if we are tracing the child, the child valgrind
3782 will set up the appropriate client environment. Nb: we make a copy of
3783 the environment before trying to mangle it as it might be in read-only
3784 memory (bug #101881).
3786 Then, if tracing the child, set VALGRIND_LIB for it. */
3787 HChar **envp = NULL;
3788 if (ARG3 != 0) {
3789 envp = VG_(env_clone)((HChar**)ARG3);
3790 vg_assert(envp != NULL);
3791 VG_(env_remove_valgrind_env_stuff)(envp, True /*ro_strings*/, NULL);
3794 if (trace_this_child) {
3795 /* Set VALGRIND_LIB in ARG3 (the environment). */
3796 VG_(env_setenv)( &envp, VALGRIND_LIB, VG_(libdir));
3799 /* Set up the child's args. If not tracing it, they are simply ARG2.
3800 Otherwise, they are:
3802 [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG2[1..],
3804 except that the first VG_(args_for_valgrind_noexecpass) args are
3805 omitted. */
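/* A worked example (hypothetical command line, not from the source): if the
   user originally ran "valgrind --tool=memcheck ./parent" and the client now
   does execve("/bin/ls", {"ls", "-l", NULL}, ...), the traced-child argv
   built below comes out roughly as

      { "valgrind", "--tool=memcheck", "/bin/ls", "-l", NULL }

   i.e. the launcher basename, Valgrind's own args (minus the first
   VG_(args_for_valgrind_noexecpass) of them), the client exe path from ARG1,
   and the client's argv[1..]. */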
3806 HChar **argv = NULL;
3807 if (!trace_this_child)
3808 argv = (HChar **) ARG2;
3809 else {
3810 Int tot_args;
3812 vg_assert(VG_(args_for_valgrind));
3813 vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
3814 vg_assert(VG_(args_for_valgrind_noexecpass)
3815 <= VG_(sizeXA)(VG_(args_for_valgrind)));
3817 /* How many args in total will there be? */
3818 /* launcher basename */
3819 tot_args = 1;
3820 /* V's args */
3821 tot_args += VG_(sizeXA)(VG_(args_for_valgrind));
3822 tot_args -= VG_(args_for_valgrind_noexecpass);
3823 /* name of client exe */
3824 tot_args++;
3825 /* args for client exe, skipping [0] */
3826 HChar **arg2copy = (HChar **) ARG2;
3827 if (arg2copy[0] != NULL)
3828 for (i = 1; arg2copy[i]; i++)
3829 tot_args++;
3830 /* allocate */
3831 argv = VG_(malloc)("syswrap.exec.5", (tot_args + 1) * sizeof(HChar*));
3832 /* copy */
3833 j = 0;
3834 argv[j++] = CONST_CAST(HChar *, launcher_basename);
3835 for (i = 0; i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
3836 if (i < VG_(args_for_valgrind_noexecpass))
3837 continue;
3838 argv[j++] = *(HChar**)VG_(indexXA)(VG_(args_for_valgrind), i);
3840 argv[j++] = CONST_CAST(HChar *, fname);
3841 if (arg2copy[0] != NULL)
3842 for (i = 1; arg2copy[i]; i++)
3843 argv[j++] = arg2copy[i];
3844 argv[j++] = NULL;
3845 /* check */
3846 vg_assert(j == tot_args + 1);
3849 /* Set the signal state up for exec.
3851 We need to set the real signal state to make sure the exec'd process
3852 gets SIG_IGN properly.
3854 Also set our real sigmask to match the client's sigmask so that the
3855 exec'd child will get the right mask. First we need to clear out any
3856 pending signals so that they don't get delivered, which would confuse
3857 things.
3859 XXX This is a bug - the signals should remain pending, and be delivered
3860 to the new process after exec. There's also a race-condition, since if
3861 someone delivers us a signal between the sigprocmask and the execve,
3862 we'll still get the signal. Oh well.
3865 vki_sigset_t allsigs;
3866 vki_siginfo_t info;
3868 /* What this loop does: it queries SCSS (the signal state that the
3869 client _thinks_ the kernel is in) by calling VG_(do_sys_sigaction),
3870 and modifies the real kernel signal state accordingly. */
3871 for (i = 1; i < VG_(max_signal); i++) {
3872 vki_sigaction_fromK_t sa_f;
3873 vki_sigaction_toK_t sa_t;
3874 VG_(do_sys_sigaction)(i, NULL, &sa_f);
3875 VG_(convert_sigaction_fromK_to_toK)(&sa_f, &sa_t);
3876 VG_(sigaction)(i, &sa_t, NULL);
3879 VG_(sigfillset)(&allsigs);
3880 while (VG_(sigtimedwait_zero)(&allsigs, &info) > 0)
3883 ThreadState *tst = VG_(get_ThreadState)(tid);
3884 VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, NULL);
3887 /* Debug-only printing. */
3888 if (0) {
3889 HChar **cpp;
3890 VG_(printf)("exec: %s\n", path);
3891 for (cpp = argv; cpp && *cpp; cpp++)
3892 VG_(printf)("argv: %s\n", *cpp);
3893 if (0)
3894 for (cpp = envp; cpp && *cpp; cpp++)
3895 VG_(printf)("env: %s\n", *cpp);
3898 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3899 res = VG_(do_syscall4)(__NR_execve, (UWord) path, (UWord) argv,
3900 (UWord) envp, ARG4 & ~VKI_EXEC_DESCRIPTOR);
3901 #else
3902 res = VG_(do_syscall3)(__NR_execve, (UWord) path, (UWord) argv,
3903 (UWord) envp);
3904 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3905 SET_STATUS_from_SysRes(res);
3907 /* If we got here, then the execve failed. We've already made way too much
3908 of a mess to continue, so we have to abort. */
3909 vg_assert(FAILURE);
3910 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3911 if (ARG1_is_fd)
3912 VG_(message)(Vg_UserMsg, "execve(%ld, %#lx, %#lx, %lu) failed, "
3913 "errno %ld\n", SARG1, ARG2, ARG3, ARG4, ERR);
3914 else
3915 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx, %ld) failed, errno"
3916 " %lu\n", ARG1, (HChar *) ARG1, ARG2, ARG3, SARG4, ERR);
3917 #else
3918 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx) failed, errno %lu\n",
3919 ARG1, (HChar *) ARG1, ARG2, ARG3, ERR);
3920 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3921 VG_(message)(Vg_UserMsg, "EXEC FAILED: I can't recover from "
3922 "execve() failing, so I'm dying.\n");
3923 VG_(message)(Vg_UserMsg, "Add more stringent tests in PRE(sys_execve), "
3924 "or work out how to recover.\n");
3925 VG_(exit)(101);
3926 /*NOTREACHED*/
3929 static void pre_mem_read_flock(ThreadId tid, struct vki_flock *lock)
3931 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3932 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3933 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3934 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3937 #if defined(VGP_x86_solaris)
3938 static void pre_mem_read_flock64(ThreadId tid, struct vki_flock64 *lock)
3940 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3941 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3942 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3943 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3945 #endif /* VGP_x86_solaris */
3947 PRE(sys_fcntl)
3949 /* int fcntl(int fildes, int cmd, ...); */
3951 switch (ARG2 /*cmd*/) {
3952 /* These ones ignore ARG3. */
3953 case VKI_F_GETFD:
3954 case VKI_F_GETFL:
3955 case VKI_F_GETXFL:
3956 PRINT("sys_fcntl ( %ld, %ld )", SARG1, SARG2);
3957 PRE_REG_READ2(long, "fcntl", int, fildes, int, cmd);
3958 break;
3960 /* These ones use ARG3 as "arg". */
3961 case VKI_F_DUPFD:
3962 case VKI_F_DUPFD_CLOEXEC:
3963 case VKI_F_SETFD:
3964 case VKI_F_SETFL:
3965 case VKI_F_DUP2FD:
3966 case VKI_F_BADFD:
3967 PRINT("sys_fcntl ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
3968 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd, int, arg);
3969 /* Check that the client program isn't going to poison any of V's output
3970 fds. */
3971 if (ARG2 == VKI_F_DUP2FD &&
3972 !ML_(fd_allowed)(ARG3, "fcntl(F_DUP2FD)", tid, False)) {
3973 SET_STATUS_Failure(VKI_EBADF);
3974 return;
3976 break;
3978 /* These ones use ARG3 as "native lock" (input only). */
3979 case VKI_F_SETLK:
3980 case VKI_F_SETLKW:
3981 case VKI_F_ALLOCSP:
3982 case VKI_F_FREESP:
3983 case VKI_F_SETLK_NBMAND:
3984 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3985 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3986 struct flock *, lock);
3987 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
3988 break;
3990 /* This one uses ARG3 as "native lock" (input&output). */
3991 case VKI_F_GETLK:
3992 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3993 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3994 struct flock *, lock);
3995 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
3996 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock));
3997 break;
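/* Client-side view (a minimal sketch with a hypothetical fd): unlike the
   F_SETLK* family, F_GETLK's struct flock is both read and overwritten by
   the kernel, hence the extra PRE_MEM_WRITE above and the POST_MEM_WRITE in
   POST(sys_fcntl).

      struct flock fl;
      fl.l_type = F_WRLCK;
      fl.l_whence = SEEK_SET;
      fl.l_start = 0;
      fl.l_len = 0;
      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
         ;   no conflicting lock; otherwise fl now describes the holder
*/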
3999 #if defined(VGP_x86_solaris)
4000 /* These ones use ARG3 as "transitional 64b lock" (input only). */
4001 case VKI_F_SETLK64:
4002 case VKI_F_SETLKW64:
4003 case VKI_F_ALLOCSP64:
4004 case VKI_F_FREESP64:
4005 case VKI_F_SETLK64_NBMAND:
4006 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4007 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4008 struct flock64 *, lock);
4009 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
4010 break;
4012 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
4013 case VKI_F_GETLK64:
4014 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4015 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4016 struct flock64 *, lock);
4017 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
4018 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock64));
4019 break;
4020 #endif /* VGP_x86_solaris */
4022 /* These ones use ARG3 as "fshare". */
4023 case VKI_F_SHARE:
4024 case VKI_F_UNSHARE:
4025 case VKI_F_SHARE_NBMAND:
4026 PRINT("sys_fcntl[ARG3=='fshare'] ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4027 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
4028 struct fshare *, sh);
4029 PRE_MEM_READ("fcntl(fshare)", ARG3, sizeof(struct vki_fshare));
4030 break;
4032 default:
4033 VG_(unimplemented)("Syswrap of the fcntl call with cmd %ld.", SARG2);
4034 /*NOTREACHED*/
4035 break;
4038 if (ARG2 == VKI_F_SETLKW
4039 #if defined(VGP_x86_solaris)
4040 || ARG2 == VKI_F_SETLKW64
4041 #endif /* VGP_x86_solaris */
4043 *flags |= SfMayBlock;
4045 /* Be strict. */
4046 if (!ML_(fd_allowed)(ARG1, "fcntl", tid, False))
4047 SET_STATUS_Failure(VKI_EBADF);
4050 POST(sys_fcntl)
4052 switch (ARG2 /*cmd*/) {
4053 case VKI_F_DUPFD:
4054 if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD)", tid, True)) {
4055 VG_(close)(RES);
4056 SET_STATUS_Failure(VKI_EMFILE);
4057 } else if (VG_(clo_track_fds))
4058 ML_(record_fd_open_named)(tid, RES);
4059 break;
4061 case VKI_F_DUPFD_CLOEXEC:
4062 if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD_CLOEXEC)", tid, True)) {
4063 VG_(close)(RES);
4064 SET_STATUS_Failure(VKI_EMFILE);
4065 } else if (VG_(clo_track_fds))
4066 ML_(record_fd_open_named)(tid, RES);
4067 break;
4069 case VKI_F_DUP2FD:
4070 if (!ML_(fd_allowed)(RES, "fcntl(F_DUP2FD)", tid, True)) {
4071 VG_(close)(RES);
4072 SET_STATUS_Failure(VKI_EMFILE);
4073 } else if (VG_(clo_track_fds))
4074 ML_(record_fd_open_named)(tid, RES);
4075 break;
4077 /* This one uses ARG3 as "native lock" (input&output). */
4078 case VKI_F_GETLK:
4079 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock));
4080 break;
4082 #if defined(VGP_x86_solaris)
4083 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
4084 case VKI_F_GETLK64:
4085 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock64));
4086 break;
4087 #endif /* VGP_x86_solaris */
4089 default:
4090 break;
4094 PRE(sys_renameat)
4096 /* int renameat(int fromfd, const char *old, int tofd, const char *new); */
4098 /* Interpret the first and third arguments as 32-bit values even on 64-bit
4099 architecture. This is different from Linux, for example, where glibc
4100 sign-extends them. */
4101 Int fromfd = (Int) ARG1;
4102 Int tofd = (Int) ARG3;
4104 *flags |= SfMayBlock;
4105 PRINT("sys_renameat ( %d, %#lx(%s), %d, %#lx(%s) )", fromfd,
4106 ARG2, (HChar *) ARG2, tofd, ARG4, (HChar *) ARG4);
4107 PRE_REG_READ4(long, "renameat", int, fromfd, const char *, old,
4108 int, tofd, const char *, new);
4110 PRE_MEM_RASCIIZ("renameat(old)", ARG2);
4111 PRE_MEM_RASCIIZ("renameat(new)", ARG4);
4113 /* Be strict but ignore fromfd/tofd for absolute old/new. */
4114 if (fromfd != VKI_AT_FDCWD
4115 && ML_(safe_to_deref)((void *) ARG2, 1)
4116 && ((HChar *) ARG2)[0] != '/'
4117 && !ML_(fd_allowed)(fromfd, "renameat", tid, False)) {
4118 SET_STATUS_Failure(VKI_EBADF);
4120 if (tofd != VKI_AT_FDCWD
4121 && ML_(safe_to_deref)((void *) ARG4, 1)
4122 && ((HChar *) ARG4)[0] != '/'
4123 && !ML_(fd_allowed)(tofd, "renameat", tid, False)) {
4124 SET_STATUS_Failure(VKI_EBADF);
4128 PRE(sys_unlinkat)
4130 /* int unlinkat(int dirfd, const char *pathname, int flags); */
4132 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4133 This is different from Linux, for example, where glibc sign-extends it. */
4134 Int dfd = (Int) ARG1;
4136 *flags |= SfMayBlock;
4137 PRINT("sys_unlinkat ( %d, %#lx(%s), %ld )", dfd, ARG2, (HChar *) ARG2,
4138 SARG3);
4139 PRE_REG_READ3(long, "unlinkat", int, dirfd, const char *, pathname,
4140 int, flags);
4141 PRE_MEM_RASCIIZ("unlinkat(pathname)", ARG2);
4143 /* Be strict but ignore dfd for absolute pathname. */
4144 if (dfd != VKI_AT_FDCWD
4145 && ML_(safe_to_deref)((void *) ARG2, 1)
4146 && ((HChar *) ARG2)[0] != '/'
4147 && !ML_(fd_allowed)(dfd, "unlinkat", tid, False))
4148 SET_STATUS_Failure(VKI_EBADF);
4151 PRE(sys_fstatat)
4153 /* int fstatat(int fildes, const char *path, struct stat *buf,
4154 int flag); */
4156 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4157 This is different from Linux, for example, where glibc sign-extends it. */
4158 Int fd = (Int) ARG1;
4160 PRINT("sys_fstatat ( %d, %#lx(%s), %#lx, %ld )", fd, ARG2,
4161 (HChar *) ARG2, ARG3, SARG4);
4162 PRE_REG_READ4(long, "fstatat", int, fildes, const char *, path,
4163 struct stat *, buf, int, flag);
4164 if (ARG2) {
4165 /* Only test ARG2 if it isn't NULL. The kernel treats the NULL-case as
4166 fstat(fildes, buf). */
4167 PRE_MEM_RASCIIZ("fstatat(path)", ARG2);
4169 PRE_MEM_WRITE("fstatat(buf)", ARG3, sizeof(struct vki_stat));
4171 /* Be strict but ignore fildes for absolute path. */
4172 if (fd != VKI_AT_FDCWD
4173 && ML_(safe_to_deref)((void *) ARG2, 1)
4174 && ((HChar *) ARG2)[0] != '/'
4175 && !ML_(fd_allowed)(fd, "fstatat", tid, False))
4176 SET_STATUS_Failure(VKI_EBADF);
4179 POST(sys_fstatat)
4181 POST_MEM_WRITE(ARG3, sizeof(struct vki_stat));
4184 PRE(sys_openat)
4186 /* int openat(int fildes, const char *filename, int flags);
4187 int openat(int fildes, const char *filename, int flags, mode_t mode); */
4189 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
4190 This is different from Linux, for example, where glibc sign-extends it. */
4191 Int fd = (Int) ARG1;
4193 if (ARG3 & VKI_O_CREAT) {
4194 /* 4-arg version */
4195 PRINT("sys_openat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2, (HChar *) ARG2,
4196 SARG3, SARG4);
4197 PRE_REG_READ4(long, "openat", int, fildes, const char *, filename,
4198 int, flags, vki_mode_t, mode);
4200 else {
4201 /* 3-arg version */
4202 PRINT("sys_openat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2,
4203 SARG3);
4204 PRE_REG_READ3(long, "openat", int, fildes, const char *, filename,
4205 int, flags);
4208 PRE_MEM_RASCIIZ("openat(filename)", ARG2);
4210 /* Be strict but ignore fildes for absolute pathname. */
4211 if (fd != VKI_AT_FDCWD
4212 && ML_(safe_to_deref)((void *) ARG2, 1)
4213 && ((HChar *) ARG2)[0] != '/'
4214 && !ML_(fd_allowed)(fd, "openat", tid, False)) {
4215 SET_STATUS_Failure(VKI_EBADF);
4216 return;
4219 if (ML_(handle_auxv_open)(status, (const HChar *) ARG2, ARG3))
4220 return;
4222 if (handle_psinfo_open(status, True /*use_openat*/, (const HChar *) ARG2,
4223 fd, ARG3, ARG4))
4224 return;
4226 #if defined(SOLARIS_PROC_CMDLINE)
4227 if (handle_cmdline_open(status, (const HChar *) ARG2))
4228 return;
4229 #endif /* SOLARIS_PROC_CMDLINE */
4231 *flags |= SfMayBlock;
4234 POST(sys_openat)
4236 if (!ML_(fd_allowed)(RES, "openat", tid, True)) {
4237 VG_(close)(RES);
4238 SET_STATUS_Failure(VKI_EMFILE);
4240 else if (VG_(clo_track_fds))
4241 ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
4244 PRE(sys_tasksys)
4246 /* Kernel: long tasksys(int code, projid_t projid, uint_t flags,
4247 void *projidbuf, size_t pbufsz);
4249 switch (ARG1 /*code*/) {
4250 case 0:
4251 /* Libc: taskid_t settaskid(projid_t project, uint_t flags); */
4252 PRINT("sys_tasksys ( %ld, %ld, %lu )", SARG1, SARG2, ARG3);
4253 PRE_REG_READ3(long, SC2("tasksys", "settaskid"), int, code,
4254 vki_projid_t, projid, vki_uint_t, flags);
4255 break;
4256 case 1:
4257 /* Libc: taskid_t gettaskid(void); */
4258 PRINT("sys_tasksys ( %ld )", SARG1);
4259 PRE_REG_READ1(long, SC2("tasksys", "gettaskid"), int, code);
4260 break;
4261 case 2:
4262 /* Libc: projid_t getprojid(void); */
4263 PRINT("sys_tasksys ( %ld )", SARG1);
4264 PRE_REG_READ1(long, SC2("tasksys", "getprojid"), int, code);
4265 break;
4266 case 3:
4267 /* Libproject: size_t projlist(id_t *idbuf, size_t idbufsz); */
4268 PRINT("sys_tasksys ( %ld, %#lx, %lu )", SARG1, ARG4, ARG5);
4269 PRE_REG_READ3(long, SC2("tasksys", "projlist"), int, code,
4270 vki_id_t *, idbuf, vki_size_t, idbufsz);
4271 PRE_MEM_WRITE("tasksys(idbuf)", ARG4, ARG5);
4272 break;
4273 default:
4274 VG_(unimplemented)("Syswrap of the tasksys call with code %ld.", SARG1);
4275 /*NOTREACHED*/
4276 break;
4280 POST(sys_tasksys)
4282 switch (ARG1 /*code*/) {
4283 case 0:
4284 case 1:
4285 case 2:
4286 break;
4287 case 3:
4288 if ((ARG4 != 0) && (ARG5 != 0))
4289 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
4290 break;
4291 default:
4292 vg_assert(0);
4293 break;
4297 PRE(sys_lwp_park)
4299 /* Kernel: int lwp_park(int which, uintptr_t arg1, uintptr_t arg2);
4301 *flags |= SfMayBlock;
4302 switch (ARG1 /*which*/) {
4303 case 0:
4304 /* Libc: int lwp_park(timespec_t *timeout, id_t lwpid); */
4305 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4306 PRE_REG_READ3(long, SC2("lwp_park", "lwp_park"), int, which,
4307 timespec_t *, timeout, vki_id_t, lwpid);
4308 if (ARG2) {
4309 PRE_MEM_READ("lwp_park(timeout)", ARG2, sizeof(vki_timespec_t));
4310 /*PRE_MEM_WRITE("lwp_park(timeout)", ARG2,
4311 sizeof(vki_timespec_t));*/
4313 break;
4314 case 1:
4315 /* Libc: int lwp_unpark(id_t lwpid); */
4316 PRINT("sys_lwp_park ( %ld, %ld )", SARG1, SARG2);
4317 PRE_REG_READ2(long, SC2("lwp_park", "lwp_unpark"), int, which,
4318 vki_id_t, lwpid);
4319 break;
4320 case 2:
4321 /* Libc: int lwp_unpark_all(id_t *lwpid, int nids); */
4322 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4323 PRE_REG_READ3(long, SC2("lwp_park", "lwp_unpark_all"), int, which,
4324 id_t *, lwpid, int, nids);
4325 PRE_MEM_READ("lwp_park(lwpid)", ARG2, ARG3 * sizeof(vki_id_t));
4326 break;
4327 default:
4328 VG_(unimplemented)("Syswrap of the lwp_park call with which %ld.", SARG1);
4329 /*NOTREACHED*/
4330 break;
4334 POST(sys_lwp_park)
4336 switch (ARG1 /*which*/) {
4337 case 0:
4338 if (ARG2)
4339 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
4340 break;
4341 case 1:
4342 case 2:
4343 break;
4344 default:
4345 vg_assert(0);
4346 break;
4350 PRE(sys_sendfilev)
4352 /* Kernel: ssize_t sendfilev(int opcode, int fd,
4353 const struct sendfilevec *vec,
4354 int sfvcnt, size_t *xferred);
4356 PRINT("sys_sendfilev ( %ld, %ld, %#lx, %ld, %#lx )",
4357 SARG1, SARG2, ARG3, SARG4, ARG5);
4359 switch (ARG1 /*opcode*/) {
4360 case VKI_SENDFILEV:
4362 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4363 const struct vki_sendfilevec *, vec,
4364 int, sfvcnt, vki_size_t *, xferred);
4366 PRE_MEM_READ("sendfilev(vec)", ARG3,
4367 ARG4 * sizeof(struct vki_sendfilevec));
4368 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
4370 struct vki_sendfilevec *vec = (struct vki_sendfilevec *) ARG3;
4371 if (ML_(safe_to_deref)(vec, ARG4 *
4372 sizeof(struct vki_sendfilevec))) {
4373 UInt i;
4374 for (i = 0; i < ARG4; i++) {
4375 HChar desc[35]; // large enough
4376 if (vec[i].sfv_fd == VKI_SFV_FD_SELF) {
4377 VG_(snprintf)(desc, sizeof(desc),
4378 "sendfilev(vec[%u].sfv_off", i);
4379 PRE_MEM_READ(desc, vec[i].sfv_off, vec[i].sfv_len);
4380 } else {
4381 VG_(snprintf)(desc, sizeof(desc),
4382 "sendfilev(vec[%u].sfv_fd)", i);
4383 if (!ML_(fd_allowed)(vec[i].sfv_fd, desc, tid, False))
4384 SET_STATUS_Failure(VKI_EBADF);
4389 break;
4390 case VKI_SENDFILEV64:
4392 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4393 const struct vki_sendfilevec64 *, vec,
4394 int, sfvcnt, vki_size_t *, xferred);
4396 PRE_MEM_READ("sendfilev(vec)", ARG3,
4397 ARG4 * sizeof(struct vki_sendfilevec64));
4398 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
4400 struct vki_sendfilevec64 *vec64 =
4401 (struct vki_sendfilevec64 *) ARG3;
4402 if (ML_(safe_to_deref)(vec64, ARG4 *
4403 sizeof(struct vki_sendfilevec64))) {
4404 UInt i;
4405 for (i = 0; i < ARG4; i++) {
4406 HChar desc[35]; // large enough
4407 if (vec64[i].sfv_fd == VKI_SFV_FD_SELF) {
4408 VG_(snprintf)(desc, sizeof(desc),
4409 "sendfilev(vec[%u].sfv_off", i);
4410 PRE_MEM_READ(desc, vec64[i].sfv_off, vec64[i].sfv_len);
4411 } else {
4412 VG_(snprintf)(desc, sizeof(desc),
4413 "sendfilev(vec[%u].sfv_fd)", i);
4414 if (!ML_(fd_allowed)(vec64[i].sfv_fd, desc,
4415 tid, False))
4416 SET_STATUS_Failure(VKI_EBADF);
4421 break;
4422 default:
4423 VG_(unimplemented)("Syswrap of the sendfilev call with "
4424 "opcode %ld.", SARG1);
4425 /*NOTREACHED*/
4426 break;
4429 /* Be strict. */
4430 if (!ML_(fd_allowed)(ARG2, "sendfilev(fd)", tid, False))
4431 SET_STATUS_Failure(VKI_EBADF);
4433 *flags |= SfMayBlock;
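/* Client-side sketch (hypothetical descriptors and sizes, not part of
   Valgrind) of why the checks above treat sfv_off as a memory address when
   sfv_fd == SFV_FD_SELF: in that case the data comes from the caller's
   address space rather than from a file.

      struct sendfilevec vec[2];
      size_t xferred;
      vec[0].sfv_fd   = SFV_FD_SELF;
      vec[0].sfv_flag = 0;
      vec[0].sfv_off  = (off_t) (uintptr_t) "HTTP/1.1 200 OK\r\n\r\n";
      vec[0].sfv_len  = 19;
      vec[1].sfv_fd   = file_fd;
      vec[1].sfv_flag = 0;
      vec[1].sfv_off  = 0;
      vec[1].sfv_len  = file_size;
      sendfilev(sock_fd, vec, 2, &xferred);
*/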
4436 POST(sys_sendfilev)
4438 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
4441 #if defined(SOLARIS_LWP_NAME_SYSCALL)
4442 PRE(sys_lwp_name)
4444 /* int lwp_name(int opcode, id_t lwpid, char *name, size_t len); */
4445 PRINT("sys_lwp_name ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
4447 switch (ARG1 /*opcode*/) {
4448 case 0:
4449 /* lwp_setname */
4450 PRE_REG_READ3(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4451 char *, name);
4452 PRE_MEM_RASCIIZ("lwp_name(name)", ARG3);
4453 break;
4454 case 1:
4455 /* lwp_getname */
4456 PRE_REG_READ4(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4457 char *, name, vki_size_t, len);
4458 PRE_MEM_WRITE("lwp_name(name)", ARG3, ARG4);
4459 break;
4460 default:
4461 VG_(unimplemented)("Syswrap of the lwp_name call with opcode %ld.", SARG1);
4462 /*NOTREACHED*/
4463 break;
4467 POST(sys_lwp_name)
4469 switch (ARG1 /*opcode*/) {
4470 case 0:
4471 if (ARG3) { // Paranoia
4472 const HChar *new_name = (const HChar *) ARG3;
4473 ThreadState *tst = VG_(get_ThreadState)(tid);
4474 SizeT new_len = VG_(strlen)(new_name);
4476 /* Don't bother reusing the memory. This is a rare event. */
4477 tst->thread_name = VG_(realloc)("syswrap.lwp_name", tst->thread_name,
4478 new_len + 1);
4479 VG_(strcpy)(tst->thread_name, new_name);
4481 break;
4482 case 1:
4483 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
4484 break;
4485 default:
4486 vg_assert(0);
4487 break;
4490 #endif /* SOLARIS_LWP_NAME_SYSCALL */
4492 PRE(sys_privsys)
4494 /* Kernel: int privsys(int code, priv_op_t op, priv_ptype_t type,
4495 void *buf, size_t bufsize, int itype);
4497 switch (ARG1 /*code*/) {
4498 case VKI_PRIVSYS_SETPPRIV:
4499 /* Libc: int setppriv(priv_op_t op, priv_ptype_t type,
4500 const priv_set_t *pset);
4502 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4503 ARG4, ARG5);
4504 PRE_REG_READ5(long, SC2("privsys", "setppriv"), int, code,
4505 vki_priv_op_t, op, vki_priv_ptype_t, type,
4506 const priv_set_t *, pset, vki_size_t, bufsize);
4507 PRE_MEM_READ("privsys(pset)", ARG4, ARG5);
4508 break;
4509 case VKI_PRIVSYS_GETPPRIV:
4510 /* Libc: int getppriv(priv_ptype_t type, priv_set_t *pset);
4511 priv_set_t *pset -> void *buf
4513 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4514 ARG4, ARG5);
4515 PRE_REG_READ5(long, SC2("privsys", "getppriv"), int, code,
4516 vki_priv_op_t, op, vki_priv_ptype_t, type, priv_set_t *, pset,
4517 vki_size_t, bufsize);
4518 PRE_MEM_WRITE("privsys(pset)", ARG4, ARG5);
4519 break;
4520 case VKI_PRIVSYS_GETIMPLINFO:
4521 /* Libc: int getprivinfo(priv_impl_info_t *buf, size_t bufsize);
4522 priv_impl_info_t *buf -> void *buf
4524 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4525 ARG4, ARG5);
4526 PRE_REG_READ5(long, SC2("privsys", "getprivinfo"), int, code,
4527 vki_priv_op_t, op, vki_priv_ptype_t, type,
4528 priv_impl_info_t *, buf, vki_size_t, bufsize);
4529 PRE_MEM_WRITE("privsys(buf)", ARG4, ARG5);
4530 break;
4531 case VKI_PRIVSYS_SETPFLAGS:
4532 /* Libc: int setpflags(uint_t flag, uint_t val);
4533 uint_t flag -> priv_op_t op
4534 uint_t val -> priv_ptype_t type
4536 PRINT("sys_privsys ( %ld, %lu, %lu )", SARG1, ARG2, ARG3);
4537 PRE_REG_READ3(long, SC2("privsys", "setpflags"), int, code,
4538 vki_uint_t, flag, vki_uint_t, val);
4539 break;
4540 case VKI_PRIVSYS_GETPFLAGS:
4541 /* Libc: uint_t getpflags(uint_t flag);
4542 uint_t flag -> priv_op_t op
4544 PRINT("sys_privsys ( %ld, %lu )", SARG1, ARG2);
4545 PRE_REG_READ2(long, SC2("privsys", "getpflags"), int, code,
4546 vki_uint_t, flag);
4547 break;
4548 case VKI_PRIVSYS_ISSETUGID:
4549 /* Libc: int issetugid(void); */
4550 PRINT("sys_privsys ( %ld )", SARG1);
4551 PRE_REG_READ1(long, SC2("privsys", "issetugid"), int, code);
4552 break;
4553 case VKI_PRIVSYS_PFEXEC_REG:
4554 /* Libc: int register_pfexec(int did);
4555 int did -> priv_op_t op
4557 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4558 PRE_REG_READ2(long, SC2("privsys", "register_pfexec"), int, code,
4559 int, did);
4560 break;
4561 case VKI_PRIVSYS_PFEXEC_UNREG:
4562 /* Libc: int unregister_pfexec(int did); */
4563 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4564 PRE_REG_READ2(long, SC2("privsys", "unregister_pfexec"), int, code,
4565 int, did);
4566 break;
4567 default:
4568 VG_(unimplemented)("Syswrap of the privsys call with code %ld.", SARG1);
4569 /*NOTREACHED*/
4570 break;
4573 /* Be strict. */
4574 if ((ARG1 == VKI_PRIVSYS_PFEXEC_REG ||
4575 ARG1 == VKI_PRIVSYS_PFEXEC_UNREG) &&
4576 !ML_(fd_allowed)(ARG2, "privsys", tid, False))
4577 SET_STATUS_Failure(VKI_EBADF);
4580 POST(sys_privsys)
4582 switch (ARG1 /*code*/) {
4583 case VKI_PRIVSYS_SETPPRIV:
4584 break;
4585 case VKI_PRIVSYS_GETPPRIV:
4586 POST_MEM_WRITE(ARG4, sizeof(vki_priv_set_t));
4587 break;
4588 case VKI_PRIVSYS_GETIMPLINFO:
4589 /* The kernel copies out data of size min(bufsize, privinfosize).
4590 Unfortunately, it does not seem to be possible to easily obtain the
4591 privinfosize value. The code below optimistically marks all ARG5
4592 bytes (aka bufsize) as written by the kernel. */
4593 POST_MEM_WRITE(ARG4, ARG5);
4594 break;
4595 case VKI_PRIVSYS_SETPFLAGS:
4596 case VKI_PRIVSYS_GETPFLAGS:
4597 case VKI_PRIVSYS_ISSETUGID:
4598 case VKI_PRIVSYS_PFEXEC_REG:
4599 case VKI_PRIVSYS_PFEXEC_UNREG:
4600 break;
4601 default:
4602 vg_assert(0);
4603 break;
4607 PRE(sys_ucredsys)
4609 /* Kernel: int ucredsys(int code, int obj, void *buf); */
4610 PRINT("sys_ucredsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4612 switch (ARG1 /*code*/) {
4613 case VKI_UCREDSYS_UCREDGET:
4614 /* Libc: ucred_t *ucred_get(pid_t pid); */
4615 PRE_REG_READ3(long, SC2("ucredsys", "ucredget"), int, code,
4616 vki_pid_t, pid, vki_ucred_t *, buf);
4617 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4618 break;
4620 case VKI_UCREDSYS_GETPEERUCRED:
4621 /* Libc: int getpeerucred(int fd, ucred_t **ucred); */
4622 PRE_REG_READ3(long, SC2("ucredsys", "getpeerucred"), int, code,
4623 int, fd, vki_ucred_t *, buf);
4624 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4626 /* Be strict. */
4627 if (!ML_(fd_allowed)(ARG2, "ucredsys", tid, False))
4628 SET_STATUS_Failure(VKI_EBADF);
4629 break;
4631 default:
4632 VG_(unimplemented)("Syswrap of the ucredsys call with code %ld.", SARG1);
4633 /*NOTREACHED*/
4634 break;
4638 POST(sys_ucredsys)
4640 switch (ARG1 /*code*/) {
4641 case VKI_UCREDSYS_UCREDGET:
4642 case VKI_UCREDSYS_GETPEERUCRED:
4643 vg_assert(ARG3 != 0);
4644 POST_MEM_WRITE(ARG3, ((vki_ucred_t *) ARG3)->uc_size);
4645 break;
4647 default:
4648 vg_assert(0);
4649 break;
4653 PRE(sys_sysfs)
4655 /* Kernel: int sysfs(int opcode, long a1, long a2); */
4656 PRINT("sys_sysfs ( %ld, %ld, %ld )", SARG1, SARG2, ARG3);
4658 switch (ARG1 /*opcode*/) {
4659 case VKI_GETFSIND:
4660 /* Libc: int sysfs(int opcode, const char *fsname); */
4661 PRE_REG_READ2(long, SC2("sysfs", "getfsind"), int, opcode,
4662 const char *, fsname);
4663 PRE_MEM_RASCIIZ("sysfs(fsname)", ARG2);
4664 break;
4665 case VKI_GETFSTYP:
4666 /* Libc: int sysfs(int opcode, int fs_index, char *buf); */
4667 PRE_REG_READ3(long, SC2("sysfs", "getfstyp"), int, opcode,
4668 int, fs_index, char *, buf);
4669 PRE_MEM_WRITE("sysfs(buf)", ARG3, VKI_FSTYPSZ + 1);
4670 break;
4671 case VKI_GETNFSTYP:
4672 /* Libc: int sysfs(int opcode); */
4673 PRE_REG_READ1(long, SC2("sysfs", "getnfstyp"), int, opcode);
4674 break;
4675 default:
4676 VG_(unimplemented)("Syswrap of the sysfs call with opcode %ld.", SARG1);
4677 /*NOTREACHED*/
4678 break;
4682 POST(sys_sysfs)
4684 switch (ARG1 /*opcode*/) {
4685 case VKI_GETFSIND:
4686 case VKI_GETNFSTYP:
4687 break;
4688 case VKI_GETFSTYP:
4689 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
4690 break;
4691 default:
4692 vg_assert(0);
4693 break;
4698 PRE(sys_getmsg)
4700 /* int getmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4701 int *flagsp); */
4702 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4703 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4704 *flags |= SfMayBlock;
4705 PRINT("sys_getmsg ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
4706 PRE_REG_READ4(long, "getmsg", int, fildes, struct vki_strbuf *, ctlptr,
4707 struct vki_strbuf *, dataptr, int *, flagsp);
4708 if (ctrlptr) {
4709 PRE_FIELD_READ("getmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
4710 PRE_FIELD_WRITE("getmsg(ctrlptr->len)", ctrlptr->len);
4711 PRE_FIELD_READ("getmsg(ctrlptr->buf)", ctrlptr->buf);
4712 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4713 && ctrlptr->maxlen > 0)
4714 PRE_MEM_WRITE("getmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4715 ctrlptr->maxlen);
4717 if (dataptr) {
4718 PRE_FIELD_READ("getmsg(dataptr->maxlen)", dataptr->maxlen);
4719 PRE_FIELD_WRITE("getmsg(dataptr->len)", dataptr->len);
4720 PRE_FIELD_READ("getmsg(dataptr->buf)", dataptr->buf);
4721 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4722 && dataptr->maxlen > 0)
4723 PRE_MEM_WRITE("getmsg(dataptr->buf)", (Addr)dataptr->buf,
4724 dataptr->maxlen);
4726 PRE_MEM_READ("getmsg(flagsp)", ARG4, sizeof(int));
4727 /*PRE_MEM_WRITE("getmsg(flagsp)", ARG4, sizeof(int));*/
4729 /* Be strict. */
4730 if (!ML_(fd_allowed)(ARG1, "getmsg", tid, False))
4731 SET_STATUS_Failure(VKI_EBADF);
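/* Client-side sketch (hypothetical fd, not part of Valgrind) of the strbuf
   in/out protocol the checks above follow: maxlen and buf are inputs, len is
   filled in by the kernel (and set to -1 when that part is absent).

      char cbuf[64], dbuf[1024];
      struct strbuf ctl  = { sizeof cbuf, 0, cbuf };
      struct strbuf data = { sizeof dbuf, 0, dbuf };
      int msgflags = 0;
      if (getmsg(fd, &ctl, &data, &msgflags) >= 0) {
         ;   ctl.len / data.len now hold the number of bytes received
      }
*/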
4734 POST(sys_getmsg)
4736 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4737 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4739 if (ctrlptr && ctrlptr->len > 0)
4740 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
4741 if (dataptr && dataptr->len > 0)
4742 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
4743 POST_MEM_WRITE(ARG4, sizeof(int));
4746 PRE(sys_putmsg)
4748 /* int putmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4749 int flags); */
4750 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4751 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4752 *flags |= SfMayBlock;
4753 PRINT("sys_putmsg ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
4754 PRE_REG_READ4(long, "putmsg", int, fildes, struct vki_strbuf *, ctrlptr,
4755 struct vki_strbuf *, dataptr, int, flags);
4756 if (ctrlptr) {
4757 PRE_FIELD_READ("putmsg(ctrlptr->len)", ctrlptr->len);
4758 PRE_FIELD_READ("putmsg(ctrlptr->buf)", ctrlptr->buf);
4759 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4760 && ctrlptr->len > 0)
4761 PRE_MEM_READ("putmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4762 ctrlptr->len);
4764 if (dataptr) {
4765 PRE_FIELD_READ("putmsg(dataptr->len)", dataptr->len);
4766 PRE_FIELD_READ("putmsg(dataptr->buf)", dataptr->buf);
4767 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4768 && dataptr->len > 0)
4769 PRE_MEM_READ("putmsg(dataptr->buf)", (Addr)dataptr->buf,
4770 dataptr->len);
4773 /* Be strict. */
4774 if (!ML_(fd_allowed)(ARG1, "putmsg", tid, False))
4775 SET_STATUS_Failure(VKI_EBADF);
4778 PRE(sys_lstat)
4780 /* int lstat(const char *path, struct stat *buf); */
4781 /* Note: We could use the generic sys_newlstat wrapper here, but the 'new'
4782 in its name is rather confusing in the Solaris context, so we provide
4783 our own wrapper. */
4784 PRINT("sys_lstat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
4785 PRE_REG_READ2(long, "lstat", const char *, path, struct stat *, buf);
4787 PRE_MEM_RASCIIZ("lstat(path)", ARG1);
4788 PRE_MEM_WRITE("lstat(buf)", ARG2, sizeof(struct vki_stat));
4791 POST(sys_lstat)
4793 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
4796 PRE(sys_sigprocmask)
4798 /* int sigprocmask(int how, const sigset_t *set, sigset_t *oset); */
4799 PRINT("sys_sigprocmask ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4800 PRE_REG_READ3(long, "sigprocmask",
4801 int, how, vki_sigset_t *, set, vki_sigset_t *, oset);
4802 if (ARG2)
4803 PRE_MEM_READ("sigprocmask(set)", ARG2, sizeof(vki_sigset_t));
4804 if (ARG3)
4805 PRE_MEM_WRITE("sigprocmask(oset)", ARG3, sizeof(vki_sigset_t));
4807 /* Be safe. */
4808 if (ARG2 && !ML_(safe_to_deref((void*)ARG2, sizeof(vki_sigset_t)))) {
4809 SET_STATUS_Failure(VKI_EFAULT);
4811 if (ARG3 && !ML_(safe_to_deref((void*)ARG3, sizeof(vki_sigset_t)))) {
4812 SET_STATUS_Failure(VKI_EFAULT);
4815 if (!FAILURE)
4816 SET_STATUS_from_SysRes(
4817 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, (vki_sigset_t*)ARG2,
4818 (vki_sigset_t*)ARG3)
4821 if (SUCCESS)
4822 *flags |= SfPollAfter;
4825 POST(sys_sigprocmask)
4827 if (ARG3)
4828 POST_MEM_WRITE(ARG3, sizeof(vki_sigset_t));
4831 PRE(sys_sigsuspend)
4833 *flags |= SfMayBlock;
4835 /* int sigsuspend(const sigset_t *set); */
4836 PRINT("sys_sigsuspend ( %#lx )", ARG1);
4837 PRE_REG_READ1(long, "sigsuspend", vki_sigset_t *, set);
4838 PRE_MEM_READ("sigsuspend(set)", ARG1, sizeof(vki_sigset_t));
4840 /* Be safe. */
4841 if (ARG1 && ML_(safe_to_deref((void *) ARG1, sizeof(vki_sigset_t)))) {
4842 VG_(sigdelset)((vki_sigset_t *) ARG1, VG_SIGVGKILL);
4843 /* We cannot mask VG_SIGVGKILL, as otherwise this thread would not
4844 be killable by VG_(nuke_all_threads_except).
4845 We thus silently ignore the user request to mask this signal.
4846 Note that this is similar to what is done for e.g.
4847 sigprocmask (see m_signals.c calculate_SKSS_from_SCSS). */
4851 PRE(sys_sigaction)
4853 /* int sigaction(int signal, const struct sigaction *act,
4854 struct sigaction *oact); */
4855 PRINT("sys_sigaction ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4856 PRE_REG_READ3(long, "sigaction", int, signal,
4857 const struct sigaction *, act, struct sigaction *, oact);
4859 /* Note that on Solaris, vki_sigaction_toK_t and vki_sigaction_fromK_t are
4860 both typedefs of 'struct sigaction'. */
4862 if (ARG2) {
4863 vki_sigaction_toK_t *sa = (vki_sigaction_toK_t*)ARG2;
4864 PRE_FIELD_READ("sigaction(act->sa_flags)", sa->sa_flags);
4865 PRE_FIELD_READ("sigaction(act->sa_handler)", sa->ksa_handler);
4866 PRE_FIELD_READ("sigaction(act->sa_mask)", sa->sa_mask);
4868 if (ARG3)
4869 PRE_MEM_WRITE("sigaction(oact)", ARG3, sizeof(vki_sigaction_fromK_t));
4871 /* Be safe. */
4872 if (ARG2 && !ML_(safe_to_deref((void*)ARG2,
4873 sizeof(vki_sigaction_toK_t)))) {
4874 SET_STATUS_Failure(VKI_EFAULT);
4876 if (ARG3 && !ML_(safe_to_deref((void*)ARG3,
4877 sizeof(vki_sigaction_fromK_t)))) {
4878 SET_STATUS_Failure(VKI_EFAULT);
4881 if (!FAILURE)
4882 SET_STATUS_from_SysRes(
4883 VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t*)ARG2,
4884 (vki_sigaction_fromK_t*)ARG3));
4887 POST(sys_sigaction)
4889 if (ARG3)
4890 POST_MEM_WRITE(ARG3, sizeof(vki_sigaction_fromK_t));
4893 PRE(sys_sigpending)
4895 /* int sigpending(int flag, sigset_t *setp); */
4896 PRINT("sys_sigpending ( %ld, %#lx )", SARG1, ARG2);
4897 PRE_REG_READ2(long, "sigpending", int, flag, sigset_t *, setp);
4898 PRE_MEM_WRITE("sigpending(setp)", ARG2, sizeof(vki_sigset_t));
4901 POST(sys_sigpending)
4903 POST_MEM_WRITE(ARG2, sizeof(vki_sigset_t));
4906 PRE(sys_getsetcontext)
4908 /* Kernel: int getsetcontext(int flag, void *arg) */
4909 ThreadState *tst = VG_(get_ThreadState)(tid);
4910 PRINT("sys_getsetcontext ( %ld, %#lx )", SARG1, ARG2);
4911 switch (ARG1 /*flag*/) {
4912 case VKI_GETCONTEXT:
4913 /* Libc: int getcontext(ucontext_t *ucp); */
4914 PRE_REG_READ2(long, SC2("getsetcontext", "getcontext"), int, flag,
4915 ucontext_t *, ucp);
4916 PRE_MEM_WRITE("getsetcontext(ucp)", ARG2, sizeof(vki_ucontext_t));
4918 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4919 SET_STATUS_Failure(VKI_EFAULT);
4920 return;
4922 VG_(save_context)(tid, (vki_ucontext_t*)ARG2, Vg_CoreSysCall);
4923 SET_STATUS_Success(0);
4924 break;
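/* For reference, the libc-level usage being emulated here (a sketch with no
   Valgrind involvement): getcontext() must capture the complete register
   state, which under Valgrind lives in the guest state, hence the call to
   VG_(save_context) above instead of letting the kernel do it.

      ucontext_t uc;
      volatile int resumed = 0;
      getcontext(&uc);
      if (!resumed) {
         resumed = 1;
         setcontext(&uc);   execution resumes right after getcontext()
      }
*/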
4925 case VKI_SETCONTEXT:
4926 /* Libc: int setcontext(const ucontext_t *ucp); */
4927 PRE_REG_READ2(long, SC2("getsetcontext", "setcontext"), int, flag,
4928 const ucontext_t *, ucp);
4930 if (!ARG2) {
4931 /* Setting NULL context causes thread exit. */
4932 tst->exitreason = VgSrc_ExitThread;
4933 tst->os_state.exitcode = 0;
4934 SET_STATUS_Success(0);
4935 return;
4938 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4939 SET_STATUS_Failure(VKI_EFAULT);
4940 return;
4943 VG_(restore_context)(tid, (vki_ucontext_t*)ARG2,
4944 Vg_CoreSysCall, False/*esp_is_thrptr*/);
4945 /* Tell the driver not to update the guest state with the "result". */
4946 *flags |= SfNoWriteResult;
4947 /* Check to see if any signals arose as a result of this. */
4948 *flags |= SfPollAfter;
4950 /* Check if this is a possible return from a signal handler. */
4951 VG_(sigframe_return)(tid, (vki_ucontext_t*)ARG2);
4953 SET_STATUS_Success(0);
4954 break;
4955 case VKI_GETUSTACK:
4956 /* Libc: int getustack(stack_t **spp); */
4957 PRE_REG_READ2(long, SC2("getsetcontext", "getustack"), int, flag,
4958 stack_t **, spp);
4959 PRE_MEM_WRITE("getsetcontext(spp)", ARG2, sizeof(vki_stack_t*));
4961 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t*)))) {
4962 SET_STATUS_Failure(VKI_EFAULT);
4963 return;
4966 *(vki_stack_t**)ARG2 = tst->os_state.ustack;
4967 POST_MEM_WRITE(ARG2, sizeof(vki_stack_t*));
4968 SET_STATUS_Success(0);
4969 break;
4970 case VKI_SETUSTACK:
4972 /* Libc: int setustack(stack_t *sp); */
4973 PRE_REG_READ2(long, SC2("getsetcontext", "setustack"), int, flag,
4974 stack_t *, sp);
4976 /* The kernel does not read the stack data immediately, but it may read
4977 them later, so it is better to make sure the data are defined. */
4978 PRE_MEM_READ("getsetcontext_setustack(sp)", ARG2, sizeof(vki_stack_t));
4980 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t)))) {
4981 SET_STATUS_Failure(VKI_EFAULT);
4982 return;
4985 vki_stack_t *old_stack = tst->os_state.ustack;
4986 tst->os_state.ustack = (vki_stack_t*)ARG2;
4988 /* The thread is setting the ustack pointer. It is a good time to get
4989 information about its stack. */
4990 if (tst->os_state.ustack->ss_flags == 0) {
4991 /* If the sanity check of ss_flags passed, set the stack. */
4992 set_stack(tid, tst->os_state.ustack);
4994 if ((old_stack == NULL) && (tid > 1)) {
4995 /* New thread creation is now completed. Inform the tool. */
4996 VG_TRACK(pre_thread_first_insn, tid);
5000 SET_STATUS_Success(0);
5002 break;
5003 default:
5004 VG_(unimplemented)("Syswrap of the context call with flag %ld.", SARG1);
5005 /*NOTREACHED*/
5006 break;
5010 PRE(sys_fchmodat)
5012 /* int fchmodat(int fd, const char *path, mode_t mode, int flag); */
5014 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5015 This is different from Linux, for example, where glibc sign-extends it. */
5016 Int fd = (Int) ARG1;
5018 PRINT("sys_fchmodat ( %d, %#lx(%s), %ld, %ld )",
5019 fd, ARG2, (HChar *) ARG2, SARG3, SARG4);
5020 PRE_REG_READ4(long, "fchmodat",
5021 int, fd, const char *, path, vki_mode_t, mode, int, flag);
5023 if (ARG2)
5024 PRE_MEM_RASCIIZ("fchmodat(path)", ARG2);
5026 /* Be strict but ignore fd for absolute path. */
5027 if (fd != VKI_AT_FDCWD
5028 && ML_(safe_to_deref)((void *) ARG2, 1)
5029 && ((HChar *) ARG2)[0] != '/'
5030 && !ML_(fd_allowed)(fd, "fchmodat", tid, False))
5031 SET_STATUS_Failure(VKI_EBADF);
5034 PRE(sys_mkdirat)
5036 /* int mkdirat(int fd, const char *path, mode_t mode); */
5038 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5039 This is different from Linux, for example, where glibc sign-extends it. */
5040 Int fd = (Int) ARG1;
5042 *flags |= SfMayBlock;
5043 PRINT("sys_mkdirat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2, SARG3);
5044 PRE_REG_READ3(long, "mkdirat", int, fd, const char *, path,
5045 vki_mode_t, mode);
5046 PRE_MEM_RASCIIZ("mkdirat(path)", ARG2);
5048 /* Be strict but ignore fd for absolute path. */
5049 if (fd != VKI_AT_FDCWD
5050 && ML_(safe_to_deref)((void *) ARG2, 1)
5051 && ((HChar *) ARG2)[0] != '/'
5052 && !ML_(fd_allowed)(fd, "mkdirat", tid, False))
5053 SET_STATUS_Failure(VKI_EBADF);
5056 static void do_statvfs_post(struct vki_statvfs *stats, ThreadId tid)
5058 POST_FIELD_WRITE(stats->f_bsize);
5059 POST_FIELD_WRITE(stats->f_frsize);
5060 POST_FIELD_WRITE(stats->f_blocks);
5061 POST_FIELD_WRITE(stats->f_bfree);
5062 POST_FIELD_WRITE(stats->f_bavail);
5063 POST_FIELD_WRITE(stats->f_files);
5064 POST_FIELD_WRITE(stats->f_ffree);
5065 POST_FIELD_WRITE(stats->f_favail);
5066 POST_FIELD_WRITE(stats->f_fsid);
5067 POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
5068 POST_FIELD_WRITE(stats->f_flag);
5069 POST_FIELD_WRITE(stats->f_namemax);
5070 POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
5073 PRE(sys_statvfs)
5075 /* int statvfs(const char *path, struct statvfs *buf); */
5076 *flags |= SfMayBlock;
5077 PRINT("sys_statvfs ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
5078 PRE_REG_READ2(long, "statvfs", const char *, path,
5079 struct vki_statvfs *, buf);
5080 PRE_MEM_RASCIIZ("statvfs(path)", ARG1);
5081 PRE_MEM_WRITE("statvfs(buf)", ARG2, sizeof(struct vki_statvfs));
5084 POST(sys_statvfs)
5086 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
5089 PRE(sys_fstatvfs)
5091 /* int fstatvfs(int fd, struct statvfs *buf); */
5092 *flags |= SfMayBlock;
5093 PRINT("sys_fstatvfs ( %ld, %#lx )", SARG1, ARG2);
5094 PRE_REG_READ2(long, "fstatvfs", int, fd, struct vki_statvfs *, buf);
5095 PRE_MEM_WRITE("fstatvfs(buf)", ARG2, sizeof(struct vki_statvfs));
5097 /* Be strict. */
5098 if (!ML_(fd_allowed)(ARG1, "fstatvfs", tid, False))
5099 SET_STATUS_Failure(VKI_EBADF);
5102 POST(sys_fstatvfs)
5104 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
5107 PRE(sys_nfssys)
5109 /* int nfssys(enum nfssys_op opcode, void *arg); */
5110 *flags |= SfMayBlock;
5111 PRINT("sys_nfssys ( %ld, %#lx )", SARG1, ARG2);
5113 switch (ARG1 /*opcode*/) {
5114 case VKI_NFS_REVAUTH:
5115 PRE_REG_READ2(long, SC2("nfssys", "nfs_revauth"), int, opcode,
5116 struct vki_nfs_revauth_args *, args);
5117 PRE_MEM_READ("nfssys(arg)", ARG2,
5118 sizeof(struct vki_nfs_revauth_args));
5119 break;
5120 default:
5121 VG_(unimplemented)("Syswrap of the nfssys call with opcode %ld.", SARG1);
5122 /*NOTREACHED*/
5123 break;
5127 POST(sys_nfssys)
5129 switch (ARG1 /*opcode*/) {
5130 case VKI_NFS_REVAUTH:
5131 break;
5132 default:
5133 vg_assert(0);
5134 break;
5138 PRE(sys_waitid)
5140 /* int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options); */
5141 *flags |= SfMayBlock;
5142 PRINT("sys_waitid( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
5143 PRE_REG_READ4(long, "waitid", vki_idtype_t, idtype, vki_id_t, id,
5144 siginfo_t *, infop, int, options);
5145 PRE_MEM_WRITE("waitid(infop)", ARG3, sizeof(vki_siginfo_t));
5148 POST(sys_waitid)
5150 POST_MEM_WRITE(ARG3, sizeof(vki_siginfo_t));
5153 PRE(sys_sigsendsys)
5155 /* int sigsendsys(procset_t *psp, int sig); */
5156 PRINT("sys_sigsendsys( %#lx, %ld )", ARG1, SARG2);
5157 PRE_REG_READ2(long, "sigsendsys", vki_procset_t *, psp, int, signal);
5158 PRE_MEM_READ("sigsendsys(psp)", ARG1, sizeof(vki_procset_t));
5160 if (!ML_(client_signal_OK)(ARG1)) {
5161 SET_STATUS_Failure(VKI_EINVAL);
5163 if (!ML_(safe_to_deref)((void *) ARG1, sizeof(vki_procset_t))) {
5164 SET_STATUS_Failure(VKI_EFAULT);
5167 /* Exit early if there are problems. */
5168 if (FAILURE)
5169 return;
5171 vki_procset_t *psp = (vki_procset_t *) ARG1;
5172 switch (psp->p_op) {
5173 case VKI_POP_AND:
5174 break;
5175 default:
5176 VG_(unimplemented)("Syswrap of the sigsendsys call with op %u.",
5177 psp->p_op);
5180 UInt pid;
5181 if ((psp->p_lidtype == VKI_P_PID) && (psp->p_ridtype == VKI_P_ALL)) {
5182 pid = psp->p_lid;
5183 } else if ((psp->p_lidtype == VKI_P_ALL) && (psp->p_ridtype == VKI_P_PID)) {
5184 pid = psp->p_rid;
5185 } else {
5186 VG_(unimplemented)("Syswrap of the sigsendsys call with lidtype %u and"
5187 "ridtype %u.", psp->p_lidtype, psp->p_ridtype);
5190 if (VG_(clo_trace_signals))
5191 VG_(message)(Vg_DebugMsg, "sigsendsys: sending signal to process %d\n",
5192 pid);
5194 /* Handle SIGKILL specially. */
5195 if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(pid, -1)) {
5196 SET_STATUS_Success(0);
5197 return;
5200 /* Check to see if this gave us a pending signal. */
5201 *flags |= SfPollAfter;
5204 #if defined(SOLARIS_UTIMESYS_SYSCALL)
5205 PRE(sys_utimesys)
5207 /* Kernel: int utimesys(int code, uintptr_t arg1, uintptr_t arg2,
5208 uintptr_t arg3, uintptr_t arg4);
5211 switch (ARG1 /*code*/) {
5212 case 0:
5213 /* Libc: int futimens(int fd, const timespec_t times[2]); */
5214 PRINT("sys_utimesys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
5215 PRE_REG_READ3(long, "utimesys", int, code, int, fd,
5216 const vki_timespec_t *, times);
5217 if (ARG3)
5218 PRE_MEM_READ("utimesys(times)", ARG3, 2 * sizeof(vki_timespec_t));
5220 /* Be strict. */
5221 if (!ML_(fd_allowed)(ARG2, "utimesys", tid, False))
5222 SET_STATUS_Failure(VKI_EBADF);
5223 break;
5224 case 1:
5226 /* Libc: int utimensat(int fd, const char *path,
5227 const timespec_t times[2], int flag);
5230 /* Interpret the second argument as 32-bit value even on 64-bit
5231 architecture. This is different from Linux, for example, where glibc
5232 sign-extends it. */
5233 Int fd = (Int) ARG2;
5235 PRINT("sys_utimesys ( %ld, %d, %#lx(%s), %#lx, %ld )",
5236 SARG1, fd, ARG3, (HChar *) ARG3, ARG4, SARG5);
5237 PRE_REG_READ5(long, "utimesys", int, code, int, fd, const char *, path,
5238 const vki_timespec_t *, times, int, flag);
5239 if (ARG3)
5240 PRE_MEM_RASCIIZ("utimesys(path)", ARG3);
5241 if (ARG4)
5242 PRE_MEM_READ("utimesys(times)", ARG4, 2 * sizeof(vki_timespec_t));
5244 /* Be strict but ignore fd for absolute path. */
5245 if (fd != VKI_AT_FDCWD
5246 && ML_(safe_to_deref)((void *) ARG3, 1)
5247 && ((HChar *) ARG3)[0] != '/'
5248 && !ML_(fd_allowed)(fd, "utimesys", tid, False))
5249 SET_STATUS_Failure(VKI_EBADF);
5250 break;
5252 default:
5253 VG_(unimplemented)("Syswrap of the utimesys call with code %ld.", SARG1);
5254 /*NOTREACHED*/
5255 break;
5258 #endif /* SOLARIS_UTIMESYS_SYSCALL */
5260 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
5261 PRE(sys_utimensat)
5263 /* int utimensat(int fd, const char *path, const timespec_t times[2],
5264 int flag);
5267 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
5268 This is different from Linux, for example, where glibc sign-extends it. */
5269 Int fd = (Int) ARG1;
5271 PRINT("sys_utimensat ( %d, %#lx(%s), %#lx, %ld )",
5272 fd, ARG2, (HChar *) ARG2, ARG3, SARG4);
5273 PRE_REG_READ4(long, "utimensat", int, fd, const char *, path,
5274 const vki_timespec_t *, times, int, flag);
5275 if (ARG2)
5276 PRE_MEM_RASCIIZ("utimensat(path)", ARG2);
5277 if (ARG3)
5278 PRE_MEM_READ("utimensat(times)", ARG3, 2 * sizeof(vki_timespec_t));
5280 /* Be strict but ignore fd for absolute path. */
5281 if (fd != VKI_AT_FDCWD
5282 && ML_(safe_to_deref)((void *) ARG2, 1)
5283 && ((HChar *) ARG2)[0] != '/'
5284 && !ML_(fd_allowed)(fd, "utimensat", tid, False))
5285 SET_STATUS_Failure(VKI_EBADF);
5287 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
5289 PRE(sys_sigresend)
5291 /* int sigresend(int signal, siginfo_t *siginfo, sigset_t *mask); */
5292 /* Sends a signal to the calling thread, the mask parameter specifies a new
5293 signal mask. */
5295 /* Static (const) mask accessible from outside of this function. */
5296 static vki_sigset_t block_all;
5298 PRINT("sys_sigresend( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
5299 PRE_REG_READ3(long, "sigresend", int, signal, vki_siginfo_t *, siginfo,
5300 vki_sigset_t *, mask);
5302 if (ARG2)
5303 PRE_MEM_READ("sigresend(siginfo)", ARG2, sizeof(vki_siginfo_t));
5304 PRE_MEM_WRITE("sigresend(mask)", ARG3, sizeof(vki_sigset_t));
5306 /* Check the signal and mask. */
5307 if (!ML_(client_signal_OK)(ARG1)) {
5308 SET_STATUS_Failure(VKI_EINVAL);
5310 if (!ML_(safe_to_deref)((void*)ARG3, sizeof(vki_sigset_t))) {
5311 SET_STATUS_Failure(VKI_EFAULT);
5314 /* Exit early if there are problems. */
5315 if (FAILURE)
5316 return;
5318 /* Save the requested mask to unused ARG4. */
5319 ARG4 = ARG3;
5321 /* Fake the requested sigmask with a block-all mask. If the syscall
5322 succeeds then we will block "all" signals for a few instructions (in
5323 syscall-x86-solaris.S) but the correct mask will be almost instantly set
5324 again by a call to sigprocmask (also in syscall-x86-solaris.S). If the
5325 syscall fails then the mask is not changed, so everything is ok too. */
5326 VG_(sigfillset)(&block_all);
5327 ARG3 = (UWord)&block_all;
5329 /* Check to see if this gave us a pending signal. */
5330 *flags |= SfPollAfter;
5332 if (VG_(clo_trace_signals))
5333 VG_(message)(Vg_DebugMsg, "sigresend: resending signal %ld\n", ARG1);
5335 /* Handle SIGKILL specially. */
5336 if (ARG1 == VKI_SIGKILL && ML_(do_sigkill)(tid, -1)) {
5337 SET_STATUS_Success(0);
5338 return;
5341 /* Ask to handle this syscall via the slow route, since that's the only one
5342 that sets tst->status to VgTs_WaitSys. If the result of doing the
5343 syscall is an immediate run of async_signalhandler() in m_signals.c,
5344 then we need the thread to be properly tidied away. */
5345 *flags |= SfMayBlock;
5348 POST(sys_sigresend)
5350 /* The syscall succeeded, set the requested mask. */
5351 VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, (vki_sigset_t*)ARG4, NULL);
5353 if (VG_(clo_trace_signals))
5354 VG_(message)(Vg_DebugMsg, "sigresend: resent signal %lu\n", ARG1);
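/* Helper for mem_priocntlsys_parm() below. Marks parm->pc_parm as written
   (in the PRE or POST phase respectively); for parameters that are only
   read there is nothing to do here. */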
5357 static void mem_priocntlsys_parm_ok(ThreadId tid, Bool pre, Bool reade,
5358 vki_pc_vaparm_t *parm)
5360 if (reade)
5361 return;
5363 if (pre)
5364 PRE_FIELD_WRITE("priocntlsys(parm)", parm->pc_parm);
5365 else
5366 POST_FIELD_WRITE(parm->pc_parm);
5369 static void mem_priocntlsys_parm(ThreadId tid, Bool pre, Bool reade,
5370 const HChar *clname,
5371 vki_pc_vaparm_t *parm)
5373 /* This function is used to handle the PC_SETXPARMS and PC_GETXPARMS
5374 parameters. In the case of PC_SETXPARMS, the code below merely checks
5375 if all parameters are scalar, PRE_MEM_READ() for these parameters is
5376 already done by the PC_SETXPARMS handler in PRE(sys_priocntlsys).
5378 A caller of this function is responsible for checking that clname and
5379 &parm->key can be dereferenced. */
5381 if (VG_STREQ(clname, "RT")) {
5382 switch (parm->pc_key) {
5383 case VKI_RT_KY_PRI:
5384 case VKI_RT_KY_TQSECS:
5385 case VKI_RT_KY_TQNSECS:
5386 case VKI_RT_KY_TQSIG:
5387 /* Scalar values that are stored directly in pc_parm. */
5388 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5389 return;
5392 else if (VG_STREQ(clname, "TS")) {
5393 switch (parm->pc_key) {
5394 case VKI_TS_KY_UPRILIM:
5395 case VKI_TS_KY_UPRI:
5396 /* Scalar values that are stored directly in pc_parm. */
5397 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5398 return;
5401 else if (VG_STREQ(clname, "IA")) {
5402 switch (parm->pc_key) {
5403 case VKI_IA_KY_UPRILIM:
5404 case VKI_IA_KY_UPRI:
5405 case VKI_IA_KY_MODE:
5406 /* Scalar values that are stored directly in pc_parm. */
5407 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5408 return;
5411 else if (VG_STREQ(clname, "FSS")) {
5412 switch (parm->pc_key) {
5413 case VKI_FSS_KY_UPRILIM:
5414 case VKI_FSS_KY_UPRI:
5415 /* Scalar values that are stored directly in pc_parm. */
5416 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5417 return;
5420 else if (VG_STREQ(clname, "FX")) {
5421 switch (parm->pc_key) {
5422 case VKI_FX_KY_UPRILIM:
5423 case VKI_FX_KY_UPRI:
5424 case VKI_FX_KY_TQSECS:
5425 case VKI_FX_KY_TQNSECS:
5426 /* Scalar values that are stored directly in pc_parm. */
5427 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5428 return;
5431 else {
5432 /* Unknown class. */
5433 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5434 clname);
5435 /*NOTREACHED*/
5438 /* The class is known but pc_key is unknown. */
5439 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s "
5440 "and pc_key=%d.", clname, parm->pc_key);
5441 /*NOTREACHED*/
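/* priocntlsys() backs the priocntl(2)/priocntlset(2) interfaces. The
   handling below dispatches on the cmd argument; unknown commands and
   scheduling classes are reported via VG_(unimplemented). */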
5444 PRE(sys_priocntlsys)
5446 /* long priocntlsys(int pc_version, procset_t *psp, int cmd, caddr_t arg,
5447 caddr_t arg2); */
5449 if (ARG1 != 1) {
5450 /* Only the first version of priocntlsys is supported by the code below. */
5452 VG_(unimplemented)("Syswrap of the priocntlsys call where pc_version=%lu.",
5453 ARG1);
5454 /*NOTREACHED*/
5457 PRINT("sys_priocntlsys ( %ld, %#lx, %ld, %#lx, %#lx )", SARG1, ARG2, SARG3,
5458 ARG4, ARG5);
5459 PRE_REG_READ5(long, "priocntlsys", int, pc_version, procset_t *, psp,
5460 int, cmd, void *, arg, void *, arg2);
5462 switch (ARG3 /*cmd*/) {
5463 case VKI_PC_GETCID:
5464 if (ARG4) {
5465 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5466 PRE_MEM_RASCIIZ("priocntlsys(clname)", (Addr)info->pc_clname);
5467 /* The next line says that the complete pcinfo_t structure can be
5468 written, but this actually isn't true for pc_clname which is
5469 always only read. */
5470 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5472 break;
5473 case VKI_PC_GETCLINFO:
5474 if (ARG4) {
5475 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5476 PRE_FIELD_READ("priocntlsys(cid)", info->pc_cid);
5477 /* The next line says that the complete pcinfo_t structure can be
5478 written, but this actually isn't true for pc_cid which is
5479 always only read. */
5480 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5482 break;
5483 case VKI_PC_SETPARMS:
5484 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5485 /* The next line says that the complete pcparms_t structure is read
5486 which is never actually true (we are too pessimistic here).
5487 Unfortunately we can't do better because we don't know what
5488 process class is involved. */
5489 PRE_MEM_READ("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5490 break;
5491 case VKI_PC_GETPARMS:
5492 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5493 PRE_MEM_WRITE("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5494 break;
5495 case VKI_PC_GETPRIRANGE:
5497 vki_pcpri_t *pcpri = (vki_pcpri_t*)ARG4;
5498 PRE_FIELD_READ("priocntlsys(cid)", pcpri->pc_cid);
5500 PRE_MEM_WRITE("priocntlsys(pri)", ARG4, sizeof(vki_pcpri_t));
5501 break;
5502 case VKI_PC_DONICE:
5503 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5505 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5506 PRE_FIELD_READ("priocntlsys(op)", nicee->pc_op);
5507 if (ML_(safe_to_deref)(&nicee->pc_op, sizeof(nicee->pc_op))) {
5508 switch (nicee->pc_op) {
5509 case VKI_PC_GETNICE:
5510 PRE_FIELD_WRITE("priocntlsys(val)", nicee->pc_val);
5511 break;
5512 case VKI_PC_SETNICE:
5513 PRE_FIELD_READ("priocntlsys(val)", nicee->pc_val);
5514 break;
5515 default:
5516 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5517 "cmd=PC_DONICE and pc_op=%d", nicee->pc_op);
5518 /*NOTREACHED*/
5519 break;
5523 break;
5524 case VKI_PC_SETXPARMS:
5525 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5526 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5527 if (ARG5) {
5528 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5529 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5530 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5531 sizeof(parms->pc_vaparmscnt))) {
5532 vki_uint_t i;
5533 PRE_MEM_READ("priocntlsys(parms)", (Addr)parms->pc_parms,
5534 parms->pc_vaparmscnt * sizeof(parms->pc_parms[0]));
5535 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5536 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5537 if (ML_(safe_to_deref)(parm, sizeof(*parm)) &&
5538 ML_(safe_to_deref)((void*)ARG4, 1))
5539 mem_priocntlsys_parm(tid, True /*pre*/, True /*read*/,
5540 (HChar*)ARG4, parm);
5544 break;
5545 case VKI_PC_GETXPARMS:
5546 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5547 if (ARG4)
5548 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5549 if (ARG5) {
5550 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5551 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5552 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5553 sizeof(parms->pc_vaparmscnt))) {
5554 vki_uint_t i;
5555 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5556 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5557 PRE_MEM_READ("priocntlsys(parms)", (Addr)&parm->pc_key,
5558 parms->pc_vaparmscnt * sizeof(parm->pc_key));
5559 if (ML_(safe_to_deref)(&parm->pc_key,
5560 sizeof(parm->pc_key))) {
5561 /* First handle PC_KY_CLNAME, then class specific keys.
5562 Note that PC_KY_CLNAME can be used only with
5563 ARG4==NULL && parms->pc_vaparmscnt==1. We are not so
5564 strict here and handle this special case as a regular
5565 one which makes the code simpler. */
5566 if (parm->pc_key == VKI_PC_KY_CLNAME)
5567 PRE_MEM_WRITE("priocntlsys(clname)", parm->pc_parm,
5568 VKI_PC_CLNMSZ);
5569 else if (ARG4 && ML_(safe_to_deref)((void*)ARG4, 1))
5570 mem_priocntlsys_parm(tid, True /*pre*/,
5571 False /*read*/, (HChar*)ARG4,
5572 parm);
5577 break;
5578 case VKI_PC_SETDFLCL:
5579 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5580 break;
5581 case VKI_PC_GETDFLCL:
5582 if (ARG4) {
5583 /* GETDFLCL writes to the ARG4 buffer only if ARG4 isn't NULL. Also
5584 note that if ARG4 is NULL then the syscall succeeds. */
5585 PRE_MEM_WRITE("priocntlsys(clname)", ARG4, VKI_PC_CLNMSZ);
5587 break;
5588 case VKI_PC_DOPRIO:
5589 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5591 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5592 PRE_FIELD_READ("priocntlsys(op)", prio->pc_op);
5593 if (ML_(safe_to_deref)(&prio->pc_op, sizeof(prio->pc_op))) {
5594 switch (prio->pc_op) {
5595 case VKI_PC_GETPRIO:
5596 PRE_FIELD_WRITE("priocntlsys(cid)", prio->pc_cid);
5597 PRE_FIELD_WRITE("priocntlsys(val)", prio->pc_val);
5598 break;
5599 case VKI_PC_SETPRIO:
5600 PRE_FIELD_READ("priocntlsys(cid)", prio->pc_cid);
5601 PRE_FIELD_READ("priocntlsys(val)", prio->pc_val);
5602 break;
5603 default:
5604 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5605 "cmd=PC_DOPRIO and pc_op=%d", prio->pc_op);
5606 /*NOTREACHED*/
5607 break;
5611 break;
5612 case VKI_PC_ADMIN:
5613 default:
5614 VG_(unimplemented)("Syswrap of the priocntlsys call with cmd %ld.", SARG3);
5615 /*NOTREACHED*/
5616 break;
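/* Marks the class-specific info structure pointed to by clinfo as written,
   based on the scheduling class name reported by the kernel. */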
5620 static void post_mem_write_priocntlsys_clinfo(ThreadId tid,
5621 const HChar *clname, Addr clinfo)
5623 if (VG_STREQ(clname, "RT"))
5624 POST_MEM_WRITE(clinfo, sizeof(vki_rtinfo_t));
5625 else if (VG_STREQ(clname, "TS"))
5626 POST_MEM_WRITE(clinfo, sizeof(vki_tsinfo_t));
5627 else if (VG_STREQ(clname, "IA"))
5628 POST_MEM_WRITE(clinfo, sizeof(vki_iainfo_t));
5629 else if (VG_STREQ(clname, "FSS"))
5630 POST_MEM_WRITE(clinfo, sizeof(vki_fssinfo_t));
5631 else if (VG_STREQ(clname, "FX"))
5632 POST_MEM_WRITE(clinfo, sizeof(vki_fxinfo_t));
5633 else if (VG_STREQ(clname, "SDC")) {
5634 /* Relax. */
5636 else {
5637 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5638 clname);
5639 /*NOTREACHED*/
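/* The POST handler mirrors the command dispatch above and marks the output
   data written by each command as defined. */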
5643 POST(sys_priocntlsys)
5645 switch (ARG3 /*cmd*/) {
5646 case VKI_PC_GETCID:
5647 if (ARG4) {
5648 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5649 POST_FIELD_WRITE(info->pc_cid);
5650 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5651 (Addr)&info->pc_clinfo);
5653 break;
5654 case VKI_PC_GETCLINFO:
5655 if (ARG4) {
5656 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5657 POST_MEM_WRITE((Addr)info->pc_clname,
5658 VG_(strlen)((HChar*)info->pc_clname) + 1);
5659 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5660 (Addr)&info->pc_clinfo);
5662 break;
5663 case VKI_PC_SETPARMS:
5664 /* Relax. */
5665 break;
5666 case VKI_PC_GETPARMS:
5667 /* The next line says that the complete pcparms_t structure is
5668 written which is never actually true (we are too optimistic here).
5669 Unfortunately we can't do better because we don't know what
5670 process class is involved. */
5671 POST_MEM_WRITE(ARG4, sizeof(vki_pcparms_t));
5672 break;
5673 case VKI_PC_GETPRIRANGE:
5674 POST_MEM_WRITE(ARG4, sizeof(vki_pcpri_t));
5675 break;
5676 case VKI_PC_DONICE:
5678 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5679 if (nicee->pc_op == VKI_PC_GETNICE)
5680 POST_FIELD_WRITE(nicee->pc_val);
5682 break;
5683 case VKI_PC_SETXPARMS:
5684 /* Relax. */
5685 break;
5686 case VKI_PC_GETXPARMS:
5688 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5689 vki_uint_t i;
5690 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5691 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5692 if (parm->pc_key == VKI_PC_KY_CLNAME)
5693 POST_MEM_WRITE(parm->pc_parm,
5694 VG_(strlen)((HChar*)(Addr)parm->pc_parm) + 1);
5695 else if (ARG4)
5696 mem_priocntlsys_parm(tid, False /*pre*/, False /*read*/,
5697 (HChar*)ARG4, parm);
5700 break;
5701 case VKI_PC_SETDFLCL:
5702 /* Relax. */
5703 break;
5704 case VKI_PC_GETDFLCL:
5705 if (ARG4)
5706 POST_MEM_WRITE(ARG4, VG_(strlen)((HChar*)ARG4) + 1);
5707 break;
5708 case VKI_PC_DOPRIO:
5710 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5711 if (prio->pc_op == VKI_PC_GETPRIO) {
5712 POST_FIELD_WRITE(prio->pc_cid);
5713 POST_FIELD_WRITE(prio->pc_val);
5716 break;
5717 case VKI_PC_ADMIN:
5718 default:
5719 vg_assert(0);
5720 break;
5724 PRE(sys_pathconf)
5726 /* long pathconf(const char *path, int name); */
5727 PRINT("sys_pathconf ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
5728 PRE_REG_READ2(long, "pathconf", const char *, path, int, name);
5729 PRE_MEM_RASCIIZ("pathconf(path)", ARG1);
5732 PRE(sys_mmap)
5734 /* void *mmap(void *addr, size_t len, int prot, int flags,
5735 int fildes, off_t off); */
5736 SysRes r;
5737 OffT offset;
5739 /* Stay sane. */
5740 vg_assert(VKI_PAGE_SIZE == 4096);
5741 vg_assert(sizeof(offset) == sizeof(ARG6));
5743 PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx )",
5744 ARG1, ARG2, ARG3, ARG4, SARG5, ARG6);
5745 PRE_REG_READ6(long, "mmap", void *, start, vki_size_t, length,
5746 int, prot, int, flags, int, fd, vki_off_t, offset);
5748 /* Make sure that if off < 0 then it's passed correctly to the generic mmap
5749 wrapper. */
5750 offset = *(OffT*)&ARG6;
5752 r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
5753 SET_STATUS_from_SysRes(r);
5756 #if defined(SOLARIS_UUIDSYS_SYSCALL)
5757 PRE(sys_uuidsys)
5759 /* int uuidsys(struct uuid *uuid); */
5760 PRINT("sys_uuidsys ( %#lx )", ARG1);
5761 PRE_REG_READ1(long, "uuidsys", struct vki_uuid *, uuid);
5762 PRE_MEM_WRITE("uuidsys(uuid)", ARG1, sizeof(struct vki_uuid));
5765 POST(sys_uuidsys)
5767 POST_MEM_WRITE(ARG1, sizeof(struct vki_uuid));
5769 #endif /* SOLARIS_UUIDSYS_SYSCALL */
5771 /* Syscall mmapobj emulation. Processes ELF program headers
5772 and maps them into the correct places in memory. Not an easy task, though.
5773 ELF program header of PT_LOAD/PT_SUNWBSS type specifies:
5774 o p_vaddr - actually a memory offset
5775 o p_memsz - total segment size, including text, data and BSS
5776 o p_filesz - file-based segment size mapping (includes only text and data);
5777 p_memsz - p_filesz is the size of BSS
5778 o p_offset - offset into the ELF file where the file-based mapping starts
5780 Several problematic areas to cover here:
5781 1. p_offset can contain a value which is not page-aligned. In that case
5782 we mmap a part of the file prior to p_offset to make the start address
5783 page-aligned.
5784 2. Partially unused page after the file-based mapping must be zeroed.
5785 3. The first mapping is flagged with MR_HDR_ELF and needs to contain
5786 the ELF header. This information is used and verified by the dynamic
5787 linker (ld.so.1). */
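/* A small worked example of problem 1 (hypothetical values, PAGE_SIZE=4096):
   a PT_LOAD header with p_vaddr=0x11234 and p_offset=0x1234 yields
   page_offset = 0x234, so the mapping is moved back to address 0x11000 and
   starts at file offset 0x1000, its size grows by 0x234 bytes, and
   mr_offset records 0x234 so the consumer knows where the real data begin. */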
5788 static SysRes mmapobj_process_phdrs(ThreadId tid, Int fd,
5789 vki_mmapobj_result_t *storage,
5790 vki_uint_t *elements,
5791 const VKI_ESZ(Ehdr) *ehdr,
5792 const VKI_ESZ(Phdr) *phdrs)
5794 #define ADVANCE_PHDR(ehdr, phdr) \
5795 (const VKI_ESZ(Phdr) *) ((const HChar *) (phdr) + (ehdr)->e_phentsize)
5797 SysRes res;
5798 Int i;
5799 Int first_segment_idx = -1;
5800 UInt idx;
5801 UInt segments = 0; /* loadable segments */
5802 Addr start_addr = 0;
5803 Addr end_addr = 0;
5804 Addr elfbrk = 0;
5805 SizeT max_align = VKI_PAGE_SIZE;
5807 /* 1. First pass over phdrs - determine number, span and max alignment. */
5808 const VKI_ESZ(Phdr) *phdr = phdrs;
5809 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5810 /* Skip this header if no memory is requested. */
5811 if (phdr->p_memsz == 0)
5812 continue;
5814 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5815 Off64T offset = 0;
5817 if (VG_(clo_trace_syscalls))
5818 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
5819 "program header #%u: addr=%#lx type=%#lx "
5820 "prot=%#lx memsz=%#lx filesz=%#lx file "
5821 "offset=%#lx\n", idx, phdr->p_vaddr,
5822 (UWord) phdr->p_type, (UWord) phdr->p_flags,
5823 phdr->p_memsz, phdr->p_filesz, phdr->p_offset);
5825 if (segments == 0) {
5826 first_segment_idx = idx;
5828 if (phdr->p_filesz == 0) {
5829 VG_(unimplemented)("Syswrap of the mmapobj call with the first "
5830 "loadable ELF program header specifying "
5831 "p_filesz == 0");
5832 /*NOTREACHED*/
5833 return res;
5836 /* Address of the first segment must be either NULL or within the
5837 first page. */
5838 if ((ehdr->e_type == VKI_ET_DYN) &&
5839 ((phdr->p_vaddr & VKI_PAGEMASK) != 0)) {
5840 if (VG_(clo_trace_syscalls))
5841 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5842 "ELF program header #%u does not land on "
5843 "the first page (vaddr=%#lx)\n", idx,
5844 phdr->p_vaddr);
5845 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5848 start_addr = phdr->p_vaddr;
5849 /* The first segment is mapped from the beginning of the file (to
5850 include also the ELF header), so include this memory as well.
5851 Later on we flag this mapping with MR_HDR_ELF. */
5852 offset = phdr->p_offset;
5855 if (phdr->p_align > 1) {
5856 if ((phdr->p_vaddr % phdr->p_align) !=
5857 (phdr->p_offset % phdr->p_align)) {
5858 if (VG_(clo_trace_syscalls))
5859 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5860 "ELF program header #%u does not have "
5861 "congruent offset and vaddr (vaddr=%#lx "
5862 "file offset=%#lx align=%#lx)\n", idx,
5863 phdr->p_vaddr, phdr->p_offset,
5864 phdr->p_align);
5865 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5869 if (phdr->p_vaddr < end_addr) {
5870 if (VG_(clo_trace_syscalls))
5871 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5872 "ELF program header #%u specifies overlaping "
5873 "address (vaddr=%#lx end_addr=%#lx)\n",
5874 idx, phdr->p_vaddr, end_addr);
5875 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5878 end_addr = elfbrk = phdr->p_vaddr + phdr->p_memsz + offset;
5879 end_addr = VG_PGROUNDUP(end_addr);
5880 if (phdr->p_align > max_align) {
5881 max_align = phdr->p_align;
5884 segments += 1;
5888 /* Alignment check - it should be a power of two. */
5889 if ((max_align & (max_align - 1)) != 0) {
5890 if (VG_(clo_trace_syscalls))
5891 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5892 "is not a power of 2 (%#lx)\n", max_align);
5893 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5895 vg_assert(max_align >= VKI_PAGE_SIZE);
5897 #if defined(VGP_x86_solaris)
5898 if (max_align > VKI_UINT_MAX) {
5899 if (VG_(clo_trace_syscalls))
5900 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5901 "for 32-bit ELF is >32-bits (%#lx)\n", max_align);
5902 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5904 #endif /* VGP_x86_solaris */
5906 if (segments == 0) {
5907 if (VG_(clo_trace_syscalls))
5908 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5909 "to map (0 loadable segments)");
5910 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5913 vg_assert(end_addr >= start_addr);
5914 SizeT span = end_addr - start_addr;
5915 if (span == 0) {
5916 if (VG_(clo_trace_syscalls))
5917 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5918 "to map (%u loadable segments spanning 0 bytes)\n",
5919 segments);
5920 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5922 vg_assert(first_segment_idx >= 0);
5924 if (segments > *elements) {
5925 if (VG_(clo_trace_syscalls))
5926 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: too many "
5927 "segments (%u)\n", segments);
5928 return VG_(mk_SysRes_Error)(VKI_E2BIG);
5931 if (VG_(clo_trace_syscalls))
5932 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: there "
5933 "are %u loadable segments spanning %#lx bytes; max "
5934 "align is %#lx\n", segments, span, max_align);
5936 /* Now get the aspacemgr oraculum advisory.
5937 Later on we mmap file-based and BSS mappings into this address space area
5938 as required and leave the holes unmapped. */
5939 if (ehdr->e_type == VKI_ET_DYN) {
5940 MapRequest mreq = {MAlign, max_align, span};
5941 Bool ok;
5942 start_addr = VG_(am_get_advisory)(&mreq, True /* forClient */, &ok);
5943 if (!ok) {
5944 if (VG_(clo_trace_syscalls))
5945 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5946 "failed to reserve address space of %#lx bytes "
5947 "with alignment %#lx\n", span, max_align);
5948 return VG_(mk_SysRes_Error)(VKI_ENOMEM);
5950 vg_assert(VG_ROUNDUP(start_addr, max_align) == start_addr);
5952 if (VG_(clo_trace_syscalls))
5953 VG_(debugLog)(2, "syswrap-solaris", "PRE(sys_mmapobj): address space "
5954 "reserved at: vaddr=%#lx size=%#lx\n",
5955 start_addr, span);
5956 } else {
5957 vg_assert(ehdr->e_type == VKI_ET_EXEC);
5958 /* ET_EXEC uses fixed mappings. Will be checked when processing phdrs. */
5961 /* This is an utterly ugly hack: the aspacemgr assumes that only one
5962 segment is added at a time. However, we add multiple segments here, so
5963 AM_SANITY_CHECK inside the aspacemgr can easily fail. We want to
5964 prevent that, thus we disable these checks. The scheduler will check
5965 the aspacemgr sanity after the syscall. */
5966 UInt sanity_level = VG_(clo_sanity_level);
5967 VG_(clo_sanity_level) = 1;
5969 /* 2. Second pass over phdrs - map the program headers and fill in
5970 the mmapobj_result_t array. */
5971 phdr = phdrs;
5972 *elements = 0;
5973 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5974 /* Skip this header if no memory is requested. */
5975 if (phdr->p_memsz == 0)
5976 continue;
5978 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5979 UInt prot = 0;
5980 if (phdr->p_flags & VKI_PF_R)
5981 prot |= VKI_PROT_READ;
5982 if (phdr->p_flags & VKI_PF_W)
5983 prot |= VKI_PROT_WRITE;
5984 if (phdr->p_flags & VKI_PF_X)
5985 prot |= VKI_PROT_EXEC;
5987 vki_mmapobj_result_t *mrp = &storage[*elements];
5988 mrp->mr_msize = phdr->p_memsz;
5989 mrp->mr_fsize = phdr->p_filesz;
5990 mrp->mr_offset = 0;
5991 mrp->mr_prot = prot;
5992 mrp->mr_flags = 0;
5993 Off64T file_offset = phdr->p_offset;
5994 if (idx == first_segment_idx) {
5995 mrp->mr_flags = VKI_MR_HDR_ELF;
5996 if (ehdr->e_type == VKI_ET_DYN) {
5997 if (phdr->p_offset > 0) {
5998 /* Include the ELF header into the first segment.
5999 This means we ignore p_offset from the program header
6000 and map from file offset 0. */
6001 mrp->mr_msize += phdr->p_offset;
6002 mrp->mr_fsize += phdr->p_offset;
6003 file_offset = 0;
6005 } else {
6006 vg_assert(ehdr->e_type == VKI_ET_EXEC);
6007 start_addr = phdr->p_vaddr;
6011 /* p_vaddr is absolute for ET_EXEC, and relative for ET_DYN. */
6012 mrp->mr_addr = (vki_caddr_t) phdr->p_vaddr;
6013 if (ehdr->e_type == VKI_ET_DYN) {
6014 mrp->mr_addr += start_addr;
6017 SizeT page_offset = (Addr) mrp->mr_addr & VKI_PAGEOFFSET;
6018 if (page_offset > 0) {
6019 vg_assert(file_offset >= page_offset);
6020 /* Mapping address does not start at the beginning of a page.
6021 Therefore include some bytes before to make it page aligned. */
6022 mrp->mr_addr -= page_offset;
6023 mrp->mr_msize += page_offset;
6024 mrp->mr_offset = page_offset;
6025 file_offset -= page_offset;
6027 SizeT file_size = mrp->mr_fsize + mrp->mr_offset;
6028 if (VG_(clo_trace_syscalls))
6029 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
6030 "mmapobj result #%u: addr=%#lx msize=%#lx "
6031 "fsize=%#lx mr_offset=%#lx prot=%#x flags=%#x\n",
6032 *elements, (Addr) mrp->mr_addr,
6033 (UWord) mrp->mr_msize, (UWord) mrp->mr_fsize,
6034 (UWord) mrp->mr_offset, mrp->mr_prot,
6035 mrp->mr_flags);
6037 UInt flags = VKI_MAP_PRIVATE | VKI_MAP_FIXED;
6038 if ((mrp->mr_prot & (VKI_PROT_WRITE | VKI_PROT_EXEC)) ==
6039 VKI_PROT_EXEC) {
6040 flags |= VKI_MAP_TEXT;
6041 } else {
6042 flags |= VKI_MAP_INITDATA;
6045 /* Determine if there will be a partially unused page after the file-based
6046 mapping. If so, then we need to zero it explicitly afterwards. */
6047 Addr mapping_end = (Addr) mrp->mr_addr + file_size;
6048 SizeT zeroed_size = VG_PGROUNDUP(mapping_end) - mapping_end;
6049 Bool mprotect_needed = False;
6050 if ((zeroed_size > 0) && ((prot & VKI_PROT_WRITE) == 0)) {
6051 prot |= VKI_PROT_WRITE;
6052 mprotect_needed = True;
6055 if (ehdr->e_type == VKI_ET_EXEC) {
6056 /* Now check if the requested address space is available. */
6057 if (!VG_(am_is_free_or_resvn)((Addr) mrp->mr_addr, mrp->mr_msize)) {
6058 if (VG_(clo_trace_syscalls))
6059 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6060 "requested segment at %#lx with size of "
6061 "%#lx bytes is not available\n",
6062 (Addr) mrp->mr_addr, (UWord) mrp->mr_msize);
6063 res = VG_(mk_SysRes_Error)(VKI_EADDRINUSE);
6064 goto mmap_error;
6068 if (file_size > 0) {
6069 res = VG_(am_mmap_file_fixed_client_flags)((Addr) mrp->mr_addr,
6070 file_size, prot, flags, fd, file_offset);
6071 if (sr_isError(res)) {
6072 if (VG_(clo_trace_syscalls))
6073 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6074 "mmap failed: addr=%#lx size=%#lx prot=%#x "
6075 "flags=%#x fd=%d file offset=%#llx\n",
6076 (Addr) mrp->mr_addr, file_size,
6077 prot, flags, fd, file_offset);
6078 goto mmap_error;
6081 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
6082 "segment: vaddr=%#lx size=%#lx prot=%#x "
6083 "flags=%#x fd=%d file offset=%#llx\n",
6084 (Addr) mrp->mr_addr, file_size, mrp->mr_prot,
6085 flags, fd, file_offset);
6088 if (zeroed_size > 0) {
6089 /* Now zero out the end of partially used page. */
6090 VG_(memset)((void *) mapping_end, 0, zeroed_size);
6091 if (mprotect_needed) {
6092 prot &= ~VKI_PROT_WRITE;
6093 res = VG_(do_syscall3)(SYS_mprotect, (Addr) mrp->mr_addr,
6094 file_size, prot);
6095 if (sr_isError(res)) {
6096 if (VG_(clo_trace_syscalls))
6097 VG_(debugLog)(3, "syswrap-solaris",
6098 "mmapobj_process_phdrs: mprotect failed: "
6099 "addr=%#lx size=%#lx prot=%#x",
6100 (Addr) mrp->mr_addr, file_size, prot);
6101 /* Mapping for this segment was already established. */
6102 idx += 1;
6103 goto mmap_error;
6108 if (file_size > 0) {
6109 ML_(notify_core_and_tool_of_mmap)((Addr) mrp->mr_addr, file_size,
6110 prot, flags, fd, file_offset);
6113 /* Page(s) after the file-backed part of the mapping belong to BSS.
6114 They need to be mmap'ed over with correct flags and will be
6115 implicitly zeroed. */
6116 mapping_end = VG_PGROUNDUP(mrp->mr_addr + mrp->mr_msize);
6117 Addr page_end = VG_PGROUNDUP(mrp->mr_addr + file_size);
6118 vg_assert(mapping_end >= page_end);
6119 zeroed_size = mapping_end - page_end;
6120 if (zeroed_size > 0) {
6121 flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS;
6122 res = VG_(am_mmap_anon_fixed_client)(page_end, zeroed_size, prot);
6123 if (sr_isError(res)) {
6124 if (VG_(clo_trace_syscalls))
6125 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
6126 "mmap_anon failed: addr=%#lx size=%#lx "
6127 "prot=%#x\n", page_end, zeroed_size, prot);
6128 idx += 1; /* mapping for this segment was already established */
6129 goto mmap_error;
6132 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
6133 "anonymous segment (BSS): vaddr=%#lx size=%#lx "
6134 "prot=%#x\n", page_end, zeroed_size, prot);
6135 ML_(notify_core_and_tool_of_mmap)(page_end, zeroed_size,
6136 prot, flags, -1, 0);
6139 VG_(di_notify_mmap)((Addr) mrp->mr_addr, False /*allow_SkFileV*/, fd);
6141 *elements += 1;
6142 vg_assert(*elements <= segments);
6146 if ((ehdr->e_type == VKI_ET_EXEC) && (!brk_segment_established)) {
6147 vg_assert(VG_(brk_base) == VG_(brk_limit));
6148 vg_assert(VG_(brk_base) == -1);
6149 VG_(brk_base) = VG_(brk_limit) = elfbrk;
6151 if (!VG_(setup_client_dataseg)()) {
6152 VG_(umsg)("Cannot map memory to initialize brk segment in thread #%d "
6153 "at %#lx\n", tid, VG_(brk_base));
6154 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
6155 goto mmap_error;
6158 VG_(track_client_dataseg)(tid);
6161 /* Restore VG_(clo_sanity_level). The scheduler will perform the aspacemgr
6162 sanity check after the syscall. */
6163 VG_(clo_sanity_level) = sanity_level;
6165 return VG_(mk_SysRes_Success)(0);
6167 mmap_error:
6168 for (i = idx - 1; i > 0; i--) {
6169 Bool discard_translations;
6170 Addr addr = (Addr) storage[i].mr_addr;
6172 VG_(am_munmap_client)(&discard_translations, addr, storage[i].mr_msize);
6173 ML_(notify_core_and_tool_of_munmap)(addr, storage[i].mr_msize);
6175 *elements = 0;
6176 return res;
6178 #undef ADVANCE_PHDR
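/* Reads and validates the ELF header and program headers from fd and then
   hands the actual mapping work over to mmapobj_process_phdrs(). Only
   ET_EXEC and ET_DYN objects of the native ELF class are accepted. */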
6181 static SysRes mmapobj_interpret(ThreadId tid, Int fd,
6182 vki_mmapobj_result_t *storage,
6183 vki_uint_t *elements)
6185 SysRes res;
6187 struct vg_stat stats;
6188 if (VG_(fstat)(fd, &stats) != 0) {
6189 return VG_(mk_SysRes_Error)(VKI_EBADF);
6192 if (stats.size < sizeof(VKI_ESZ(Ehdr))) {
6193 if (VG_(clo_trace_syscalls))
6194 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: insufficient "
6195 "file size (%lld)\n", stats.size);
6196 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6199 /* Align the header buffer appropriately. */
6200 vki_ulong_t lheader[sizeof(VKI_ESZ(Ehdr)) / sizeof(vki_ulong_t) + 1];
6201 HChar *header = (HChar *) &lheader;
6203 res = VG_(pread)(fd, header, sizeof(VKI_ESZ(Ehdr)), 0);
6204 if (sr_isError(res)) {
6205 if (VG_(clo_trace_syscalls))
6206 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6207 "header failed\n");
6208 return res;
6209 } else if (sr_Res(res) != sizeof(VKI_ESZ(Ehdr))) {
6210 if (VG_(clo_trace_syscalls))
6211 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6212 "header failed - only %lu bytes out of %lu\n",
6213 sr_Res(res), (UWord) sizeof(VKI_ESZ(Ehdr)));
6214 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6217 /* Verify file type is ELF. */
6218 if ((header[VKI_EI_MAG0] != VKI_ELFMAG0) ||
6219 (header[VKI_EI_MAG1] != VKI_ELFMAG1) ||
6220 (header[VKI_EI_MAG2] != VKI_ELFMAG2) ||
6221 (header[VKI_EI_MAG3] != VKI_ELFMAG3)) {
6222 if (VG_(clo_trace_syscalls))
6223 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6224 "missing magic\n");
6225 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6228 if (header[VKI_EI_CLASS] != VG_ELF_CLASS) {
6229 if (VG_(clo_trace_syscalls))
6230 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF class "
6231 "mismatch (%u vs %u)\n", header[VKI_EI_CLASS],
6232 VG_ELF_CLASS);
6233 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6236 VKI_ESZ(Ehdr) *ehdr = (VKI_ESZ(Ehdr) *) header;
6237 if ((ehdr->e_type != VKI_ET_EXEC) && (ehdr->e_type != VKI_ET_DYN)) {
6238 VG_(unimplemented)("Syswrap of the mmapobj call with ELF type %u.",
6239 ehdr->e_type);
6240 /*NOTREACHED*/
6241 return res;
6244 if (ehdr->e_phnum == VKI_PN_XNUM) {
6245 VG_(unimplemented)("Syswrap of the mmapobj call with number of ELF "
6246 "program headers == PN_XNUM");
6247 /*NOTREACHED*/
6248 return res;
6251 /* Check alignment. */
6252 #if defined(VGP_x86_solaris)
6253 if (!VG_IS_4_ALIGNED(ehdr->e_phentsize)) {
6254 #elif defined(VGP_amd64_solaris)
6255 if (!VG_IS_8_ALIGNED(ehdr->e_phentsize)) {
6256 #else
6257 # error "Unknown platform"
6258 #endif
6259 if (VG_(clo_trace_syscalls))
6260 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6261 "phentsize not aligned properly (%u)\n",
6262 ehdr->e_phentsize);
6263 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6266 SizeT phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
6267 if (phdrs_size == 0) {
6268 if (VG_(clo_trace_syscalls))
6269 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: no ELF "
6270 "program headers\n");
6271 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6274 VKI_ESZ(Phdr) *phdrs = VG_(malloc)("syswrap.mi.1", phdrs_size);
6275 res = VG_(pread)(fd, phdrs, phdrs_size, ehdr->e_phoff);
6276 if (sr_isError(res)) {
6277 if (VG_(clo_trace_syscalls))
6278 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6279 "program headers failed\n");
6280 VG_(free)(phdrs);
6281 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6282 } else if (sr_Res(res) != phdrs_size) {
6283 if (VG_(clo_trace_syscalls))
6284 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6285 "program headers failed - only %lu bytes out of %lu\n",
6286 sr_Res(res), phdrs_size);
6287 VG_(free)(phdrs);
6288 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6291 if (VG_(clo_trace_syscalls))
6292 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_interpret: %u ELF "
6293 "program headers with total size of %lu bytes\n",
6294 ehdr->e_phnum, phdrs_size);
6296 /* Now process the program headers. */
6297 res = mmapobj_process_phdrs(tid, fd, storage, elements, ehdr, phdrs);
6298 VG_(free)(phdrs);
6299 return res;
6302 PRE(sys_mmapobj)
6304 /* int mmapobj(int fd, uint_t flags, mmapobj_result_t *storage,
6305 uint_t *elements, void *arg); */
6306 PRINT("sys_mmapobj ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6307 ARG4, ARG5);
6308 PRE_REG_READ5(long, "mmapobj", int, fd, vki_uint_t, flags,
6309 mmapobj_result_t *, storage, uint_t *, elements,
6310 void *, arg);
6312 PRE_MEM_READ("mmapobj(elements)", ARG4, sizeof(vki_uint_t));
6313 /*PRE_MEM_WRITE("mmapobj(elements)", ARG4, sizeof(vki_uint_t));*/
6314 if (ML_(safe_to_deref)((void*)ARG4, sizeof(vki_uint_t))) {
6315 vki_uint_t *u = (vki_uint_t*)ARG4;
6316 PRE_MEM_WRITE("mmapobj(storage)", ARG3,
6317 *u * sizeof(vki_mmapobj_result_t));
6320 if (ARG2 & VKI_MMOBJ_PADDING)
6321 PRE_MEM_READ("mmapobj(arg)", ARG5, sizeof(vki_size_t));
6323 /* Be strict. */
6324 if (!ML_(fd_allowed)(ARG1, "mmapobj", tid, False)) {
6325 SET_STATUS_Failure(VKI_EBADF);
6326 return;
6329 /* We cannot advise mmapobj about desired address(es). Unfortunately the
6330 kernel places mappings from mmapobj at the end of the process address
6331 space, defeating memcheck's optimized fast 2-level array algorithm.
6332 So we need to emulate what mmapobj does in the kernel. */
6334 /* Sanity check on parameters. */
6335 if ((ARG2 & ~VKI_MMOBJ_ALL_FLAGS) != 0) {
6336 SET_STATUS_Failure(VKI_EINVAL);
6337 return;
6340 if (!ML_(safe_to_deref)((void *) ARG4, sizeof(vki_uint_t))) {
6341 SET_STATUS_Failure(VKI_EFAULT);
6342 return;
6344 vki_uint_t *elements = (vki_uint_t *) ARG4;
6346 if (*elements > 0) {
6347 if (!ML_(safe_to_deref)((void *) ARG3,
6348 *elements * sizeof(vki_mmapobj_result_t))) {
6349 SET_STATUS_Failure(VKI_EFAULT);
6350 return;
6354 /* For now, only MMOBJ_INTERPRET without MMOBJ_PADDING is supported. */
6355 if (ARG2 != VKI_MMOBJ_INTERPRET) {
6356 VG_(unimplemented)("Syswrap of the mmapobj call with flags %lu.", ARG2);
6357 /*NOTREACHED*/
6358 return;
6361 SysRes res = mmapobj_interpret(tid, (Int) ARG1,
6362 (vki_mmapobj_result_t *) ARG3, elements);
6363 SET_STATUS_from_SysRes(res);
6365 if (!sr_isError(res)) {
6366 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
6368 UInt idx;
6369 for (idx = 0; idx < *(vki_uint_t *) ARG4; idx++) {
6370 vki_mmapobj_result_t *mrp = &((vki_mmapobj_result_t *) ARG3)[idx];
6371 POST_FIELD_WRITE(mrp->mr_addr);
6372 POST_FIELD_WRITE(mrp->mr_msize);
6373 POST_FIELD_WRITE(mrp->mr_fsize);
6374 POST_FIELD_WRITE(mrp->mr_prot);
6375 POST_FIELD_WRITE(mrp->mr_flags);
6376 POST_FIELD_WRITE(mrp->mr_offset);
6381 PRE(sys_memcntl)
6383 /* int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
6384 int attr, int mask); */
6385 PRINT("sys_memcntl ( %#lx, %#lx, %ld, %#lx, %#lx, %#lx )", ARG1, ARG2,
6386 SARG3, ARG4, ARG5, ARG6);
6387 PRE_REG_READ6(long, "memcntl", void *, addr, vki_size_t, len, int, cmd,
6388 void *, arg, int, attr, int, mask);
6390 if (ARG3 != VKI_MC_LOCKAS && ARG3 != VKI_MC_UNLOCKAS &&
6391 !ML_(valid_client_addr)(ARG1, ARG2, tid, "memcntl")) {
6392 /* MC_LOCKAS and MC_UNLOCKAS work on the complete address space thus we
6393 don't check the address range validity if these commands are
6394 requested. */
6395 SET_STATUS_Failure(VKI_ENOMEM);
6396 return;
6399 if (ARG3 == VKI_MC_HAT_ADVISE)
6400 PRE_MEM_READ("memcntl(arg)", ARG4, sizeof(struct vki_memcntl_mha));
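/* STREAMS message retrieval. The ctlptr/dataptr strbuf structures are
   checked for readable maxlen/buf fields and their buffers are marked as
   writable up to maxlen; the POST handler then marks only the returned
   len bytes as written. */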
6403 PRE(sys_getpmsg)
6405 /* int getpmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
6406 int *bandp, int *flagsp); */
6407 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6408 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6409 *flags |= SfMayBlock;
6410 PRINT("sys_getpmsg ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6411 ARG4, ARG5);
6412 PRE_REG_READ5(long, "getpmsg", int, fildes, struct vki_strbuf *, ctlptr,
6413 struct vki_strbuf *, dataptr, int *, bandp, int *, flagsp);
6414 if (ctrlptr) {
6415 PRE_FIELD_READ("getpmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
6416 PRE_FIELD_WRITE("getpmsg(ctrlptr->len)", ctrlptr->len);
6417 PRE_FIELD_READ("getpmsg(ctrlptr->buf)", ctrlptr->buf);
6418 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6419 && ctrlptr->maxlen > 0)
6420 PRE_MEM_WRITE("getpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6421 ctrlptr->maxlen);
6423 if (dataptr) {
6424 PRE_FIELD_READ("getpmsg(dataptr->maxlen)", dataptr->maxlen);
6425 PRE_FIELD_WRITE("getpmsg(dataptr->len)", dataptr->len);
6426 PRE_FIELD_READ("getpmsg(dataptr->buf)", dataptr->buf);
6427 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6428 && dataptr->maxlen > 0)
6429 PRE_MEM_WRITE("getpmsg(dataptr->buf)", (Addr)dataptr->buf,
6430 dataptr->maxlen);
6432 PRE_MEM_READ("getpmsg(bandp)", ARG4, sizeof(int));
6433 /*PRE_MEM_WRITE("getpmsg(bandp)", ARG4, sizeof(int));*/
6434 PRE_MEM_READ("getpmsg(flagsp)", ARG5, sizeof(int));
6435 /*PRE_MEM_WRITE("getpmsg(flagsp)", ARG5, sizeof(int));*/
6437 /* Be strict. */
6438 if (!ML_(fd_allowed)(ARG1, "getpmsg", tid, False))
6439 SET_STATUS_Failure(VKI_EBADF);
6442 POST(sys_getpmsg)
6444 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6445 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6447 if (ctrlptr && ctrlptr->len > 0)
6448 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
6449 if (dataptr && dataptr->len > 0)
6450 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
6451 POST_MEM_WRITE(ARG4, sizeof(int));
6452 POST_MEM_WRITE(ARG5, sizeof(int));
6455 PRE(sys_putpmsg)
6457 /* int putpmsg(int fildes, const struct strbuf *ctlptr,
6458 const struct strbuf *dataptr, int band, int flags); */
6459 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6460 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6461 *flags |= SfMayBlock;
6462 PRINT("sys_putpmsg ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
6463 SARG5);
6464 PRE_REG_READ5(long, "putpmsg", int, fildes, struct vki_strbuf *, ctrlptr,
6465 struct vki_strbuf *, dataptr, int, band, int, flags);
6466 if (ctrlptr) {
6467 PRE_FIELD_READ("putpmsg(ctrlptr->len)", ctrlptr->len);
6468 PRE_FIELD_READ("putpmsg(ctrlptr->buf)", ctrlptr->buf);
6469 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6470 && ctrlptr->len > 0)
6471 PRE_MEM_READ("putpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6472 ctrlptr->len);
6474 if (dataptr) {
6475 PRE_FIELD_READ("putpmsg(dataptr->len)", dataptr->len);
6476 PRE_FIELD_READ("putpmsg(dataptr->buf)", dataptr->buf);
6477 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6478 && dataptr->len > 0)
6479 PRE_MEM_READ("putpmsg(dataptr->buf)", (Addr)dataptr->buf,
6480 dataptr->len);
6483 /* Be strict. */
6484 if (!ML_(fd_allowed)(ARG1, "putpmsg", tid, False))
6485 SET_STATUS_Failure(VKI_EBADF);
6488 #if defined(SOLARIS_OLD_SYSCALLS)
6489 PRE(sys_rename)
6491 /* int rename(const char *from, const char *to); */
6493 *flags |= SfMayBlock;
6494 PRINT("sys_rename ( %#lx(%s), %#lx(%s) )",
6495 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2);
6496 PRE_REG_READ2(long, "rename", const char *, from, const char *, to);
6498 PRE_MEM_RASCIIZ("rename(from)", ARG1);
6499 PRE_MEM_RASCIIZ("rename(to)", ARG2);
6501 #endif /* SOLARIS_OLD_SYSCALLS */
6503 PRE(sys_uname)
6505 /* int uname(struct utsname *name); */
6506 PRINT("sys_uname ( %#lx )", ARG1);
6507 PRE_REG_READ1(long, "uname", struct vki_utsname *, name);
6508 PRE_MEM_WRITE("uname(name)", ARG1, sizeof(struct vki_utsname));
6511 POST(sys_uname)
6513 struct vki_utsname *name = (struct vki_utsname *) ARG1;
6514 POST_MEM_WRITE((Addr) name->sysname, VG_(strlen)(name->sysname) + 1);
6515 POST_MEM_WRITE((Addr) name->nodename, VG_(strlen)(name->nodename) + 1);
6516 POST_MEM_WRITE((Addr) name->release, VG_(strlen)(name->release) + 1);
6517 POST_MEM_WRITE((Addr) name->version, VG_(strlen)(name->version) + 1);
6518 POST_MEM_WRITE((Addr) name->machine, VG_(strlen)(name->machine) + 1);
6521 PRE(sys_setegid)
6523 /* int setegid(gid_t egid); */
6524 PRINT("sys_setegid ( %ld )", SARG1);
6525 PRE_REG_READ1(long, "setegid", vki_gid_t, egid);
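/* Queries for the open-file limit (CONFIG_OPEN_FILES) are answered directly
   with VG_(fd_soft_limit) instead of being passed to the kernel, so the
   client sees the soft limit as maintained by Valgrind's core (which
   accounts for descriptors Valgrind reserves for its own use). */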
6528 PRE(sys_sysconfig)
6530 /* long sysconf(int name); */
6531 PRINT("sys_sysconfig ( %ld )", SARG1);
6532 PRE_REG_READ1(long, "sysconf", int, name);
6534 if (ARG1 == VKI_CONFIG_OPEN_FILES)
6535 SET_STATUS_Success(VG_(fd_soft_limit));
6538 PRE(sys_systeminfo)
6540 /* int sysinfo(int command, char *buf, long count); */
6541 PRINT("sys_systeminfo ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
6542 PRE_REG_READ3(long, "sysinfo", int, command, char *, buf, long, count);
6544 switch (ARG1 /*command*/) {
6545 case VKI_SI_SYSNAME:
6546 case VKI_SI_HOSTNAME:
6547 case VKI_SI_RELEASE:
6548 case VKI_SI_VERSION:
6549 case VKI_SI_MACHINE:
6550 case VKI_SI_ARCHITECTURE:
6551 case VKI_SI_HW_SERIAL:
6552 case VKI_SI_HW_PROVIDER:
6553 case VKI_SI_SRPC_DOMAIN:
6554 case VKI_SI_PLATFORM:
6555 case VKI_SI_ISALIST:
6556 case VKI_SI_DHCP_CACHE:
6557 case VKI_SI_ARCHITECTURE_32:
6558 case VKI_SI_ARCHITECTURE_64:
6559 case VKI_SI_ARCHITECTURE_K:
6560 case VKI_SI_ARCHITECTURE_NATIVE:
6561 PRE_MEM_WRITE("sysinfo(buf)", ARG2, ARG3);
6562 break;
6564 case VKI_SI_SET_HOSTNAME:
6565 case VKI_SI_SET_SRCP_DOMAIN:
6566 PRE_MEM_RASCIIZ("sysinfo(buf)", ARG2);
6567 break;
6569 default:
6570 VG_(unimplemented)("Syswrap of the sysinfo call with command %ld.", SARG1);
6571 /*NOTREACHED*/
6572 break;
6576 POST(sys_systeminfo)
6578 if (ARG1 != VKI_SI_SET_HOSTNAME && ARG1 != VKI_SI_SET_SRCP_DOMAIN)
6579 POST_MEM_WRITE(ARG2, MIN(RES, ARG3));
6582 PRE(sys_seteuid)
6584 /* int seteuid(uid_t euid); */
6585 PRINT("sys_seteuid ( %ld )", SARG1);
6586 PRE_REG_READ1(long, "seteuid", vki_uid_t, euid);
6589 PRE(sys_forksys)
6591 /* int64_t forksys(int subcode, int flags); */
6592 Int fds[2];
6593 Int res;
6594 PRINT("sys_forksys ( %ld, %ld )", SARG1, SARG2);
6595 PRE_REG_READ2(long, "forksys", int, subcode, int, flags);
6597 if (ARG1 == 1) {
6598 /* Support for forkall() requires changes to the big lock processing
6599 which are not yet implemented. */
6600 VG_(unimplemented)("Support for forkall().");
6601 /*NOTREACHED*/
6602 return;
6605 if (ARG1 != 0 && ARG1 != 2) {
6606 VG_(unimplemented)("Syswrap of the forksys call where subcode=%ld.",
6607 SARG1);
6608 /*NOTREACHED*/
6611 if (ARG1 == 2) {
6612 /* vfork() is requested. Translate it to a normal fork() but work around
6613 a problem with posix_spawn() which relies on the real vfork()
6614 behaviour. See a description in vg_preloaded.c for details. */
6615 res = VG_(pipe)(fds);
6616 vg_assert(res == 0);
6618 vg_assert(fds[0] != fds[1]);
6620 /* Move to Valgrind fds and set the close-on-exec flag on both of them
6621 (done by VG_(safe_fd)). */
6622 fds[0] = VG_(safe_fd)(fds[0]);
6623 fds[1] = VG_(safe_fd)(fds[1]);
6624 vg_assert(fds[0] != fds[1]);
6626 vg_assert(VG_(vfork_fildes_addr) != NULL);
6627 vg_assert(*VG_(vfork_fildes_addr) == -1);
6628 *VG_(vfork_fildes_addr) = fds[0];
6631 VG_(do_atfork_pre)(tid);
6632 SET_STATUS_from_SysRes(VG_(do_syscall2)(__NR_forksys, 0, ARG2));
6634 if (!SUCCESS) {
6635 /* vfork */
6636 if (ARG1 == 2) {
6637 VG_(close)(fds[0]);
6638 VG_(close)(fds[1]);
6641 return;
6644 if (RESHI) {
6645 VG_(do_atfork_child)(tid);
6647 /* vfork */
6648 if (ARG1 == 2)
6649 VG_(close)(fds[1]);
6651 # if defined(SOLARIS_PT_SUNDWTRACE_THRP)
6652 /* The kernel can map a new page as scratch space for the DTrace fasttrap
6653 provider. There is no way we can directly get its address - it's all
6654 private to the kernel. Fish it out the slow way. */
6655 Addr addr;
6656 SizeT size;
6657 UInt prot;
6658 Bool found = VG_(am_search_for_new_segment)(&addr, &size, &prot);
6659 if (found) {
6660 VG_(debugLog)(1, "syswrap-solaris", "PRE(forksys), new segment: "
6661 "vaddr=%#lx, size=%#lx, prot=%#x\n", addr, size, prot);
6662 vg_assert(prot == (VKI_PROT_READ | VKI_PROT_EXEC));
6663 vg_assert(size == VKI_PAGE_SIZE);
6664 ML_(notify_core_and_tool_of_mmap)(addr, size, prot, VKI_MAP_ANONYMOUS,
6665 -1, 0);
6667 /* Note: We don't notify the debuginfo reader about this mapping
6668 because there is no debug information stored in this segment. */
6670 # endif /* SOLARIS_PT_SUNDWTRACE_THRP */
6672 else {
6673 VG_(do_atfork_parent)(tid);
6675 /* Print information about the fork. */
6676 PRINT(" fork: process %d created child %d\n", VG_(getpid)(),
6677 (Int)RES);
6679 /* vfork */
6680 if (ARG1 == 2) {
6681 /* Wait for the child to finish (exec or exit). */
6682 UChar w;
6684 VG_(close)(fds[0]);
6686 res = VG_(read)(fds[1], &w, 1);
6687 if (res == 1)
6688 SET_STATUS_Failure(w);
6689 VG_(close)(fds[1]);
6691 *VG_(vfork_fildes_addr) = -1;
6696 #if defined(SOLARIS_GETRANDOM_SYSCALL)
6697 PRE(sys_getrandom)
6699 /* int getrandom(void *buf, size_t buflen, uint_t flags); */
6700 PRINT("sys_getrandom ( %#lx, %lu, %lu )", ARG1, ARG2, ARG3);
6701 PRE_REG_READ3(long, "getrandom", void *, buf, vki_size_t, buflen,
6702 vki_uint_t, flags);
6703 PRE_MEM_WRITE("getrandom(buf)", ARG1, ARG2);
6706 POST(sys_getrandom)
6708 POST_MEM_WRITE(ARG1, RES);
6710 #endif /* SOLARIS_GETRANDOM_SYSCALL */
6712 PRE(sys_sigtimedwait)
6714 /* int sigtimedwait(const sigset_t *set, siginfo_t *info,
6715 const timespec_t *timeout); */
6716 *flags |= SfMayBlock;
6717 PRINT("sys_sigtimedwait ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
6718 PRE_REG_READ3(long, "sigtimedwait", vki_sigset_t *, set,
6719 vki_siginfo_t *, info, vki_timespec_t *, timeout);
6720 PRE_MEM_READ("sigtimewait(set)", ARG1, sizeof(vki_sigset_t));
6721 if (ARG2)
6722 PRE_MEM_WRITE("sigtimedwait(info)", ARG2, sizeof(vki_siginfo_t));
6723 if (ARG3)
6724 PRE_MEM_READ("sigtimedwait(timeout)", ARG3, sizeof(vki_timespec_t));
6727 POST(sys_sigtimedwait)
6729 if (ARG2)
6730 POST_MEM_WRITE(ARG2, sizeof(vki_siginfo_t));
6733 PRE(sys_yield)
6735 /* void yield(void); */
6736 *flags |= SfMayBlock;
6737 PRINT("sys_yield ( )");
6738 PRE_REG_READ0(long, "yield");
6741 PRE(sys_lwp_sema_post)
6743 /* int lwp_sema_post(lwp_sema_t *sema); */
6744 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6745 *flags |= SfMayBlock;
6746 PRINT("sys_lwp_sema_post ( %#lx )", ARG1);
6747 PRE_REG_READ1(long, "lwp_sema_post", lwp_sema_t *, sema);
6749 PRE_FIELD_READ("lwp_sema_post(sema->type)", sema->vki_sema_type);
6750 PRE_FIELD_READ("lwp_sema_post(sema->count)", sema->vki_sema_count);
6751 /*PRE_FIELD_WRITE("lwp_sema_post(sema->count)", sema->vki_sema_count);*/
6752 PRE_FIELD_READ("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);
6753 /*PRE_FIELD_WRITE("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);*/
6756 POST(sys_lwp_sema_post)
6758 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6759 POST_FIELD_WRITE(sema->vki_sema_count);
6760 POST_FIELD_WRITE(sema->vki_sema_waiters);
6763 PRE(sys_lwp_sema_trywait)
6765 /* int lwp_sema_trywait(lwp_sema_t *sema); */
6766 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6767 PRINT("sys_lwp_sema_trywait ( %#lx )", ARG1);
6768 PRE_REG_READ1(long, "lwp_sema_trywait", lwp_sema_t *, sema);
6770 PRE_FIELD_READ("lwp_sema_trywait(sema->type)", sema->vki_sema_type);
6771 PRE_FIELD_READ("lwp_sema_trywait(sema->count)", sema->vki_sema_count);
6772 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->count)", sema->vki_sema_count);*/
6773 PRE_FIELD_READ("lwp_sema_trywait(sema->waiters)", sema->vki_sema_waiters);
6774 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->waiters)",
6775 sema->vki_sema_waiters);*/
6778 POST(sys_lwp_sema_trywait)
6780 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6781 POST_FIELD_WRITE(sema->vki_sema_count);
6782 POST_FIELD_WRITE(sema->vki_sema_waiters);
6785 PRE(sys_lwp_detach)
6787 /* int lwp_detach(id_t lwpid); */
6788 PRINT("sys_lwp_detach ( %ld )", SARG1);
6789 PRE_REG_READ1(long, "lwp_detach", vki_id_t, lwpid);
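/* modctl() is the multiplexed kernel module control syscall. Only the
   subcommands handled below (MODLOAD, MODUNLOAD, MODINFO and, where
   available, the MODNVL_DEVLINKSYNC/MODDEVINFO_CACHE_TS operations) are
   wrapped; anything else is reported via VG_(unimplemented). */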
6792 PRE(sys_modctl)
6794 /* int modctl(int cmd, uintptr_t a1, uintptr_t a2, uintptr_t a3,
6795 uintptr_t a4, uintptr_t a5); */
6796 *flags |= SfMayBlock;
6798 switch (ARG1 /*cmd*/) {
6799 case VKI_MODLOAD:
6800 /* int modctl_modload(int use_path, char *filename, int *rvp); */
6801 PRINT("sys_modctl ( %ld, %ld, %#lx(%s), %#lx )",
6802 SARG1, ARG2, ARG3, (HChar *) ARG3, ARG4);
6803 PRE_REG_READ4(long, SC2("modctl", "modload"),
6804 int, cmd, int, use_path, char *, filename, int *, rvp);
6805 PRE_MEM_RASCIIZ("modctl(filename)", ARG3);
6806 if (ARG4 != 0) {
6807 PRE_MEM_WRITE("modctl(rvp)", ARG4, sizeof(int *));
6809 break;
6810 case VKI_MODUNLOAD:
6811 /* int modctl_modunload(modid_t id); */
6812 PRINT("sys_modctl ( %ld, %ld )", SARG1, SARG2);
6813 PRE_REG_READ2(long, SC2("modctl", "modunload"),
6814 int, cmd, vki_modid_t, id);
6815 break;
6816 case VKI_MODINFO: {
6817 /* int modctl_modinfo(modid_t id, struct modinfo *umodi); */
6818 PRINT("sys_modctl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
6819 PRE_REG_READ3(long, SC2("modctl", "modinfo"),
6820 int, cmd, vki_modid_t, id, struct modinfo *, umodi);
6822 struct vki_modinfo *umodi = (struct vki_modinfo *) ARG3;
6823 PRE_FIELD_READ("modctl(umodi->mi_info)", umodi->mi_info);
6824 PRE_FIELD_READ("modctl(umodi->mi_id)", umodi->mi_id);
6825 PRE_FIELD_READ("modctl(umodi->mi_nextid)", umodi->mi_nextid);
6826 PRE_MEM_WRITE("modctl(umodi)", ARG3, sizeof(struct vki_modinfo));
6827 break;
6830 # if defined(SOLARIS_MODCTL_MODNVL)
6831 case VKI_MODNVL_DEVLINKSYNC:
6832 /* int modnvl_devlinksync(sysnvl_op_t a1, uintptr_t a2, uintptr_t a3,
6833 uintptr_t a4); */
6834 switch (ARG2 /*op*/) {
6836 # if defined(HAVE_SYS_SYSNVL_H)
6837 case VKI_SYSNVL_OP_GET:
6838 PRE_REG_READ5(long, SC3("modctl", "modnvl_devlinksync", "get"),
6839 int, cmd, sysnvl_op_t, a1, char *, bufp,
6840 uint64_t *, buflenp, uint64_t *, genp);
6841 # else
6842 case VKI_MODCTL_NVL_OP_GET:
6843 PRE_REG_READ5(long, SC3("modctl", "modnvl_devlinksync", "get"),
6844 int, cmd, modctl_nvl_op_t, a1, char *, bufp,
6845 uint64_t *, buflenp, uint64_t *, genp);
6846 # endif /* HAVE_SYS_SYSNVL_H */
6848 PRINT("sys_modctl ( %ld, %lu, %#lx, %#lx, %#lx )",
6849 SARG1, ARG2, ARG3, ARG4, ARG5);
6850 PRE_MEM_WRITE("modctl(buflenp)", ARG4, sizeof(vki_uint64_t));
6851 if (ML_(safe_to_deref)((vki_uint64_t *) ARG4, sizeof(vki_uint64_t))) {
6852 if (ARG3 != 0) {
6853 PRE_MEM_WRITE("modctl(bufp)", ARG3, *(vki_uint64_t *) ARG4);
6856 if (ARG5 != 0) {
6857 PRE_MEM_WRITE("modctl(genp)", ARG5, sizeof(vki_uint64_t));
6859 break;
6861 # if defined(HAVE_SYS_SYSNVL_H)
6862 case VKI_SYSNVL_OP_UPDATE:
6863 PRE_REG_READ4(long, SC3("modctl", "modnvl_devlinksync", "update"),
6864 int, cmd, sysnvl_op_t, a1, char *, bufp,
6865 uint64_t *, buflenp);
6866 # else
6867 case VKI_MODCTL_NVL_OP_UPDATE:
6868 PRE_REG_READ4(long, SC3("modctl", "modnvl_devlinksync", "update"),
6869 int, cmd, modctl_nvl_op_t, a1, char *, bufp,
6870 uint64_t *, buflenp);
6871 # endif /* HAVE_SYS_SYSNVL_H */
6873 PRINT("sys_modctl ( %ld, %lu, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
6874 PRE_MEM_READ("modctl(buflenp)", ARG4, sizeof(vki_uint64_t));
6875 if (ML_(safe_to_deref)((vki_uint64_t *) ARG4, sizeof(vki_uint64_t))) {
6876 PRE_MEM_READ("modctl(bufp)", ARG3, *(vki_uint64_t *) ARG4);
6878 break;
6880 default:
6881 VG_(unimplemented)("Syswrap of the modctl call with command "
6882 "MODNVL_DEVLINKSYNC and op %ld.", ARG2);
6883 /*NOTREACHED*/
6884 break;
6886 break;
6888 case VKI_MODDEVINFO_CACHE_TS:
6889 /* int modctl_devinfo_cache_ts(uint64_t *utsp); */
6890 PRINT("sys_modctl ( %ld, %#lx )", SARG1, ARG2);
6891 PRE_REG_READ2(long, SC2("modctl", "moddevinfo_cache_ts"),
6892 int, cmd, uint64_t *, utsp);
6893 PRE_MEM_WRITE("modctl(utsp)", ARG2, sizeof(vki_uint64_t));
6894 break;
6895 # endif /* SOLARIS_MODCTL_MODNVL */
6897 default:
6898 VG_(unimplemented)("Syswrap of the modctl call with command %ld.", SARG1);
6899 /*NOTREACHED*/
6900 break;
6904 POST(sys_modctl)
6906 switch (ARG1 /*cmd*/) {
6907 case VKI_MODLOAD:
6908 if (ARG4 != 0) {
6909 POST_MEM_WRITE(ARG4, sizeof(int *));
6911 break;
6912 case VKI_MODUNLOAD:
6913 break;
6914 case VKI_MODINFO:
6915 POST_MEM_WRITE(ARG3, sizeof(struct vki_modinfo));
6916 break;
6917 # if defined(SOLARIS_MODCTL_MODNVL)
6918 case VKI_MODNVL_DEVLINKSYNC:
6919 switch (ARG2 /*op*/) {
6921 # if defined(HAVE_SYS_SYSNVL_H)
6922 case VKI_SYSNVL_OP_GET:
6923 # else
6924 case VKI_MODCTL_NVL_OP_GET:
6925 # endif /* HAVE_SYS_SYSNVL_H */
6927 POST_MEM_WRITE(ARG4, sizeof(vki_uint64_t));
6928 if (ARG3 != 0) {
6929 POST_MEM_WRITE(ARG3, *(vki_uint64_t *) ARG4);
6931 if (ARG5 != 0) {
6932 POST_MEM_WRITE(ARG5, sizeof(vki_uint64_t));
6934 break;
6936 # if defined(HAVE_SYS_SYSNVL_H)
6937 case VKI_SYSNVL_OP_UPDATE:
6938 # else
6939 case VKI_MODCTL_NVL_OP_UPDATE:
6940 # endif /* HAVE_SYS_SYSNVL_H */
6941 break;
6943 default:
6944 vg_assert(0);
6945 break;
6947 break;
6948 case VKI_MODDEVINFO_CACHE_TS:
6949 POST_MEM_WRITE(ARG2, sizeof(vki_uint64_t));
6950 break;
6951 # endif /* SOLARIS_MODCTL_MODNVL */
6953 default:
6954 vg_assert(0);
6955 break;
6959 PRE(sys_fchroot)
6961 /* int fchroot(int fd); */
6962 PRINT("sys_fchroot ( %ld )", SARG1);
6963 PRE_REG_READ1(long, "fchroot", int, fd);
6965 /* Be strict. */
6966 if (!ML_(fd_allowed)(ARG1, "fchroot", tid, False))
6967 SET_STATUS_Failure(VKI_EBADF);
6970 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
6971 PRE(sys_system_stats)
6973 /* void system_stats(int flag); */
6974 PRINT("sys_system_stats ( %ld )", SARG1);
6975 PRE_REG_READ1(void, "system_stats", int, flag);
6977 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
6979 PRE(sys_gettimeofday)
6981 /* Kernel: int gettimeofday(struct timeval *tp); */
6982 PRINT("sys_gettimeofday ( %#lx )", ARG1);
6983 PRE_REG_READ1(long, "gettimeofday", struct timeval *, tp);
6984 if (ARG1)
6985 PRE_timeval_WRITE("gettimeofday(tp)", ARG1);
6988 POST(sys_gettimeofday)
6990 if (ARG1)
6991 POST_timeval_WRITE(ARG1);
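/* lwp_create() is intercepted so that the new LWP starts on a stack
   allocated by Valgrind and with a guest state initialized from the
   ucontext_t supplied by the client; the real syscall is then issued with
   a context prepared by ML_(setup_start_thread_context)(). */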
6994 PRE(sys_lwp_create)
6996 /* int lwp_create(ucontext_t *ucp, int flags, id_t *new_lwp) */
6998 ThreadId ctid;
6999 ThreadState *ptst;
7000 ThreadState *ctst;
7001 Addr stack;
7002 SysRes res;
7003 vki_ucontext_t uc;
7004 Bool tool_informed = False;
7006 PRINT("sys_lwp_create ( %#lx, %ld, %#lx )", ARG1, ARG2, ARG3);
7007 PRE_REG_READ3(long, "lwp_create", ucontext_t *, ucp, int, flags,
7008 id_t *, new_lwp);
7010 if (ARG3 != 0)
7011 PRE_MEM_WRITE("lwp_create(new_lwp)", ARG3, sizeof(vki_id_t));
7013 /* If we can't deref ucontext_t then we can't do anything. */
7014 if (!ML_(safe_to_deref)((void*)ARG1, sizeof(vki_ucontext_t))) {
7015 SET_STATUS_Failure(VKI_EINVAL);
7016 return;
7019 ctid = VG_(alloc_ThreadState)();
7020 ptst = VG_(get_ThreadState)(tid);
7021 ctst = VG_(get_ThreadState)(ctid);
7023 /* Stay sane. */
7024 vg_assert(VG_(is_running_thread)(tid));
7025 vg_assert(VG_(is_valid_tid)(ctid));
7027 stack = ML_(allocstack)(ctid);
7028 if (!stack) {
7029 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
7030 goto out;
7033 /* First inherit parent's guest state */
7034 ctst->arch.vex = ptst->arch.vex;
7035 ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
7036 ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;
7038 /* Set up some values. */
7039 ctst->os_state.parent = tid;
7040 ctst->os_state.threadgroup = ptst->os_state.threadgroup;
7041 ctst->sig_mask = ptst->sig_mask;
7042 ctst->tmp_sig_mask = ptst->sig_mask;
7044 /* No stack definition should currently be present. The stack will be set
7045 later by libc via a setustack() call (the getsetcontext syscall). */
7046 ctst->client_stack_highest_byte = 0;
7047 ctst->client_stack_szB = 0;
7048 vg_assert(ctst->os_state.stk_id == NULL_STK_ID);
7050 /* Inform a tool that a new thread is created. This has to be done before
7051 any other core->tool event is sent. */
7052 vg_assert(VG_(owns_BigLock_LL)(tid));
7053 VG_TRACK(pre_thread_ll_create, tid, ctid);
7054 tool_informed = True;
7056 #if defined(VGP_x86_solaris)
7057 /* Set up GDT (this has to be done before calling
7058 VG_(restore_context)()). */
7059 ML_(setup_gdt)(&ctst->arch.vex);
7060 #elif defined(VGP_amd64_solaris)
7061 /* Nothing to do. */
7062 #else
7063 # error "Unknown platform"
7064 #endif
7066 /* Now set up the new thread according to ucontext_t. */
7067 VG_(restore_context)(ctid, (vki_ucontext_t*)ARG1, Vg_CoreSysCall,
7068 True/*esp_is_thrptr*/);
7070 /* Set up V thread (this also tells the kernel to block all signals in the
7071 thread). */
7072 ML_(setup_start_thread_context)(ctid, &uc);
7074 /* Actually create the new thread. */
7075 res = VG_(do_syscall3)(__NR_lwp_create, (UWord)&uc, ARG2, ARG3);
7077 if (!sr_isError(res)) {
7078 if (ARG3 != 0)
7079 POST_MEM_WRITE(ARG3, sizeof(vki_id_t));
7080 if (ARG2 & VKI_LWP_DAEMON)
7081 ctst->os_state.daemon_thread = True;
7084 out:
7085 if (sr_isError(res)) {
7086 if (tool_informed) {
7087 /* Tell a tool the thread exited in a hurry. */
7088 VG_TRACK(pre_thread_ll_exit, ctid);
7091 /* lwp_create failed. */
7092 VG_(cleanup_thread)(&ctst->arch);
7093 ctst->status = VgTs_Empty;
7096 SET_STATUS_from_SysRes(res);
7099 PRE(sys_lwp_exit)
7101 /* void syslwp_exit(); */
7102 ThreadState *tst = VG_(get_ThreadState)(tid);
7103 PRINT("sys_lwp_exit ( )");
7104 PRE_REG_READ0(long, "lwp_exit");
7106 /* Set the thread's status to be exiting, then claim that the syscall
7107 succeeded. */
7108 tst->exitreason = VgSrc_ExitThread;
7109 tst->os_state.exitcode = 0;
7110 SET_STATUS_Success(0);
7113 PRE(sys_lwp_suspend)
7115 /* int lwp_suspend(id_t lwpid); */
7116 ThreadState *tst = VG_(get_ThreadState)(tid);
7117 PRINT("sys_lwp_suspend ( %ld )", SARG1);
7118 PRE_REG_READ1(long, "lwp_suspend", vki_id_t, lwpid);
7120 if (ARG1 == tst->os_state.lwpid) {
7121 /* Set the SfMayBlock flag only if the currently running thread is the
7122 one being suspended. If the flag were also set when suspending other
7123 threads, a thread holding the_BigLock could end up suspended and
7124 Valgrind would hang. */
7125 *flags |= SfMayBlock;
7129 PRE(sys_lwp_continue)
7131 /* int lwp_continue(id_t target_lwp); */
7132 PRINT("sys_lwp_continue ( %ld )", SARG1);
7133 PRE_REG_READ1(long, "lwp_continue", vki_id_t, target_lwp);
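/* Shared helper for the lwp_kill and lwp_sigqueue wrappers: it validates the
   signal number, requests a signal poll after the syscall, handles SIGKILL
   aimed at one of our own threads internally, and asks for the slow
   (blocking) syscall route otherwise. */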
7136 static void
7137 do_lwp_sigqueue(const HChar *syscall_name, UWord target_lwp, UWord signo,
7138 SyscallStatus *status, UWord *flags)
7140 if (!ML_(client_signal_OK)(signo)) {
7141 SET_STATUS_Failure(VKI_EINVAL);
7142 return;
7145 /* Check to see if this gave us a pending signal. */
7146 *flags |= SfPollAfter;
7148 if (VG_(clo_trace_signals))
7149 VG_(message)(Vg_DebugMsg, "%s: sending signal %lu to thread %lu\n",
7150 syscall_name, signo, target_lwp);
7152 /* If we're sending SIGKILL, check to see if the target is one of our
7153 threads and handle it specially. */
7154 if (signo == VKI_SIGKILL && ML_(do_sigkill)(target_lwp, -1)) {
7155 SET_STATUS_Success(0);
7156 return;
7159 /* Ask to handle this syscall via the slow route, since that's the only one
7160 that sets tst->status to VgTs_WaitSys. If the result of doing the
7161 syscall is an immediate run of async_signalhandler() in m_signals.c,
7162 then we need the thread to be properly tidied away. */
7163 *flags |= SfMayBlock;
7166 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
7167 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID)
7168 PRE(sys_lwp_sigqueue)
7170 /* int lwp_sigqueue(pid_t target_pid, id_t target_lwp, int signal,
7171 void *value, int si_code, timespec_t *timeout); */
7173 PRINT("sys_lwp_sigqueue ( %ld, %ld, %ld, %#lx, %ld, %#lx )",
7174 SARG1, SARG2, SARG3, ARG4, SARG5, ARG6);
7175 PRE_REG_READ6(long, "lwp_sigqueue", vki_pid_t, target_pid,
7176 vki_id_t, target_lwp, int, signal, void *, value, int, si_code,
7177 vki_timespec_t *, timeout);
7179 if (ARG6)
7180 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG6, sizeof(vki_timespec_t));
7182 if ((ARG1 == 0) || (ARG1 == VG_(getpid)())) {
7183 do_lwp_sigqueue("lwp_sigqueue", ARG2, ARG3, status, flags);
7184 } else {
7185 /* Signal is sent to a different process. */
7186 if (VG_(clo_trace_signals))
7187 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sending signal %ld to "
7188 "process %ld, thread %ld\n", SARG3, SARG1, SARG2);
7189 *flags |= SfMayBlock;
7193 POST(sys_lwp_sigqueue)
7195 if (VG_(clo_trace_signals))
7196 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %ld to process %ld, "
7197 "thread %ld\n", SARG3, SARG1, SARG2);
7200 #else
7202 PRE(sys_lwp_sigqueue)
7204 /* int lwp_sigqueue(id_t target_lwp, int signal, void *value,
7205 int si_code, timespec_t *timeout); */
7207 PRINT("sys_lwp_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
7208 SARG1, SARG2, ARG3, SARG4, ARG5);
7209 PRE_REG_READ5(long, "lwp_sigqueue", vki_id_t, target_lwp, int, signal,
7210 void *, value, int, si_code, vki_timespec_t *, timeout);
7212 if (ARG5)
7213 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
7215 do_lwp_sigqueue("lwp_sigqueue", ARG1, ARG2, status, flags);
7218 POST(sys_lwp_sigqueue)
7220 if (VG_(clo_trace_signals))
7221 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %lu to thread %lu\n",
7222 ARG2, ARG1);
7226 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID */
7228 #else
7230 PRE(sys_lwp_kill)
7232 /* int lwp_kill(id_t target_lwp, int signal); */
7233 PRINT("sys_lwp_kill ( %ld, %ld )", SARG1, SARG2);
7234 PRE_REG_READ2(long, "lwp_kill", vki_id_t, target_lwp, int, signal);
7236 do_lwp_sigqueue("lwp_kill", ARG1, ARG2, status, flags);
7239 POST(sys_lwp_kill)
7241 if (VG_(clo_trace_signals))
7242 VG_(message)(Vg_DebugMsg, "lwp_kill: sent signal %lu to thread %lu\n",
7243 ARG2, ARG1);
7245 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
7247 PRE(sys_lwp_self)
7249 /* id_t lwp_self(void); */
7250 PRINT("sys_lwp_self ( )");
7251 PRE_REG_READ0(long, "lwp_self");
7254 PRE(sys_lwp_sigmask)
7256 /* int64_t lwp_sigmask(int how, uint_t bits0, uint_t bits1, uint_t bits2,
7257 uint_t bits3); */
7258 vki_sigset_t sigset;
7259 PRINT("sys_lwp_sigmask ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
7260 ARG4, ARG5);
7261 PRE_REG_READ5(long, "lwp_sigmask", int, how, vki_uint_t, bits0,
7262 vki_uint_t, bits1, vki_uint_t, bits2, vki_uint_t, bits3);
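/* Reassemble the four word-sized chunks of the mask into a vki_sigset_t and
   let the generic sigprocmask machinery apply it. */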
7264 sigset.__sigbits[0] = ARG2;
7265 sigset.__sigbits[1] = ARG3;
7266 sigset.__sigbits[2] = ARG4;
7267 sigset.__sigbits[3] = ARG5;
7269 SET_STATUS_from_SysRes(
7270 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, &sigset, NULL)
7273 if (SUCCESS)
7274 *flags |= SfPollAfter;
7277 PRE(sys_lwp_private)
7279 /* int lwp_private(int cmd, int which, uintptr_t base); */
7280 ThreadState *tst = VG_(get_ThreadState)(tid);
7281 Int supported_base, supported_sel;
7282 PRINT("sys_lwp_private ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7283 PRE_REG_READ3(long, "lwp_private", int, cmd, int, which,
7284 uintptr_t, base);
7286 /* Note: Only the %gs base is currently supported on x86 and the %fs base
7287 on amd64. Support for the %fs base on x86 and for the %gs base on amd64
7288 should be added. Anything else is probably a client program error. */
7289 #if defined(VGP_x86_solaris)
7290 supported_base = VKI_LWP_GSBASE;
7291 supported_sel = VKI_LWPGS_SEL;
7292 #elif defined(VGP_amd64_solaris)
7293 supported_base = VKI_LWP_FSBASE;
7294 supported_sel = 0;
7295 #else
7296 #error "Unknown platform"
7297 #endif
7298 if (ARG2 != supported_base) {
7299 VG_(unimplemented)("Syswrap of the lwp_private call where which=%ld.",
7300 SARG2);
7301 /*NOTREACHED*/
7304 switch (ARG1 /*cmd*/) {
7305 case VKI_LWP_SETPRIVATE:
7306 #if defined(VGP_x86_solaris)
7307 tst->os_state.thrptr = ARG3;
7308 ML_(update_gdt_lwpgs)(tid);
7309 #elif defined(VGP_amd64_solaris)
7310 tst->arch.vex.guest_FS_CONST = ARG3;
7311 #else
7312 #error "Unknown platform"
7313 #endif
7314 SET_STATUS_Success(supported_sel);
7315 break;
7316 case VKI_LWP_GETPRIVATE:
7318 int thrptr;
7319 #if defined(VGP_x86_solaris)
7320 thrptr = tst->os_state.thrptr;
7321 #elif defined(VGP_amd64_solaris)
7322 thrptr = tst->arch.vex.guest_FS_CONST;
7323 #else
7324 #error "Unknown platform"
7325 #endif
7327 if (thrptr == 0) {
7328 SET_STATUS_Failure(VKI_EINVAL);
7329 return;
7332 #if defined(VGP_x86_solaris)
7333 if (tst->arch.vex.guest_GS != supported_sel) {
7334 SET_STATUS_Failure(VKI_EINVAL);
7335 return;
7337 #elif defined(VGP_amd64_solaris)
7338 /* Valgrind on amd64 does not allow changing the %gs register, so
7339 there is no need to check here that guest_GS is equal to
7340 supported_sel. */
7341 #else
7342 #error "Unknown platform"
7343 #endif
7345 PRE_MEM_WRITE("lwp_private(base)", ARG3, sizeof(Addr));
7346 if (!ML_(safe_to_deref((void*)ARG3, sizeof(Addr)))) {
7347 SET_STATUS_Failure(VKI_EFAULT);
7348 return;
7350 *(Addr*)ARG3 = thrptr;
7351 POST_MEM_WRITE((Addr)ARG3, sizeof(Addr));
7352 SET_STATUS_Success(0);
7353 break;
7355 default:
7356 VG_(unimplemented)("Syswrap of the lwp_private call where cmd=%ld.",
7357 SARG1);
7358 /*NOTREACHED*/
7359 break;
7363 PRE(sys_lwp_wait)
7365 /* int lwp_wait(id_t lwpid, id_t *departed); */
7366 *flags |= SfMayBlock;
7367 PRINT("sys_lwp_wait ( %ld, %#lx )", SARG1, ARG2);
7368 PRE_REG_READ2(long, "lwp_wait", vki_id_t, lwpid, vki_id_t *, departed);
7369 if (ARG2)
7370 PRE_MEM_WRITE("lwp_wait(departed)", ARG2, sizeof(vki_id_t));
7373 POST(sys_lwp_wait)
7375 POST_MEM_WRITE(ARG2, sizeof(vki_id_t));
7378 PRE(sys_lwp_mutex_wakeup)
7380 /* int lwp_mutex_wakeup(lwp_mutex_t *lp, int release_all); */
7381 *flags |= SfMayBlock;
7382 PRINT("sys_lwp_mutex_wakeup ( %#lx, %ld )", ARG1, SARG2);
7383 PRE_REG_READ2(long, "lwp_mutex_wakeup", vki_lwp_mutex_t *, lp,
7384 int, release_all);
7385 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
7386 PRE_FIELD_READ("lwp_mutex_wakeup(lp->mutex_type)", lp->vki_mutex_type);
7387 PRE_FIELD_WRITE("lwp_mutex_wakeup(lp->mutex_waiters)",
7388 lp->vki_mutex_waiters);
7391 POST(sys_lwp_mutex_wakeup)
7393 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
7394 POST_FIELD_WRITE(lp->vki_mutex_waiters);
7397 PRE(sys_lwp_cond_wait)
7399 /* int lwp_cond_wait(lwp_cond_t *cvp, lwp_mutex_t *mp, timespec_t *tsp,
7400 int check_park); */
7401 *flags |= SfMayBlock;
7402 PRINT("sys_lwp_cond_wait( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
7403 PRE_REG_READ4(long, "lwp_cond_wait", vki_lwp_cond_t *, cvp,
7404 vki_lwp_mutex_t *, mp, vki_timespec_t *, tsp, int, check_park);
7406 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7407 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7408 PRE_FIELD_READ("lwp_cond_wait(cvp->type)", cvp->vki_cond_type);
7409 PRE_FIELD_READ("lwp_cond_wait(cvp->waiters_kernel)",
7410 cvp->vki_cond_waiters_kernel);
7411 PRE_FIELD_READ("lwp_cond_wait(mp->mutex_type)", mp->vki_mutex_type);
7412 PRE_FIELD_WRITE("lwp_cond_wait(mp->mutex_waiters)", mp->vki_mutex_waiters);
7413 if (ARG3 != 0)
7414 PRE_MEM_READ("lwp_cond_wait(tsp)", ARG3, sizeof(vki_timespec_t));
7417 POST(sys_lwp_cond_wait)
7419 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7420 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7421 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7422 POST_FIELD_WRITE(mp->vki_mutex_waiters);
7423 if (ARG3 != 0)
7424 POST_MEM_WRITE(ARG3, sizeof(vki_timespec_t));
7427 PRE(sys_lwp_cond_signal)
7429 /* int lwp_cond_signal(lwp_cond_t *cvp); */
7430 *flags |= SfMayBlock;
7431 PRINT("sys_lwp_cond_signal( %#lx )", ARG1);
7432 PRE_REG_READ1(long, "lwp_cond_signal", vki_lwp_cond_t *, cvp);
7434 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7435 PRE_FIELD_READ("lwp_cond_signal(cvp->type)", cvp->vki_cond_type);
7436 PRE_FIELD_READ("lwp_cond_signal(cvp->waiters_kernel)",
7437 cvp->vki_cond_waiters_kernel);
7440 POST(sys_lwp_cond_signal)
7442 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7443 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7446 PRE(sys_lwp_cond_broadcast)
7448 /* int lwp_cond_broadcast(lwp_cond_t *cvp); */
7449 *flags |= SfMayBlock;
7450 PRINT("sys_lwp_cond_broadcast ( %#lx )", ARG1);
7451 PRE_REG_READ1(long, "lwp_cond_broadcast", vki_lwp_cond_t *, cvp);
7453 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7454 PRE_FIELD_READ("lwp_cond_broadcast(cvp->type)", cvp->vki_cond_type);
7455 PRE_FIELD_READ("lwp_cond_broadcast(cvp->waiters_kernel)",
7456 cvp->vki_cond_waiters_kernel);
7457 /*PRE_FIELD_WRITE("lwp_cond_broadcast(cvp->waiters_kernel)",
7458 cvp->vki_cond_waiters_kernel);*/
7461 POST(sys_lwp_cond_broadcast)
7463 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7464 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7467 PRE(sys_pread)
7469 /* ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset); */
7470 *flags |= SfMayBlock;
7471 PRINT("sys_pread ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7472 PRE_REG_READ4(long, "pread", int, fildes, void *, buf,
7473 vki_size_t, nbyte, vki_off_t, offset);
7474 PRE_MEM_WRITE("pread(buf)", ARG2, ARG3);
7476 /* Be strict. */
7477 if (!ML_(fd_allowed)(ARG1, "pread", tid, False))
7478 SET_STATUS_Failure(VKI_EBADF);
7481 POST(sys_pread)
7483 POST_MEM_WRITE(ARG2, RES);
7486 PRE(sys_pwrite)
7488 /* ssize_t pwrite(int fildes, const void *buf, size_t nbyte,
7489 off_t offset); */
7490 *flags |= SfMayBlock;
7491 PRINT("sys_pwrite ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7492 PRE_REG_READ4(long, "pwrite", int, fildes, const void *, buf,
7493 vki_size_t, nbyte, vki_off_t, offset);
7494 PRE_MEM_READ("pwrite(buf)", ARG2, ARG3);
7496 /* Be strict. */
7497 if (!ML_(fd_allowed)(ARG1, "pwrite", tid, False))
7498 SET_STATUS_Failure(VKI_EBADF);
7501 PRE(sys_getpagesizes)
7503 /* int getpagesizes(int legacy, size_t *buf, int nelem); */
7504 PRINT("sys_getpagesizes ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
7505 PRE_REG_READ3(long, "getpagesizes", int, legacy, size_t *, buf,
7506 int, nelem);
7507 if (ARG2)
7508 PRE_MEM_WRITE("getpagesizes(buf)", ARG2, ARG3 * sizeof(vki_size_t));
7511 POST(sys_getpagesizes)
7513 if (ARG2)
7514 POST_MEM_WRITE(ARG2, RES * sizeof(vki_size_t));
7517 PRE(sys_lgrpsys)
7519 /* Kernel: int lgrpsys(int subcode, long ia, void *ap); */
7520 switch (ARG1 /*subcode*/) {
7521 case VKI_LGRP_SYS_MEMINFO:
7522 PRINT("sys_lgrpsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7523 PRE_REG_READ3(long, SC2("lgrpsys", "meminfo"), int, subcode,
7524 int, addr_count, vki_meminfo_t *, minfo);
7525 PRE_MEM_READ("lgrpsys(minfo)", ARG3, sizeof(vki_meminfo_t));
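/* The meminfo_t argument itself points to further input and output arrays
   whose sizes depend on the address count in ARG2 and on mi_info_count, so
   describe those as well once the structure can be safely read. */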
7527 if (ML_(safe_to_deref)((vki_meminfo_t *) ARG3, sizeof(vki_meminfo_t))) {
7528 vki_meminfo_t *minfo = (vki_meminfo_t *) ARG3;
7529 PRE_MEM_READ("lgrpsys(minfo->mi_inaddr)",
7530 (Addr) minfo->mi_inaddr, SARG2 * sizeof(vki_uint64_t));
7531 PRE_MEM_READ("lgrpsys(minfo->mi_info_req)", (Addr) minfo->mi_info_req,
7532 minfo->mi_info_count * sizeof(vki_uint_t));
7533 PRE_MEM_WRITE("lgrpsys(minfo->mi_outdata)", (Addr) minfo->mi_outdata,
7534 SARG2 * minfo->mi_info_count * sizeof(vki_uint64_t));
7535 PRE_MEM_WRITE("lgrpsys(minfo->mi_validity)",
7536 (Addr) minfo->mi_validity, SARG2 * sizeof(vki_uint_t));
7538 break;
7539 case VKI_LGRP_SYS_GENERATION:
7540 /* Liblgrp: lgrp_gen_t lgrp_generation(lgrp_view_t view); */
7541 PRINT("sys_lgrpsys ( %ld, %ld )", SARG1, SARG2);
7542 PRE_REG_READ2(long, SC2("lgrpsys", "generation"), int, subcode,
7543 vki_lgrp_view_t, view);
7544 break;
7545 case VKI_LGRP_SYS_VERSION:
7546 /* Liblgrp: int lgrp_version(int version); */
7547 PRINT("sys_lgrpsys ( %ld, %ld )", SARG1, SARG2);
7548 PRE_REG_READ2(long, SC2("lgrpsys", "version"), int, subcode,
7549 int, version);
7550 break;
7551 case VKI_LGRP_SYS_SNAPSHOT:
7552 /* Liblgrp: int lgrp_snapshot(void *buf, size_t bufsize); */
7553 PRINT("sys_lgrpsys ( %ld, %lu, %#lx )", SARG1, ARG2, ARG3);
7554 PRE_REG_READ3(long, SC2("lgrpsys", "snapshot"), int, subcode,
7555 vki_size_t, bufsize, void *, buf);
7556 PRE_MEM_WRITE("lgrpsys(buf)", ARG3, ARG2);
7557 break;
7558 default:
7559 VG_(unimplemented)("Syswrap of the lgrpsys call with subcode %ld.",
7560 SARG1);
7561 /*NOTREACHED*/
7562 break;
7566 POST(sys_lgrpsys)
7568 switch (ARG1 /*subcode*/) {
7569 case VKI_LGRP_SYS_MEMINFO:
7571 vki_meminfo_t *minfo = (vki_meminfo_t *) ARG3;
7572 POST_MEM_WRITE((Addr) minfo->mi_outdata,
7573 SARG2 * minfo->mi_info_count * sizeof(vki_uint64_t));
7574 POST_MEM_WRITE((Addr) minfo->mi_validity, SARG2 * sizeof(vki_uint_t));
7576 break;
7577 case VKI_LGRP_SYS_GENERATION:
7578 case VKI_LGRP_SYS_VERSION:
7579 break;
7580 case VKI_LGRP_SYS_SNAPSHOT:
7581 POST_MEM_WRITE(ARG3, RES);
7582 break;
7583 default:
7584 vg_assert(0);
7585 break;
7589 PRE(sys_rusagesys)
7591 /* Kernel: int rusagesys(int code, void *arg1, void *arg2,
7592 void *arg3, void *arg4); */
7593 switch (ARG1 /*code*/) {
7594 case VKI__RUSAGESYS_GETRUSAGE:
7595 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7596 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7597 /* Libc: int getrusage(int who, struct rusage *r_usage); */
7598 PRINT("sys_rusagesys ( %ld, %#lx )", SARG1, ARG2);
7599 PRE_REG_READ2(long, SC2("rusagesys", "getrusage"), int, code,
7600 struct vki_rusage *, r_usage);
7601 PRE_MEM_WRITE("rusagesys(r_usage)", ARG2, sizeof(struct vki_rusage));
7602 break;
7604 case VKI__RUSAGESYS_GETVMUSAGE:
7605 /* Libc: int getvmusage(uint_t flags, time_t age,
7606 vmusage_t *buf, size_t *nres); */
7607 PRINT("sys_rusagesys ( %ld, %lu, %ld, %#lx, %#lx )",
7608 SARG1, ARG2, SARG3, ARG4, ARG5);
7609 PRE_REG_READ5(long, SC2("rusagesys", "getvmusage"), int, code,
7610 vki_uint_t, flags, vki_time_t, age,
7611 vki_vmusage_t *, buf, vki_size_t *, nres);
7612 PRE_MEM_READ("rusagesys(nres)", ARG5, sizeof(vki_size_t));
7613 /* PRE_MEM_WRITE("rusagesys(nres)", ARG5, sizeof(vki_size_t)); */
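/* The size of the output buffer is given by the caller through *nres, so it
   can be described only if nres itself is safely readable. */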
7615 if (ML_(safe_to_deref)((void *) ARG5, sizeof(vki_size_t))) {
7616 vki_size_t *nres = (vki_size_t *) ARG5;
7617 PRE_MEM_WRITE("rusagesys(buf)", ARG4,
7618 *nres * sizeof(vki_vmusage_t));
7620 *flags |= SfMayBlock;
7621 break;
7623 default:
7624 VG_(unimplemented)("Syswrap of the rusagesys call with code %ld.", SARG1);
7625 /*NOTREACHED*/
7626 break;
7630 POST(sys_rusagesys)
7632 switch (ARG1 /*code*/) {
7633 case VKI__RUSAGESYS_GETRUSAGE:
7634 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7635 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7636 POST_MEM_WRITE(ARG2, sizeof(struct vki_rusage));
7637 break;
7638 case VKI__RUSAGESYS_GETVMUSAGE:
7640 vki_size_t *nres = (vki_size_t *) ARG5;
7641 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
7642 POST_MEM_WRITE(ARG4, *nres * sizeof(vki_vmusage_t));
7644 break;
7645 default:
7646 vg_assert(0);
7647 break;
7651 PRE(sys_port)
7653 /* Kernel: int64_t portfs(int opcode, uintptr_t a0, uintptr_t a1,
7654 uintptr_t a2, uintptr_t a3, uintptr_t a4); */
7655 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7656 *flags |= SfMayBlock;
7657 switch (opcode) {
7658 case VKI_PORT_CREATE:
7659 PRINT("sys_port ( %ld )", SARG1);
7660 PRE_REG_READ1(long, SC2("port", "create"), int, opcode);
7661 break;
7662 case VKI_PORT_ASSOCIATE:
7663 case VKI_PORT_DISSOCIATE:
7664 PRINT("sys_port ( %ld, %ld, %ld, %#lx, %ld, %#lx )", SARG1, SARG2, SARG3,
7665 ARG4, SARG5, ARG6);
7666 if (opcode == VKI_PORT_ASSOCIATE) {
7667 PRE_REG_READ6(long, SC2("port", "associate"), int, opcode, int, a0,
7668 int, a1, uintptr_t, a2, int, a3, void *, a4);
7670 else {
7671 PRE_REG_READ6(long, SC2("port", "dissociate"), int, opcode, int, a0,
7672 int, a1, uintptr_t, a2, int, a3, void *, a4);
7675 switch (ARG3 /*source*/) {
7676 case VKI_PORT_SOURCE_FD:
7677 if (!ML_(fd_allowed)(ARG4, "port", tid, False)) {
7678 SET_STATUS_Failure(VKI_EBADF);
7680 break;
7681 case VKI_PORT_SOURCE_FILE:
7683 struct vki_file_obj *fo = (struct vki_file_obj *)ARG4;
7684 PRE_MEM_READ("port(file_obj)", ARG4, sizeof(struct vki_file_obj));
7685 if (ML_(safe_to_deref)(&fo->fo_name, sizeof(fo->fo_name)))
7686 PRE_MEM_RASCIIZ("port(file_obj->fo_name)", (Addr)fo->fo_name);
7688 break;
7689 default:
7690 VG_(unimplemented)("Syswrap of the port_associate/dissociate call "
7691 "type %ld.", SARG3);
7692 /*NOTREACHED*/
7693 break;
7695 break;
7696 case VKI_PORT_SEND:
7697 PRINT("sys_port ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
7698 PRE_REG_READ4(long, SC2("port", "send"), int, opcode, int, a0, int, a1,
7699 void *, a2);
7700 break;
7701 case VKI_PORT_SENDN:
7702 PRINT("sys_port ( %ld, %#lx, %#lx, %lu, %lx, %#lx)", SARG1, ARG2, ARG3,
7703 ARG4, ARG5, ARG6);
7704 PRE_REG_READ6(long, SC2("port", "sendn"), int, opcode, int *, a0,
7705 int *, a1, vki_uint_t, a2, int, a3, void *, a4);
7706 PRE_MEM_READ("port(ports)", ARG2, ARG4 * sizeof(int));
7707 PRE_MEM_WRITE("port(errors)", ARG3, ARG4 * sizeof(int));
7708 break;
7709 case VKI_PORT_GET:
7710 PRINT("sys_port ( %ld, %ld, %#lx, %ld, %ld, %#lx )", SARG1, SARG2, ARG3,
7711 SARG4, SARG5, ARG6);
7712 PRE_REG_READ6(long, SC2("port", "get"), int, opcode, int, a0,
7713 port_event_t *, a1, vki_time_t, a2, long, a3,
7714 timespec_t *, a4);
7715 PRE_MEM_WRITE("port(uevp)", ARG3, sizeof(vki_port_event_t));
7716 break;
7717 case VKI_PORT_GETN:
7718 PRINT("sys_port ( %ld, %ld, %#lx, %lu, %lu, %#lx )", SARG1, SARG2, ARG3,
7719 ARG4, ARG5, ARG6);
7720 PRE_REG_READ6(long, SC2("port", "getn"), int, opcode, int, a0,
7721 port_event_t *, a1, vki_uint_t, a2, vki_uint_t, a3,
7722 timespec_t *, a4);
7723 if (ARG6)
7724 PRE_MEM_READ("port(timeout)", ARG6, sizeof(vki_timespec_t));
7725 PRE_MEM_WRITE("port(uevp)", ARG3, ARG4 * sizeof(vki_port_event_t));
7726 break;
7727 case VKI_PORT_ALERT:
7728 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, SARG4,
7729 ARG5);
7730 PRE_REG_READ5(long, SC2("port", "alert"), int, opcode, int, a0, int, a1,
7731 int, a2, void *, a3);
7732 break;
7733 case VKI_PORT_DISPATCH:
7734 // FIXME: check order: SARG2, SARG1 or SARG1, SARG2 ??
7735 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx, %#lx )", SARG2, SARG1, SARG3,
7736 SARG4, ARG5, ARG6);
7737 PRE_REG_READ6(long, SC2("port", "dispatch"), int, opcode, int, a0,
7738 int, a1, int, a2, uintptr_t, a3, void *, a4);
7739 break;
7740 default:
7741 VG_(unimplemented)("Syswrap of the port call with opcode %ld.", SARG1);
7742 /*NOTREACHED*/
7743 break;
7746 /* Be strict. */
7747 if ((opcode != VKI_PORT_CREATE && opcode != VKI_PORT_SENDN) &&
7748 !ML_(fd_allowed)(ARG2, "port", tid, False))
7749 SET_STATUS_Failure(VKI_EBADF);
7752 POST(sys_port)
7754 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7755 switch (opcode) {
7756 case VKI_PORT_CREATE:
7757 if (!ML_(fd_allowed)(RES, "port", tid, True)) {
7758 VG_(close)(RES);
7759 SET_STATUS_Failure(VKI_EMFILE);
7761 else if (VG_(clo_track_fds))
7762 ML_(record_fd_open_named)(tid, RES);
7763 break;
7764 case VKI_PORT_ASSOCIATE:
7765 case VKI_PORT_DISSOCIATE:
7766 case VKI_PORT_SEND:
7767 break;
7768 case VKI_PORT_SENDN:
7769 if (RES != ARG4) {
7770 /* If there is any error then the whole errors area is written. */
7771 POST_MEM_WRITE(ARG3, ARG4 * sizeof(int));
7773 break;
7774 case VKI_PORT_GET:
7775 POST_MEM_WRITE(ARG3, sizeof(vki_port_event_t));
7776 break;
7777 case VKI_PORT_GETN:
7778 POST_MEM_WRITE(ARG3, RES * sizeof(vki_port_event_t));
7779 break;
7780 case VKI_PORT_ALERT:
7781 case VKI_PORT_DISPATCH:
7782 break;
7783 default:
7784 VG_(unimplemented)("Syswrap of the port call with opcode %lu.", ARG1);
7785 /*NOTREACHED*/
7786 break;
7790 PRE(sys_pollsys)
7792 /* int pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeout,
7793 sigset_t *set); */
7794 UWord i;
7795 struct vki_pollfd *ufds = (struct vki_pollfd *)ARG1;
7797 *flags |= SfMayBlock | SfPostOnFail;
7799 PRINT("sys_pollsys ( %#lx, %lu, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
7800 PRE_REG_READ4(long, "poll", pollfd_t *, fds, vki_nfds_t, nfds,
7801 timespec_t *, timeout, sigset_t *, set);
7803 for (i = 0; i < ARG2; i++) {
7804 vki_pollfd_t *u = &ufds[i];
7805 PRE_FIELD_READ("poll(ufds.fd)", u->fd);
7806 /* XXX Check if it's valid? */
7807 PRE_FIELD_READ("poll(ufds.events)", u->events);
7808 PRE_FIELD_WRITE("poll(ufds.revents)", u->revents);
7811 if (ARG3)
7812 PRE_MEM_READ("poll(timeout)", ARG3, sizeof(vki_timespec_t));
7814 if (ARG4) {
7815 PRE_MEM_READ("poll(set)", ARG4, sizeof(vki_sigset_t));
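/* Take a private copy of the client's signal mask, sanitize it so signals
   Valgrind needs cannot be blocked, and pass the copy to the kernel by
   rewriting ARG4. ARG4 == 1 marks an unreadable mask for the POST hook. */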
7817 const vki_sigset_t *guest_sigmask = (vki_sigset_t *) ARG4;
7818 if (!ML_(safe_to_deref)(guest_sigmask, sizeof(vki_sigset_t))) {
7819 ARG4 = 1; /* Something recognisable to POST() hook. */
7820 } else {
7821 vki_sigset_t *vg_sigmask =
7822 VG_(malloc)("syswrap.pollsys.1", sizeof(vki_sigset_t));
7823 ARG4 = (Addr) vg_sigmask;
7824 *vg_sigmask = *guest_sigmask;
7825 VG_(sanitize_client_sigmask)(vg_sigmask);
7830 POST(sys_pollsys)
7832 vg_assert(SUCCESS || FAILURE);
7834 if (SUCCESS && (RES >= 0)) {
7835 UWord i;
7836 vki_pollfd_t *ufds = (vki_pollfd_t*)ARG1;
7837 for (i = 0; i < ARG2; i++)
7838 POST_FIELD_WRITE(ufds[i].revents);
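/* Release the sanitized signal mask copy allocated by the PRE wrapper,
   unless ARG4 holds the 'unreadable mask' sentinel value. */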
7841 if ((ARG4 != 0) && (ARG4 != 1)) {
7842 VG_(free)((vki_sigset_t *) ARG4);
7846 PRE(sys_labelsys)
7848 /* Kernel: int labelsys(int op, void *a1, void *a2, void *a3,
7849 void *a4, void *a5); */
7851 switch (ARG1 /*op*/) {
7852 case VKI_TSOL_SYSLABELING:
7853 /* Libc: int is_system_labeled(void); */
7854 PRINT("sys_labelsys ( %ld )", SARG1);
7855 PRE_REG_READ1(long, SC2("labelsys", "syslabeling"), int, op);
7856 break;
7858 case VKI_TSOL_TNRH:
7859 /* Libtsnet: int tnrh(int cmd, tsol_rhent_t *buf); */
7860 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7861 PRE_REG_READ3(long, SC2("labelsys", "tnrh"), int, op, int, cmd,
7862 vki_tsol_rhent_t *, buf);
7863 if (ARG2 != VKI_TNDB_FLUSH)
7864 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_rhent_t));
7865 break;
7867 case VKI_TSOL_TNRHTP:
7868 /* Libtsnet: int tnrhtp(int cmd, tsol_tpent_t *buf); */
7869 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7870 PRE_REG_READ3(long, SC2("labelsys", "tnrhtp"), int, op, int, cmd,
7871 vki_tsol_tpent_t *, buf);
7872 if (ARG2 != VKI_TNDB_FLUSH)
7873 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_tpent_t));
7874 break;
7876 case VKI_TSOL_TNMLP:
7877 /* Libtsnet: int tnmlp(int cmd, tsol_mlpent_t *buf); */
7878 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7879 PRE_REG_READ3(long, SC2("labelsys", "tnmlp"), int, op, int, cmd,
7880 vki_tsol_mlpent_t *, buf);
7881 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_mlpent_t));
7882 break;
7884 case VKI_TSOL_GETLABEL:
7885 /* Libtsol: int getlabel(const char *path, bslabel_t *label); */
7886 PRINT("sys_labelsys ( %ld, %#lx(%s), %#lx )",
7887 SARG1, ARG2, (HChar *) ARG2, ARG3);
7888 PRE_REG_READ3(long, SC2("labelsys", "getlabel"), int, op,
7889 const char *, path, vki_bslabel_t *, label);
7890 PRE_MEM_RASCIIZ("labelsys(path)", ARG2);
7891 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7892 break;
7894 case VKI_TSOL_FGETLABEL:
7895 /* Libtsol: int fgetlabel(int fd, bslabel_t *label); */
7896 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7897 PRE_REG_READ3(long, SC2("labelsys", "fgetlabel"), int, op,
7898 int, fd, vki_bslabel_t *, label);
7899 /* Be strict. */
7900 if (!ML_(fd_allowed)(ARG2, "labelsys(fgetlabel)", tid, False))
7901 SET_STATUS_Failure(VKI_EBADF);
7902 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7903 break;
7905 #if defined(SOLARIS_TSOL_CLEARANCE)
7906 case VKI_TSOL_GETCLEARANCE:
7907 /* Libtsol: int getclearance(bslabel_t *clearance); */
7908 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7909 PRE_REG_READ2(long, SC2("labelsys", "getclearance"), int, op,
7910 vki_bslabel_t *, clearance);
7911 PRE_MEM_WRITE("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7912 break;
7914 case VKI_TSOL_SETCLEARANCE:
7915 /* Libtsol: int setclearance(bslabel_t *clearance); */
7916 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7917 PRE_REG_READ2(long, SC2("labelsys", "setclearance"), int, op,
7918 vki_bslabel_t *, clearance);
7919 PRE_MEM_READ("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7920 break;
7921 #endif /* SOLARIS_TSOL_CLEARANCE */
7923 default:
7924 VG_(unimplemented)("Syswrap of the labelsys call with op %ld.", SARG1);
7925 /*NOTREACHED*/
7926 break;
7930 POST(sys_labelsys)
7932 switch (ARG1 /*op*/) {
7933 case VKI_TSOL_SYSLABELING:
7934 break;
7936 case VKI_TSOL_TNRH:
7937 switch (ARG2 /*cmd*/) {
7938 case VKI_TNDB_LOAD:
7939 case VKI_TNDB_DELETE:
7940 case VKI_TNDB_FLUSH:
7941 break;
7942 #if defined(SOLARIS_TNDB_GET_TNIP)
7943 case VKI_TNDB_GET_TNIP:
7944 #endif /* SOLARIS_TNDB_GET_TNIP */
7945 case VKI_TNDB_GET:
7946 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_rhent_t));
7947 break;
7948 default:
7949 vg_assert(0);
7950 break;
7952 break;
7954 case VKI_TSOL_TNRHTP:
7955 switch (ARG2 /*cmd*/) {
7956 case VKI_TNDB_LOAD:
7957 case VKI_TNDB_DELETE:
7958 case VKI_TNDB_FLUSH:
7959 break;
7960 case VKI_TNDB_GET:
7961 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_tpent_t));
7962 break;
7963 default:
7964 vg_assert(0);
7965 break;
7967 break;
7969 case VKI_TSOL_TNMLP:
7970 switch (ARG2 /*cmd*/) {
7971 case VKI_TNDB_LOAD:
7972 case VKI_TNDB_DELETE:
7973 case VKI_TNDB_FLUSH:
7974 break;
7975 case VKI_TNDB_GET:
7976 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_mlpent_t));
7977 break;
7978 default:
7979 vg_assert(0);
7980 break;
7982 break;
7984 case VKI_TSOL_GETLABEL:
7985 case VKI_TSOL_FGETLABEL:
7986 POST_MEM_WRITE(ARG3, sizeof(vki_bslabel_t));
7987 break;
7989 #if defined(SOLARIS_TSOL_CLEARANCE)
7990 case VKI_TSOL_GETCLEARANCE:
7991 POST_MEM_WRITE(ARG2, sizeof(vki_bslabel_t));
7992 break;
7994 case VKI_TSOL_SETCLEARANCE:
7995 break;
7996 #endif /* SOLARIS_TSOL_CLEARANCE */
7998 default:
7999 vg_assert(0);
8000 break;
8004 PRE(sys_acl)
8006 /* int acl(char *pathp, int cmd, int nentries, void *aclbufp); */
8007 PRINT("sys_acl ( %#lx(%s), %ld, %ld, %#lx )", ARG1, (HChar *) ARG1, SARG2,
8008 SARG3, ARG4);
8010 PRE_REG_READ4(long, "acl", char *, pathp, int, cmd,
8011 int, nentries, void *, aclbufp);
8012 PRE_MEM_RASCIIZ("acl(pathp)", ARG1);
8014 switch (ARG2 /*cmd*/) {
8015 case VKI_SETACL:
8016 if (ARG4)
8017 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8018 break;
8019 case VKI_GETACL:
8020 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8021 break;
8022 case VKI_GETACLCNT:
8023 break;
8024 case VKI_ACE_SETACL:
8025 if (ARG4)
8026 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8027 break;
8028 case VKI_ACE_GETACL:
8029 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8030 break;
8031 case VKI_ACE_GETACLCNT:
8032 break;
8033 default:
8034 VG_(unimplemented)("Syswrap of the acl call with cmd %ld.", SARG2);
8035 /*NOTREACHED*/
8036 break;
8040 POST(sys_acl)
8042 switch (ARG2 /*cmd*/) {
8043 case VKI_SETACL:
8044 break;
8045 case VKI_GETACL:
8046 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
8047 break;
8048 case VKI_GETACLCNT:
8049 break;
8050 case VKI_ACE_SETACL:
8051 break;
8052 case VKI_ACE_GETACL:
8053 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
8054 break;
8055 case VKI_ACE_GETACLCNT:
8056 break;
8057 default:
8058 vg_assert(0);
8059 break;
8063 PRE(sys_auditsys)
8065 /* Kernel: int auditsys(long code, long a1, long a2, long a3, long a4); */
8066 switch (ARG1 /*code*/) {
8067 case VKI_BSM_GETAUID:
8068 /* Libbsm: int getauid(au_id_t *auid); */
8069 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8070 PRE_REG_READ2(long, SC2("auditsys", "getauid"), long, code,
8071 vki_au_id_t *, auid);
8072 PRE_MEM_WRITE("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
8073 break;
8074 case VKI_BSM_SETAUID:
8075 /* Libbsm: int setauid(au_id_t *auid); */
8076 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8077 PRE_REG_READ2(long, SC2("auditsys", "setauid"), long, code,
8078 vki_au_id_t *, auid);
8079 PRE_MEM_READ("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
8080 break;
8081 case VKI_BSM_GETAUDIT:
8082 /* Libbsm: int getaudit(auditinfo_t *ai); */
8083 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8084 PRE_REG_READ2(long, SC2("auditsys", "getaudit"), long, code,
8085 vki_auditinfo_t *, ai);
8086 PRE_MEM_WRITE("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
8087 break;
8088 case VKI_BSM_SETAUDIT:
8089 /* Libbsm: int setaudit(auditinfo_t *ai); */
8090 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
8091 PRE_REG_READ2(long, SC2("auditsys", "setaudit"), long, code,
8092 vki_auditinfo_t *, ai);
8093 PRE_MEM_READ("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
8094 break;
8095 case VKI_BSM_AUDIT:
8096 /* Libbsm: int audit(void *record, int length); */
8097 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8098 PRE_REG_READ3(long, SC2("auditsys", "audit"), long, code,
8099 void *, record, int, length);
8100 PRE_MEM_READ("auditsys(record)", ARG2, ARG3);
8101 break;
8102 case VKI_BSM_AUDITCTL:
8103 /* Libbsm: int auditon(int cmd, caddr_t data, int length); */
8104 PRINT("sys_auditsys ( %ld, %ld, %#lx, %ld )",
8105 SARG1, SARG2, ARG3, SARG4);
8107 switch (ARG2 /*cmd*/) {
8108 case VKI_A_GETPOLICY:
8109 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpolicy"),
8110 long, code, int, cmd, vki_uint32_t *, policy);
8111 PRE_MEM_WRITE("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
8112 break;
8113 case VKI_A_SETPOLICY:
8114 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpolicy"),
8115 long, code, int, cmd, vki_uint32_t *, policy);
8116 PRE_MEM_READ("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
8117 break;
8118 case VKI_A_GETKMASK:
8119 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getkmask"),
8120 long, code, int, cmd, vki_au_mask_t *, kmask);
8121 PRE_MEM_WRITE("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
8122 break;
8123 case VKI_A_SETKMASK:
8124 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setkmask"),
8125 long, code, int, cmd, vki_au_mask_t *, kmask);
8126 PRE_MEM_READ("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
8127 break;
8128 case VKI_A_GETQCTRL:
8129 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getqctrl"),
8130 long, code, int, cmd,
8131 struct vki_au_qctrl *, qctrl);
8132 PRE_MEM_WRITE("auditsys(qctrl)", ARG3,
8133 sizeof(struct vki_au_qctrl));
8134 break;
8135 case VKI_A_SETQCTRL:
8136 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setqctrl"),
8137 long, code, int, cmd,
8138 struct vki_au_qctrl *, qctrl);
8139 PRE_MEM_READ("auditsys(qctrl)", ARG3,
8140 sizeof(struct vki_au_qctrl));
8141 break;
8142 case VKI_A_GETCWD:
8143 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcwd"),
8144 long, code, int, cmd, char *, data, int, length);
8145 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
8146 break;
8147 case VKI_A_GETCAR:
8148 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcar"),
8149 long, code, int, cmd, char *, data, int, length);
8150 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
8151 break;
8152 #if defined(SOLARIS_AUDITON_STAT)
8153 case VKI_A_GETSTAT:
8154 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getstat"),
8155 long, code, int, cmd, vki_au_stat_t *, stats);
8156 PRE_MEM_WRITE("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
8157 break;
8158 case VKI_A_SETSTAT:
8159 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setstat"),
8160 long, code, int, cmd, vki_au_stat_t *, stats);
8161 PRE_MEM_READ("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
8162 break;
8163 #endif /* SOLARIS_AUDITON_STAT */
8164 case VKI_A_SETUMASK:
8165 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setumask"),
8166 long, code, int, cmd, vki_auditinfo_t *, umask);
8167 PRE_MEM_READ("auditsys(umask)", ARG3, sizeof(vki_auditinfo_t));
8168 break;
8169 case VKI_A_SETSMASK:
8170 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setsmask"),
8171 long, code, int, cmd, vki_auditinfo_t *, smask);
8172 PRE_MEM_READ("auditsys(smask)", ARG3, sizeof(vki_auditinfo_t));
8173 break;
8174 case VKI_A_GETCOND:
8175 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getcond"),
8176 long, code, int, cmd, int *, cond);
8177 PRE_MEM_WRITE("auditsys(cond)", ARG3, sizeof(int));
8178 break;
8179 case VKI_A_SETCOND:
8180 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setcond"),
8181 long, code, int, cmd, int *, state);
8182 PRE_MEM_READ("auditsys(cond)", ARG3, sizeof(int));
8183 break;
8184 case VKI_A_GETCLASS:
8185 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getclass"),
8186 long, code, int, cmd,
8187 vki_au_evclass_map_t *, classmap);
8189 if (ML_(safe_to_deref((void *) ARG3,
8190 sizeof(vki_au_evclass_map_t)))) {
8191 vki_au_evclass_map_t *classmap =
8192 (vki_au_evclass_map_t *) ARG3;
8193 PRE_FIELD_READ("auditsys(classmap.ec_number)",
8194 classmap->ec_number);
8195 PRE_MEM_WRITE("auditsys(classmap)", ARG3,
8196 sizeof(vki_au_evclass_map_t));
8198 break;
8199 case VKI_A_SETCLASS:
8200 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setclass"),
8201 long, code, int, cmd,
8202 vki_au_evclass_map_t *, classmap);
8204 if (ML_(safe_to_deref((void *) ARG3,
8205 sizeof(vki_au_evclass_map_t)))) {
8206 vki_au_evclass_map_t *classmap =
8207 (vki_au_evclass_map_t *) ARG3;
8208 PRE_FIELD_READ("auditsys(classmap.ec_number)",
8209 classmap->ec_number);
8210 PRE_FIELD_READ("auditsys(classmap.ec_class)",
8211 classmap->ec_class);
8213 break;
8214 case VKI_A_GETPINFO:
8215 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpinfo"),
8216 long, code, int, cmd,
8217 struct vki_auditpinfo *, apinfo);
8219 if (ML_(safe_to_deref((void *) ARG3,
8220 sizeof(struct vki_auditpinfo)))) {
8221 struct vki_auditpinfo *apinfo =
8222 (struct vki_auditpinfo *) ARG3;
8223 PRE_FIELD_READ("auditsys(apinfo.ap_pid)", apinfo->ap_pid);
8224 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
8225 sizeof(struct vki_auditpinfo));
8227 break;
8228 case VKI_A_SETPMASK:
8229 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpmask"),
8230 long, code, int, cmd,
8231 struct vki_auditpinfo *, apinfo);
8232 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
8233 sizeof(struct vki_auditpinfo));
8234 break;
8235 case VKI_A_GETPINFO_ADDR:
8236 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getpinfo_addr"),
8237 long, code, int, cmd,
8238 struct vki_auditpinfo_addr *, apinfo, int, length);
8240 if (ML_(safe_to_deref((void *) ARG3,
8241 sizeof(struct vki_auditpinfo_addr)))) {
8242 struct vki_auditpinfo_addr *apinfo_addr =
8243 (struct vki_auditpinfo_addr *) ARG3;
8244 PRE_FIELD_READ("auditsys(apinfo_addr.ap_pid)",
8245 apinfo_addr->ap_pid);
8246 PRE_MEM_WRITE("auditsys(apinfo_addr)", ARG3, ARG4);
8248 break;
8249 case VKI_A_GETKAUDIT:
8250 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getkaudit"),
8251 long, code, int, cmd,
8252 vki_auditinfo_addr_t *, kaudit, int, length);
8253 PRE_MEM_WRITE("auditsys(kaudit)", ARG3, ARG4);
8254 break;
8255 case VKI_A_SETKAUDIT:
8256 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "setkaudit"),
8257 long, code, int, cmd,
8258 vki_auditinfo_addr_t *, kaudit, int, length);
8259 PRE_MEM_READ("auditsys(kaudit)", ARG3, ARG4);
8260 break;
8261 case VKI_A_GETAMASK:
8262 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getamask"),
8263 long, code, int, cmd, vki_au_mask_t *, amask);
8264 PRE_MEM_WRITE("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
8265 break;
8266 case VKI_A_SETAMASK:
8267 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setamask"),
8268 long, code, int, cmd, vki_au_mask_t *, amask);
8269 PRE_MEM_READ("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
8270 break;
8271 default:
8272 VG_(unimplemented)("Syswrap of the auditsys(auditctl) call "
8273 "with cmd %lu.", ARG2);
8274 /*NOTREACHED*/
8275 break;
8277 break;
8278 case VKI_BSM_GETAUDIT_ADDR:
8279 /* Libbsm: int getaudit_addr(auditinfo_addr_t *ai, int len); */
8280 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8281 PRE_REG_READ3(long, SC2("auditsys", "getaudit_addr"), long, code,
8282 vki_auditinfo_addr_t *, ai, int, len);
8283 PRE_MEM_WRITE("auditsys(ai)", ARG2, ARG3);
8284 break;
8285 case VKI_BSM_SETAUDIT_ADDR:
8286 /* Libbsm: int setaudit_addr(auditinfo_addr_t *ai, int len); */
8287 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
8288 PRE_REG_READ3(long, SC2("auditsys", "setaudit_addr"), long, code,
8289 vki_auditinfo_addr_t *, ai, int, len);
8290 PRE_MEM_READ("auditsys(ai)", ARG2, ARG3);
8291 break;
8292 case VKI_BSM_AUDITDOOR:
8293 /* Libbsm: int auditdoor(int fd); */
8294 PRINT("sys_auditsys ( %ld, %ld )", SARG1, SARG2);
8295 PRE_REG_READ2(long, SC2("auditsys", "door"), long, code, int, fd);
8297 /* Be strict. */
8298 if (!ML_(fd_allowed)(ARG2, SC2("auditsys", "door")"(fd)",
8299 tid, False))
8300 SET_STATUS_Failure(VKI_EBADF);
8301 break;
8302 default:
8303 VG_(unimplemented)("Syswrap of the auditsys call with code %lu.", ARG1);
8304 /*NOTREACHED*/
8305 break;
8309 POST(sys_auditsys)
8311 switch (ARG1 /*code*/) {
8312 case VKI_BSM_GETAUID:
8313 POST_MEM_WRITE(ARG2, sizeof(vki_au_id_t));
8314 break;
8315 case VKI_BSM_SETAUID:
8316 break;
8317 case VKI_BSM_GETAUDIT:
8318 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_t));
8319 break;
8320 case VKI_BSM_SETAUDIT:
8321 case VKI_BSM_AUDIT:
8322 break;
8323 case VKI_BSM_AUDITCTL:
8324 switch (ARG2 /*cmd*/) {
8325 case VKI_A_GETPOLICY:
8326 POST_MEM_WRITE(ARG3, sizeof(vki_uint32_t));
8327 break;
8328 case VKI_A_SETPOLICY:
8329 break;
8330 case VKI_A_GETKMASK:
8331 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
8332 break;
8333 case VKI_A_SETKMASK:
8334 break;
8335 case VKI_A_GETQCTRL:
8336 POST_MEM_WRITE(ARG3, sizeof(struct vki_au_qctrl));
8337 break;
8338 case VKI_A_SETQCTRL:
8339 break;
8340 case VKI_A_GETCWD:
8341 case VKI_A_GETCAR:
8342 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
8343 break;
8344 #if defined(SOLARIS_AUDITON_STAT)
8345 case VKI_A_GETSTAT:
8346 POST_MEM_WRITE(ARG3, sizeof(vki_au_stat_t));
8347 break;
8348 case VKI_A_SETSTAT:
8349 #endif /* SOLARIS_AUDITON_STAT */
8350 case VKI_A_SETUMASK:
8351 case VKI_A_SETSMASK:
8352 break;
8353 case VKI_A_GETCOND:
8354 POST_MEM_WRITE(ARG3, sizeof(int));
8355 break;
8356 case VKI_A_SETCOND:
8357 break;
8358 case VKI_A_GETCLASS:
8359 POST_MEM_WRITE(ARG3, sizeof(vki_au_evclass_map_t));
8360 break;
8361 case VKI_A_SETCLASS:
8362 break;
8363 case VKI_A_GETPINFO:
8364 POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo));
8365 break;
8366 case VKI_A_SETPMASK:
8367 break;
8368 case VKI_A_GETPINFO_ADDR:
8369 POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo_addr));
8370 break;
8371 case VKI_A_GETKAUDIT:
8372 POST_MEM_WRITE(ARG3, sizeof(vki_auditinfo_addr_t));
8373 break;
8374 case VKI_A_SETKAUDIT:
8375 break;
8376 case VKI_A_GETAMASK:
8377 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
8378 break;
8379 case VKI_A_SETAMASK:
8380 break;
8382 break;
8383 case VKI_BSM_GETAUDIT_ADDR:
8384 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_addr_t));
8385 break;
8386 case VKI_BSM_SETAUDIT_ADDR:
8387 break;
8388 case VKI_BSM_AUDITDOOR:
8389 break;
8393 PRE(sys_p_online)
8395 /* int p_online(processorid_t processorid, int flag); */
8396 PRINT("sys_p_online ( %ld, %ld )", SARG1, SARG2);
8397 PRE_REG_READ2(long, "p_online", vki_processorid_t, processorid, int, flag);
8400 PRE(sys_sigqueue)
8402 /* int sigqueue(pid_t pid, int signo, void *value,
8403 int si_code, timespec_t *timeout); */
8405 PRINT("sys_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
8406 SARG1, SARG2, ARG3, SARG4, ARG5);
8407 PRE_REG_READ5(long, "sigqueue", vki_pid_t, pid, int, signo,
8408 void *, value, int, si_code,
8409 vki_timespec_t *, timeout);
8411 if (ARG5)
8412 PRE_MEM_READ("sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
8414 if (!ML_(client_signal_OK)(ARG2)) {
8415 SET_STATUS_Failure(VKI_EINVAL);
8416 return;
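/* Note that the queueing syscall itself is issued below, directly from this
   PRE wrapper, rather than being left to the generic syscall driver. */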
8419 /* If we're sending SIGKILL, check to see if the target is one of
8420 our threads and handle it specially. */
8421 if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1)) {
8422 SET_STATUS_Success(0);
8423 } else {
8424 SysRes res = VG_(do_syscall5)(SYSNO, ARG1, ARG2, ARG3, ARG4,
8425 ARG5);
8426 SET_STATUS_from_SysRes(res);
8429 if (VG_(clo_trace_signals))
8430 VG_(message)(Vg_DebugMsg,
8431 "sigqueue: signal %ld queued for pid %ld\n",
8432 SARG2, SARG1);
8434 /* Check to see if this gave us a pending signal. */
8435 *flags |= SfPollAfter;
8438 PRE(sys_clock_gettime)
8440 /* int clock_gettime(clockid_t clock_id, struct timespec *tp); */
8441 PRINT("sys_clock_gettime ( %ld, %#lx )", SARG1, ARG2);
8442 PRE_REG_READ2(long, "clock_gettime", vki_clockid_t, clock_id,
8443 struct timespec *, tp);
8444 PRE_MEM_WRITE("clock_gettime(tp)", ARG2, sizeof(struct vki_timespec));
8447 POST(sys_clock_gettime)
8449 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
8452 PRE(sys_clock_settime)
8454 /* int clock_settime(clockid_t clock_id, const struct timespec *tp); */
8455 PRINT("sys_clock_settime ( %ld, %#lx )", SARG1, ARG2);
8456 PRE_REG_READ2(long, "clock_settime", vki_clockid_t, clock_id,
8457 const struct timespec *, tp);
8458 PRE_MEM_READ("clock_settime(tp)", ARG2, sizeof(struct vki_timespec));
8461 PRE(sys_clock_getres)
8463 /* int clock_getres(clockid_t clock_id, struct timespec *res); */
8464 PRINT("sys_clock_getres ( %ld, %#lx )", SARG1, ARG2);
8465 PRE_REG_READ2(long, "clock_getres", vki_clockid_t, clock_id,
8466 struct timespec *, res);
8468 if (ARG2)
8469 PRE_MEM_WRITE("clock_getres(res)", ARG2, sizeof(struct vki_timespec));
8472 POST(sys_clock_getres)
8474 if (ARG2)
8475 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
8478 PRE(sys_timer_create)
8480 /* int timer_create(clockid_t clock_id,
8481 struct sigevent *evp, timer_t *timerid); */
8483 PRINT("sys_timer_create ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
8484 PRE_REG_READ3(long, "timer_create", vki_clockid_t, clock_id,
8485 struct vki_sigevent *, evp, vki_timer_t *, timerid);
8487 if (ARG2) {
8488 struct vki_sigevent *evp = (struct vki_sigevent *) ARG2;
8489 PRE_FIELD_READ("timer_create(evp.sigev_notify)", evp->sigev_notify);
8490 PRE_FIELD_READ("timer_create(evp.sigev_signo)", evp->sigev_signo);
8491 PRE_FIELD_READ("timer_create(evp.sigev_value.sival_int)",
8492 evp->sigev_value.sival_int);
8494 /* Be safe. */
8495 if (ML_(safe_to_deref(evp, sizeof(struct vki_sigevent)))) {
8496 if ((evp->sigev_notify == VKI_SIGEV_PORT) ||
8497 (evp->sigev_notify == VKI_SIGEV_THREAD))
8498 PRE_MEM_READ("timer_create(evp.sigev_value.sival_ptr)",
8499 (Addr) evp->sigev_value.sival_ptr,
8500 sizeof(vki_port_notify_t));
8504 PRE_MEM_WRITE("timer_create(timerid)", ARG3, sizeof(vki_timer_t));
8507 POST(sys_timer_create)
8509 POST_MEM_WRITE(ARG3, sizeof(vki_timer_t));
8512 PRE(sys_timer_delete)
8514 /* int timer_delete(timer_t timerid); */
8515 PRINT("sys_timer_delete ( %ld )", SARG1);
8516 PRE_REG_READ1(long, "timer_delete", vki_timer_t, timerid);
8519 PRE(sys_timer_settime)
8521 /* int timer_settime(timer_t timerid, int flags,
8522 const struct itimerspec *value,
8523 struct itimerspec *ovalue); */
8525 PRINT("sys_timer_settime ( %ld, %ld, %#lx, %#lx )",
8526 SARG1, SARG2, ARG3, ARG4);
8527 PRE_REG_READ4(long, "timer_settime", vki_timer_t, timerid,
8528 int, flags, const struct vki_itimerspec *, value,
8529 struct vki_itimerspec *, ovalue);
8530 PRE_MEM_READ("timer_settime(value)",
8531 ARG3, sizeof(struct vki_itimerspec));
8532 if (ARG4)
8533 PRE_MEM_WRITE("timer_settime(ovalue)",
8534 ARG4, sizeof(struct vki_itimerspec));
8537 POST(sys_timer_settime)
8539 if (ARG4)
8540 POST_MEM_WRITE(ARG4, sizeof(struct vki_itimerspec));
8543 PRE(sys_timer_gettime)
8545 /* int timer_gettime(timer_t timerid, struct itimerspec *value); */
8546 PRINT("sys_timer_gettime ( %ld, %#lx )", SARG1, ARG2);
8547 PRE_REG_READ2(long, "timer_gettime", vki_timer_t, timerid,
8548 struct vki_itimerspec *, value);
8549 PRE_MEM_WRITE("timer_gettime(value)",
8550 ARG2, sizeof(struct vki_itimerspec));
8553 POST(sys_timer_gettime)
8555 POST_MEM_WRITE(ARG2, sizeof(struct vki_itimerspec));
8558 PRE(sys_timer_getoverrun)
8560 /* int timer_getoverrun(timer_t timerid); */
8561 PRINT("sys_timer_getoverrun ( %ld )", SARG1);
8562 PRE_REG_READ1(long, "timer_getoverrun", vki_timer_t, timerid);
8565 PRE(sys_facl)
8567 /* int facl(int fildes, int cmd, int nentries, void *aclbufp); */
8568 PRINT("sys_facl ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
8570 PRE_REG_READ4(long, "facl", int, fildes, int, cmd,
8571 int, nentries, void *, aclbufp);
8573 switch (ARG2 /*cmd*/) {
8574 case VKI_SETACL:
8575 if (ARG4)
8576 PRE_MEM_READ("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8577 break;
8578 case VKI_GETACL:
8579 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8580 break;
8581 case VKI_GETACLCNT:
8582 break;
8583 case VKI_ACE_SETACL:
8584 if (ARG4)
8585 PRE_MEM_READ("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8586 break;
8587 case VKI_ACE_GETACL:
8588 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8589 break;
8590 case VKI_ACE_GETACLCNT:
8591 break;
8592 default:
8593 VG_(unimplemented)("Syswrap of the facl call with cmd %ld.", SARG2);
8594 /*NOTREACHED*/
8595 break;
8598 /* Be strict. */
8599 if (!ML_(fd_allowed)(ARG1, "facl", tid, False))
8600 SET_STATUS_Failure(VKI_EBADF);
8603 POST(sys_facl)
8605 switch (ARG2 /*cmd*/) {
8606 case VKI_SETACL:
8607 break;
8608 case VKI_GETACL:
8609 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
8610 break;
8611 case VKI_GETACLCNT:
8612 break;
8613 case VKI_ACE_SETACL:
8614 break;
8615 case VKI_ACE_GETACL:
8616 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
8617 break;
8618 case VKI_ACE_GETACLCNT:
8619 break;
8620 default:
8621 vg_assert(0);
8622 break;
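/* Checks door_desc_t entries passed by the client to a door syscall. Returns
   VKI_EBADF if any descriptor marked for release is one the client must not
   touch; otherwise the released descriptors are recorded as closed (see the
   note below for why this happens already in the PRE wrapper). */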
8626 static Int pre_check_and_close_fds(ThreadId tid, const HChar *name,
8627 vki_door_desc_t *desc_ptr,
8628 vki_uint_t desc_num)
8630 vki_uint_t i;
8632 /* Verify passed file descriptors. */
8633 for (i = 0; i < desc_num; i++) {
8634 vki_door_desc_t *desc = &desc_ptr[i];
8635 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8636 (desc->d_attributes & DOOR_RELEASE)) {
8637 Int fd = desc->d_data.d_desc.d_descriptor;
8639 /* Detect and negate attempts by the client to close Valgrind's fds.
8640 Also if doing -d style logging (which is to fd = 2 = stderr),
8641 don't allow that to be closed either. */
8642 if (!ML_(fd_allowed)(fd, name, tid, False) ||
8643 (fd == 2 && VG_(debugLog_getLevel)() > 0))
8644 return VKI_EBADF;
8648 /* All fds are allowed; record information about the closed ones.
8650 Note: Recording information about any closed fds should generally happen
8651 in a post wrapper, but that is not possible here because door calls
8652 are "very blocking": if the information were recorded after the syscall
8653 finishes, it would be out-of-date during the call, i.e. while the
8654 syscall is blocked in the kernel. Therefore, we record closed fds for
8655 this specific syscall in the PRE wrapper. Unfortunately, this creates
8656 a problem when the syscall fails; for example, door_call() can fail
8657 with EBADF or EFAULT and then no fds are actually released. If that
8658 happens, the recorded information about open fds is incorrect. This should
8659 be very rare (I hope) and such a condition is also reported in the post wrapper. */
8660 if (VG_(clo_track_fds)) {
8661 for (i = 0; i < desc_num; i++) {
8662 vki_door_desc_t *desc = &desc_ptr[i];
8663 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8664 (desc->d_attributes & DOOR_RELEASE)) {
8665 Int fd = desc->d_data.d_desc.d_descriptor;
8666 ML_(record_fd_close)(fd);
8671 return 0;
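/* Records file descriptors returned by a door syscall and aborts hard if the
   kernel handed back a descriptor colliding with Valgrind's own ones. */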
8674 static void post_record_fds(ThreadId tid, const HChar *name,
8675 vki_door_desc_t *desc_ptr, vki_uint_t desc_num)
8677 vki_uint_t i;
8679 /* Record returned file descriptors. */
8680 for (i = 0; i < desc_num; i++) {
8681 vki_door_desc_t *desc = &desc_ptr[i];
8682 if (desc->d_attributes & DOOR_DESCRIPTOR) {
8683 Int fd = desc->d_data.d_desc.d_descriptor;
8684 if (!ML_(fd_allowed)(fd, name, tid, True)) {
8685 /* Unfortunately, we cannot recover at this point and have to fail
8686 hard. */
8687 VG_(message)(Vg_UserMsg, "The %s syscall returned an unallowed "
8688 "file descriptor %d.\n", name, fd);
8689 VG_(exit)(101);
8691 else if (VG_(clo_track_fds))
8692 ML_(record_fd_open_named)(tid, fd);
8697 /* Handles repository door protocol request over client door fd. */
8698 static void repository_door_pre_mem_door_call_hook(ThreadId tid, Int fd,
8699 void *data_ptr,
8700 SizeT data_size)
8702 vki_rep_protocol_request_t *p = (vki_rep_protocol_request_t *) data_ptr;
8703 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8704 "request->rpr_request)", p->rpr_request);
8706 if (ML_(safe_to_deref)(p, sizeof(vki_rep_protocol_request_t))) {
8707 switch (p->rpr_request) {
8708 case VKI_REP_PROTOCOL_CLOSE:
8709 break;
8710 case VKI_REP_PROTOCOL_ENTITY_SETUP:
8712 struct vki_rep_protocol_entity_setup *r =
8713 (struct vki_rep_protocol_entity_setup *) p;
8714 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8715 "entity_setup->rpr_entityid)", r->rpr_entityid);
8716 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8717 "entity_setup->rpr_entitytype)", r->rpr_entitytype);
8719 break;
8720 case VKI_REP_PROTOCOL_ENTITY_NAME:
8722 struct vki_rep_protocol_entity_name *r =
8723 (struct vki_rep_protocol_entity_name *) p;
8724 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8725 "entity_name->rpr_entityid)", r->rpr_entityid);
8726 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8727 "entity_name->rpr_answertype)", r->rpr_answertype);
8729 break;
8730 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 24) && (SOLARIS_REPCACHE_PROTOCOL_VERSION <= 30)
8731 case VKI_REP_PROTOCOL_ENTITY_FMRI:
8733 struct vki_rep_protocol_entity_fmri *r =
8734 (struct vki_rep_protocol_entity_fmri *) p;
8735 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8736 "entity_fmri->rpr_entityid)", r->rpr_entityid);
8738 break;
8739 #endif /* 24 <= SOLARIS_REPCACHE_PROTOCOL_VERSION <= 30 */
8740 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25)
8741 case VKI_REP_PROTOCOL_ENTITY_GET_ROOT:
8743 struct vki_rep_protocol_entity_root *r =
8744 (struct vki_rep_protocol_entity_root *) p;
8745 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8746 "entity_root->rpr_entityid)", r->rpr_entityid);
8747 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8748 "entity_root->rpr_outid)", r->rpr_outid);
8750 break;
8751 #endif /* SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25 */
8752 case VKI_REP_PROTOCOL_ENTITY_GET:
8754 struct vki_rep_protocol_entity_get *r =
8755 (struct vki_rep_protocol_entity_get *) p;
8756 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8757 "entity_get->rpr_entityid)", r->rpr_entityid);
8758 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8759 "entity_get->rpr_object)", r->rpr_object);
8761 break;
8762 case VKI_REP_PROTOCOL_ENTITY_GET_CHILD:
8763 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 31)
8764 case VKI_REP_PROTOCOL_ENTITY_GET_CHILD_COMPOSED:
8765 #endif
8767 struct vki_rep_protocol_entity_get_child *r =
8768 (struct vki_rep_protocol_entity_get_child *) p;
8769 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8770 "entity_get_child->rpr_entityid)", r->rpr_entityid);
8771 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8772 "entity_get_child->rpr_childid)", r->rpr_childid);
8773 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8774 "entity_get_child->rpr_name)", (Addr) r->rpr_name);
8776 break;
8777 case VKI_REP_PROTOCOL_ENTITY_GET_PARENT:
8779 struct vki_rep_protocol_entity_parent *r =
8780 (struct vki_rep_protocol_entity_parent *) p;
8781 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8782 "entity_get_parent->rpr_entityid)", r->rpr_entityid);
8783 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8784 "entity_get_parent->rpr_outid)", r->rpr_outid);
8786 break;
8787 case VKI_REP_PROTOCOL_ENTITY_RESET:
8789 struct vki_rep_protocol_entity_reset *r =
8790 (struct vki_rep_protocol_entity_reset *) p;
8791 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8792 "entity_reset->rpr_entityid)", r->rpr_entityid);
8794 break;
8795 case VKI_REP_PROTOCOL_ENTITY_TEARDOWN:
8797 struct vki_rep_protocol_entity_teardown *r =
8798 (struct vki_rep_protocol_entity_teardown *) p;
8799 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8800 "entity_teardown->rpr_entityid)", r->rpr_entityid);
8802 break;
8803 case VKI_REP_PROTOCOL_ITER_READ:
8805 struct vki_rep_protocol_iter_read *r =
8806 (struct vki_rep_protocol_iter_read *) p;
8807 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8808 "iter_read->rpr_iterid)", r->rpr_iterid);
8809 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8810 "iter_read->rpr_sequence)", r->rpr_sequence);
8811 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8812 "iter_read->rpr_entityid)", r->rpr_entityid);
8814 break;
8815 case VKI_REP_PROTOCOL_ITER_READ_VALUE:
8817 struct vki_rep_protocol_iter_read_value *r =
8818 (struct vki_rep_protocol_iter_read_value *) p;
8819 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8820 "iter_read_value->rpr_iterid)", r->rpr_iterid);
8821 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8822 "iter_read_value->rpr_sequence)", r->rpr_sequence);
8824 break;
8825 case VKI_REP_PROTOCOL_ITER_RESET:
8826 case VKI_REP_PROTOCOL_ITER_SETUP:
8827 case VKI_REP_PROTOCOL_ITER_TEARDOWN:
8829 struct vki_rep_protocol_iter_request *r =
8830 (struct vki_rep_protocol_iter_request *) p;
8831 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8832 "iter_request->rpr_iterid)", r->rpr_iterid);
8834 break;
8835 case VKI_REP_PROTOCOL_ITER_START:
8837 struct vki_rep_protocol_iter_start *r =
8838 (struct vki_rep_protocol_iter_start *) p;
8839 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8840 "iter_start->rpr_iterid)", r->rpr_iterid);
8841 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8842 "iter_start->rpr_entity)", r->rpr_entity);
8843 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8844 "iter_start->rpr_itertype)", r->rpr_itertype);
8845 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8846 "iter_start->rpr_flags)", r->rpr_flags);
8847 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8848 "iter_start->rpr_pattern)", (Addr) r->rpr_pattern);
8850 break;
8851 case VKI_REP_PROTOCOL_PROPERTY_GET_TYPE:
8852 case VKI_REP_PROTOCOL_PROPERTY_GET_VALUE:
8854 struct vki_rep_protocol_property_request *r =
8855 (struct vki_rep_protocol_property_request *) p;
8856 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8857 "property_request->rpr_entityid)", r->rpr_entityid);
8859 break;
8860 default:
8861 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
8862 " where rpr_request=%#x.", p->rpr_request);
8863 /* NOTREACHED */
8864 break;
8869 /* Handles repository door protocol response over client door fd. */
8870 static void repository_door_post_mem_door_call_hook(ThreadId tid, Int fd,
8871 void *rbuf, SizeT rsize)
8873 /* :TODO: Ideally we would match the response type with the preceding
8874    request, because the response itself does not contain any type
8875    identification.
8876    For now, simply mark the whole response buffer as defined. */
8877 POST_MEM_WRITE((Addr) rbuf, rsize);
8880 /* Pre-syscall checks for params->data_ptr contents of a door_call(). */
8881 static void door_call_pre_mem_params_data(ThreadId tid, Int fd,
8882 void *data_ptr, SizeT data_size)
8884 const HChar *pathname;
8886 /* Get the pathname of the door file descriptor, recording it first if
8887    not already done. It is needed below to identify the door service. */
8888 if (!VG_(clo_track_fds) && !ML_(fd_recorded)(fd)) {
8889 ML_(record_fd_open_named)(tid, fd);
8891 pathname = ML_(find_fd_recorded_by_fd)(fd);
8893 /* Debug-only printing. */
8894 if (0) {
8895 VG_(printf)("PRE(door_call) with fd=%d and filename=%s\n",
8896 fd, pathname);
8899 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
8900 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
8902 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8903 "kcf_door_arg_t->da_version)", p->da_version);
8904 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8905 "kcf_door_arg_t->da_iskernel)", p->da_iskernel);
8906 PRE_MEM_RASCIIZ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8907 "kcf_door_arg_t->da_u.filename)",
8908 (Addr) p->vki_da_u.filename);
8909 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
8910 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
8912 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8913 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
8914 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
8915 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
8916 /* request from an application towards nscd */
8917 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8918 "nss_pheader->p_version)", p->p_version);
8919 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8920 "nss_pheader->dbd_off)", p->dbd_off);
8921 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8922 "nss_pheader->dbd_len)", p->dbd_len);
8923 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8924 "nss_pheader->key_off)", p->key_off);
8925 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8926 "nss_pheader->key_len)", p->key_len);
8927 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8928 "nss_pheader->data_off)", p->data_off);
8929 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8930 "nss_pheader->data_len)", p->data_len);
8931 /* Fields ext_off and ext_len are set only sporadically. */
8932 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8933 "nss_pheader->pbufsiz)", p->pbufsiz);
8934 PRE_MEM_WRITE("door_call(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
8935 (Addr) p, p->pbufsiz);
8937 if (p->dbd_len > 0) {
8938 vki_nss_dbd_t *dbd
8939 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
8941 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR
8942 "\", nss_dbd)", (Addr) dbd, sizeof(vki_nss_dbd_t));
8943 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
8944 if (dbd->o_name != 0)
8945 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8946 "\", nss_dbd->o_name)", (Addr) ((HChar *) p
8947 + p->dbd_off + dbd->o_name));
8948 if (dbd->o_config_name != 0)
8949 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8950 "\", nss_dbd->o_config_name)",
8951 (Addr) ((HChar *) p + p->dbd_off
8952 + dbd->o_config_name));
8953 if (dbd->o_default_config != 0)
8954 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8955 "\", nss_dbd->o_default_config)",
8956 (Addr) ((HChar *) p + p->dbd_off +
8957 dbd->o_default_config));
8961 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", nss->key)",
8962 (Addr) ((HChar *) p + p->key_off), p->key_len);
8963 } else {
8964 /* request from a child nscd towards parent nscd */
8965 VG_(unimplemented)("Door wrapper of child/parent nscd.");
8968 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
8969 vki_repository_door_request_t *p =
8970 (vki_repository_door_request_t *) data_ptr;
8972 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8973 "request->rdr_version)", p->rdr_version);
8974 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8975 "request->rdr_request)", p->rdr_request);
8976 if (ML_(safe_to_deref)(p, sizeof(vki_repository_door_request_t))) {
8977 if (p->rdr_version == VKI_REPOSITORY_DOOR_VERSION) {
8978 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8979 "request->rdr_flags)", p->rdr_flags);
8980 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8981 "request->rdr_debug)", p->rdr_debug);
8982 } else {
8983 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
8984 " where version=%u.", p->rdr_version);
8987 } else {
8988 const OpenDoor *open_door = door_find_by_fd(fd);
8989 if ((open_door != NULL) && (open_door->pre_mem_hook != NULL)) {
8990 open_door->pre_mem_hook(tid, fd, data_ptr, data_size);
8991 } else {
8992 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
8993 /*
8994  * Be very lax about door syscall handling over unrecognized
8995  * door file descriptors. Does not require that the full buffer
8996  * is initialized when writing. Without this, programs using
8997  * libdoor(3LIB) functionality with completely proprietary
8998  * semantics may report a large number of false positives.
8999  */
9000 } else {
9001 static Int moans = 3;
9003 /* generic default */
9004 if (moans > 0 && !VG_(clo_xml)) {
9005 moans--;
9006 VG_(umsg)(
9007 "Warning: noted and generically handled door call\n"
9008 " on file descriptor %d (filename: %s).\n"
9009 " This could cause spurious value errors to appear.\n"
9010 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
9011 " Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
9012 fd, pathname);
9014 PRE_MEM_READ("door_call(params->data_ptr)",
9015 (Addr) data_ptr, data_size);
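/* A minimal client-side sketch, for illustration only (not part of Valgrind
   and not compiled), of the door_call(3C) usage that the wrapper above
   checks: the six door_arg_t fields a caller must fill in, and the case
   where the kernel replaces rbuf with a larger mapping that has to be
   munmap()ed (handled in POST(sys_door)/VKI_DOOR_CALL below). The door
   path and function name are made-up examples. */
#if 0   /* illustrative sketch only */
#include <door.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int example_door_client(void)
{
   char request[] = "ping";
   char reply[128];
   door_arg_t arg;

   /* A real client opens a path published by the server via fattach(3C). */
   int fd = open("/tmp/example_door", O_RDONLY);
   if (fd < 0)
      return -1;

   /* These are the six fields the PRE wrapper requires to be defined. */
   arg.data_ptr  = request;
   arg.data_size = sizeof(request);
   arg.desc_ptr  = NULL;            /* no file descriptors passed */
   arg.desc_num  = 0;
   arg.rbuf      = reply;
   arg.rsize     = sizeof(reply);

   if (door_call(fd, &arg) != 0) {
      close(fd);
      return -1;
   }

   /* On return, data_ptr points into rbuf. */
   printf("server replied %zu bytes\n", arg.data_size);

   /* If the results did not fit into the supplied buffer, the kernel maps
      a new one and updates rbuf/rsize; the caller releases it. */
   if (arg.rbuf != reply)
      munmap(arg.rbuf, arg.rsize);

   close(fd);
   return 0;
}
#endif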
9021 /* Post-syscall checks for params->rbuf contents of a door_call(). */
9022 static void door_call_post_mem_params_rbuf(ThreadId tid, Int fd,
9023 void *rbuf, SizeT rsize,
9024 const vki_door_desc_t *desc_ptr,
9025 vki_uint_t desc_num)
9027 const HChar *pathname = ML_(find_fd_recorded_by_fd)(fd);
9029 /* Debug-only printing. */
9030 if (0) {
9031 VG_(printf)("POST(door_call) with fd=%d and filename=%s\n",
9032 fd, pathname);
9035 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9036 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) rbuf;
9038 POST_FIELD_WRITE(p->da_version);
9039 POST_FIELD_WRITE(p->vki_da_u.result.status);
9040 POST_MEM_WRITE((Addr) p->vki_da_u.result.signature,
9041 p->vki_da_u.result.siglen);
9042 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9043 vki_nss_pheader_t *p = (vki_nss_pheader_t *) rbuf;
9045 POST_FIELD_WRITE(p->nsc_callnumber);
9046 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9047 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9048 /* response from nscd to an application */
9049 POST_FIELD_WRITE(p->p_status);
9050 POST_FIELD_WRITE(p->p_errno);
9051 POST_FIELD_WRITE(p->p_herrno);
9052 POST_FIELD_WRITE(p->dbd_off);
9053 POST_FIELD_WRITE(p->dbd_len);
9054 POST_FIELD_WRITE(p->key_off);
9055 POST_FIELD_WRITE(p->key_len);
9056 POST_FIELD_WRITE(p->data_off);
9057 POST_FIELD_WRITE(p->data_len);
9058 POST_FIELD_WRITE(p->ext_off);
9059 POST_FIELD_WRITE(p->ext_len);
9060 POST_FIELD_WRITE(p->pbufsiz);
9062 if (p->pbufsiz <= rsize) {
9063 if (p->dbd_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9064 SizeT len = MIN(p->dbd_len, p->pbufsiz - p->dbd_off);
9065 POST_MEM_WRITE((Addr) ((HChar *) p + p->dbd_off), len);
9068 if (p->key_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9069 SizeT len = MIN(p->key_len, p->pbufsiz - p->key_off);
9070 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), len);
9073 if (p->data_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9074 SizeT len = MIN(p->data_len, p->pbufsiz - p->data_off);
9075 POST_MEM_WRITE((Addr) ((HChar *) p + p->data_off), len);
9078 if (p->ext_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
9079 SizeT len = MIN(p->ext_len, p->pbufsiz - p->ext_off);
9080 POST_MEM_WRITE((Addr) ((HChar *) p + p->ext_off), len);
9083 } else {
9084 /* response from parent nscd to a child nscd */
9085 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9088 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9089 POST_FIELD_WRITE(((vki_repository_door_response_t *) rbuf)->rdr_status);
9090 /* A new client door fd is passed over the global repository door. */
9091 if ((desc_ptr != NULL) && (desc_num > 0)) {
9092 if (desc_ptr[0].d_attributes & DOOR_DESCRIPTOR) {
9093 door_record_client(tid, desc_ptr[0].d_data.d_desc.d_descriptor,
9094 repository_door_pre_mem_door_call_hook,
9095 repository_door_post_mem_door_call_hook);
9098 } else {
9099 const OpenDoor *open_door = door_find_by_fd(fd);
9100 if ((open_door != NULL) && (open_door->post_mem_hook != NULL)) {
9101 open_door->post_mem_hook(tid, fd, rbuf, rsize);
9102 } else {
9103 /* generic default */
9104 POST_MEM_WRITE((Addr) rbuf, rsize);
9109 /* Pre-syscall checks for data_ptr contents in a door_return(). */
9110 static void door_return_pre_mem_data(ThreadId tid, Addr server_procedure,
9111 void *data_ptr, SizeT data_size)
9113 if ((data_size == 0) || (server_procedure == 0)) {
9114 /* There is nothing to check. This usually happens during the thread's
9115    first call to door_return(). */
9116 return;
9119 /* Get the pathname of the door file descriptor based on the door
9120    server procedure (that is all we have).
9121    It is needed below to identify the door service by its pathname. */
9122 const OpenDoor *open_door = door_find_by_proc(server_procedure);
9123 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
9124 Int fd = (open_door != NULL) ? open_door->fd : -1;
9126 /* Debug-only printing. */
9127 if (0) {
9128 VG_(printf)("PRE(door_return) with fd=%d and filename=%s "
9129 "(nr_doors_recorded=%u)\n",
9130 fd, pathname, nr_doors_recorded);
9133 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9134 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
9136 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9137 "kcf_door_arg_t->da_version)", p->da_version);
9138 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9139 "kcf_door_arg_t->da_u.result.status)",
9140 p->vki_da_u.result.status);
9141 PRE_MEM_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
9142 "kcf_door_arg_t->da_u.result.signature)",
9143 (Addr) p->vki_da_u.result.signature,
9144 p->vki_da_u.result.siglen);
9145 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9146 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
9148 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9149 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
9150 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9151 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9152 /* response from nscd to an application */
9153 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9154 "nss_pheader->p_status)", p->p_status);
9155 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9156 "nss_pheader->p_errno)", p->p_errno);
9157 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9158 "nss_pheader->p_herrno)", p->p_herrno);
9159 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9160 "nss_pheader->dbd_off)", p->dbd_off);
9161 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9162 "nss_pheader->dbd_len)", p->dbd_len);
9163 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9164 "nss_pheader->data_off)", p->data_off);
9165 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9166 "nss_pheader->data_len)", p->data_len);
9167 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9168 "nss_pheader->ext_off)", p->ext_off);
9169 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9170 "nss_pheader->ext_len)", p->ext_len);
9171 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
9172 "nss_pheader->pbufsiz)", p->pbufsiz);
9173 PRE_MEM_WRITE("door_return(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
9174 (Addr) p, p->pbufsiz);
9175 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
9176 "\", nss->data)",
9177 (Addr) ((HChar *) p + p->data_off), p->data_len);
9178 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
9179 "\", nss->ext)",
9180 (Addr) ((HChar *) p + p->ext_off), p->ext_len);
9181 } else {
9182 /* response from parent nscd to a child nscd */
9183 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9186 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9187 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
9188 } else {
9189 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
9190 /*
9191  * Be very lax about door syscall handling over unrecognized
9192  * door file descriptors. Does not require that the full buffer
9193  * is initialized when writing. Without this, programs using
9194  * libdoor(3LIB) functionality with completely proprietary
9195  * semantics may report a large number of false positives.
9196  */
9197 } else {
9198 static Int moans = 3;
9200 /* generic default */
9201 if (moans > 0 && !VG_(clo_xml)) {
9202 moans--;
9203 VG_(umsg)(
9204 "Warning: noted and generically handled door return\n"
9205 " on file descriptor %d (filename: %s).\n"
9206 " This could cause spurious value errors to appear.\n"
9207 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
9208 " Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
9209 fd, pathname);
9211 PRE_MEM_READ("door_return(data_ptr)",
9212 (Addr) data_ptr, data_size);
9217 /* Post-syscall checks for data_ptr contents in a door_return(). */
9218 static void door_return_post_mem_data(ThreadId tid, Addr server_procedure,
9219 void *data_ptr, SizeT data_size)
9221 const OpenDoor *open_door = door_find_by_proc(server_procedure);
9222 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
9224 /* Debug-only printing. */
9225 if (0) {
9226 Int fd = (open_door != NULL) ? open_door->fd : -1;
9227 VG_(printf)("POST(door_return) with fd=%d and filename=%s "
9228 "(nr_doors_recorded=%u)\n",
9229 fd, pathname, nr_doors_recorded);
9232 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
9233 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
9235 POST_FIELD_WRITE(p->da_version);
9236 POST_FIELD_WRITE(p->da_iskernel);
9237 POST_MEM_WRITE((Addr) p->vki_da_u.filename,
9238 VG_(strlen)(p->vki_da_u.filename) + 1);
9239 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
9240 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
9242 POST_FIELD_WRITE(p->nsc_callnumber);
9243 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
9244 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
9245 /* request from an application towards nscd */
9246 POST_FIELD_WRITE(p->p_version);
9247 POST_FIELD_WRITE(p->dbd_off);
9248 POST_FIELD_WRITE(p->dbd_len);
9249 POST_FIELD_WRITE(p->key_off);
9250 POST_FIELD_WRITE(p->key_len);
9251 POST_FIELD_WRITE(p->data_off);
9252 POST_FIELD_WRITE(p->data_len);
9253 POST_FIELD_WRITE(p->ext_off);
9254 POST_FIELD_WRITE(p->ext_len);
9255 POST_FIELD_WRITE(p->pbufsiz);
9257 if (p->dbd_len > 0) {
9258 vki_nss_dbd_t *dbd
9259 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
9261 POST_MEM_WRITE((Addr) dbd, sizeof(vki_nss_dbd_t));
9262 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
9263 SizeT headers_size = sizeof(vki_nss_pheader_t)
9264 + sizeof(vki_nss_dbd_t);
9266 if (dbd->o_name != 0) {
9267 HChar *name = (HChar *) p + p->dbd_off + dbd->o_name;
9268 SizeT name_len = VG_(strlen)(name) + 1;
9269 if (name_len <= data_size - headers_size)
9270 POST_MEM_WRITE((Addr) name, name_len);
9272 if (dbd->o_config_name != 0) {
9273 HChar *name = (HChar *) p + p->dbd_off + dbd->o_config_name;
9274 SizeT name_len = VG_(strlen)(name) + 1;
9275 if (name_len <= data_size - headers_size)
9276 POST_MEM_WRITE((Addr) name, name_len);
9278 if (dbd->o_default_config != 0) {
9279 HChar *name = (HChar *) p + p->dbd_off
9280 + dbd->o_default_config;
9281 SizeT name_len = VG_(strlen)(name) + 1;
9282 if (name_len <= data_size - headers_size)
9283 POST_MEM_WRITE((Addr) name, name_len);
9288 if (p->key_len <= data_size - p->key_off)
9289 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), p->key_len);
9290 } else {
9291 /* request from a child nscd towards parent nscd */
9292 VG_(unimplemented)("Door wrapper of child/parent nscd.");
9295 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
9296 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
9297 } else {
9298 /* generic default */
9299 POST_MEM_WRITE((Addr) data_ptr, data_size);
9303 PRE(sys_door)
9305 /* int doorfs(long arg1, long arg2, long arg3, long arg4, long arg5,
9306 long subcode); */
9307 ThreadState *tst = VG_(get_ThreadState)(tid);
9308 *flags |= SfMayBlock | SfPostOnFail;
9310 PRINT("sys_door ( %#lx, %#lx, %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3,
9311 ARG4, ARG5, SARG6);
9313 /* Macro PRE_REG_READ6 cannot simply be used because not all ARGs are used
9314    by the door() syscall variants. Note that ARG6 (subcode) is always used. */
9315 #define PRE_REG_READ_SIXTH_ONLY \
9316 if (VG_(tdict).track_pre_reg_read) { \
9317 PRA6("door", long, subcode); \
9320 switch (ARG6 /*subcode*/) {
9321 case VKI_DOOR_CREATE:
9322 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9323 PRE_REG_READ_SIXTH_ONLY;
9324 /* Note: the first argument to DOOR_CREATE is a server procedure.
9325 This could lead to a problem if the kernel tries to force the
9326 execution of this procedure, similarly to how signal handlers are
9327 executed. Fortunately, the kernel never does that (for user-space
9328 server procedures). The procedure is always executed by the standard
9329 library. */
9330 break;
9331 case VKI_DOOR_REVOKE:
9332 PRE_REG_READ1(long, "door", long, arg1);
9333 PRE_REG_READ_SIXTH_ONLY;
9334 if (!ML_(fd_allowed)(ARG1, "door_revoke", tid, False))
9335 SET_STATUS_Failure(VKI_EBADF);
9336 break;
9337 case VKI_DOOR_INFO:
9338 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
9339 PRE_REG_READ_SIXTH_ONLY;
9340 PRE_MEM_WRITE("door_info(info)", ARG2, sizeof(vki_door_info_t));
9341 break;
9342 case VKI_DOOR_CALL:
9344 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
9345 PRE_REG_READ_SIXTH_ONLY;
9347 Int rval = 0;
9348 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
9350 if (!ML_(fd_allowed)(ARG1, "door_call", tid, False))
9351 rval = VKI_EBADF;
9353 PRE_FIELD_READ("door_call(params->data_ptr)", params->data_ptr);
9354 PRE_FIELD_READ("door_call(params->data_size)", params->data_size);
9355 PRE_FIELD_READ("door_call(params->desc_ptr)", params->desc_ptr);
9356 PRE_FIELD_READ("door_call(params->desc_num)", params->desc_num);
9357 PRE_FIELD_READ("door_call(params->rbuf)", params->rbuf);
9358 PRE_FIELD_READ("door_call(params->rsize)", params->rsize);
9360 if (ML_(safe_to_deref)(params, sizeof(*params))) {
9361 if (params->data_ptr)
9362 door_call_pre_mem_params_data(tid, ARG1, params->data_ptr,
9363 params->data_size);
9365 if (params->desc_ptr) {
9366 SizeT desc_size = params->desc_num * sizeof(*params->desc_ptr);
9367 PRE_MEM_READ("door_call(params->desc_ptr)",
9368 (Addr)params->desc_ptr, desc_size);
9370 /* Do not record information about closed fds if we are going to fail
9371    the syscall, in which case no fds will actually be closed. */
9372 if ((rval == 0) &&
9373 (ML_(safe_to_deref)(params->desc_ptr, desc_size))) {
9374 rval = pre_check_and_close_fds(tid, "door_call",
9375 params->desc_ptr,
9376 params->desc_num);
9380 if (params->rbuf)
9381 PRE_MEM_WRITE("door_call(params->rbuf)", (Addr)params->rbuf,
9382 params->rsize);
9385 if (rval)
9386 SET_STATUS_Failure(rval);
9388 break;
9389 case VKI_DOOR_BIND:
9390 PRE_REG_READ1(long, "door", long, arg1);
9391 PRE_REG_READ_SIXTH_ONLY;
9392 VG_(unimplemented)("DOOR_BIND");
9393 break;
9394 case VKI_DOOR_UNBIND:
9395 PRE_REG_READ0(long, "door");
9396 PRE_REG_READ_SIXTH_ONLY;
9397 VG_(unimplemented)("DOOR_UNBIND");
9398 break;
9399 case VKI_DOOR_UNREFSYS:
9400 PRE_REG_READ0(long, "door");
9401 PRE_REG_READ_SIXTH_ONLY;
9402 VG_(unimplemented)("DOOR_UNREFSYS");
9403 break;
9404 case VKI_DOOR_UCRED:
9405 PRE_REG_READ1(long, "door", long, arg1);
9406 PRE_REG_READ_SIXTH_ONLY;
9407 VG_(unimplemented)("DOOR_UCRED");
9408 break;
9409 case VKI_DOOR_RETURN:
9410 PRE_REG_READ6(long, "door", long, arg1, long, arg2, long, arg3,
9411 long, arg4, long, arg5, long, subcode);
9413 /* Register %esp/%rsp is read and modified by the syscall. */
9414 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(sp)",
9415 VG_O_STACK_PTR, sizeof(UWord));
9416 /* Register %ebp/%rbp is not really read by the syscall; it is only
9417    written by it. However, it is hard to determine when it is written, so we
9418    make sure it is always valid prior to making the syscall. */
9419 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(bp)",
9420 VG_O_FRAME_PTR, sizeof(UWord));
9422 door_return_pre_mem_data(tid, tst->os_state.door_return_procedure,
9423 (void *) ARG1, ARG2);
9425 /* Do not tell the tool where the syscall is going to write the
9426 resulting data. It is necessary to skip this check because the data
9427 area starting at ARG4-ARG5 (of length ARG5) is usually on a client
9428 thread stack below the stack pointer and therefore it can be marked
9429 by a tool (for example, Memcheck) as inaccessible. It is ok to skip
9430 this check in this case because if there is something wrong with the
9431 data area then the syscall will fail or the error will be handled by
9432 POST_MEM_WRITE() in the post wrapper. */
9433 /*PRE_MEM_WRITE("door_return(sp)", ARG4 - ARG5, ARG5);*/
9435 if (ARG3) {
9436 vki_door_return_desc_t *desc_env = (vki_door_return_desc_t*)ARG3;
9438 PRE_MEM_READ("door_return(desc_env)", ARG3,
9439 sizeof(vki_door_return_desc_t));
9441 if (ML_(safe_to_deref)(desc_env, sizeof(*desc_env)) &&
9442 desc_env->desc_ptr) {
9443 Int rval;
9445 PRE_MEM_READ("door_return(desc_env->desc_ptr)",
9446 (Addr)desc_env->desc_ptr,
9447 desc_env->desc_num * sizeof(*desc_env->desc_ptr));
9449 rval = pre_check_and_close_fds(tid, "door_return",
9450 desc_env->desc_ptr,
9451 desc_env->desc_num);
9452 if (rval)
9453 SET_STATUS_Failure(rval);
9456 tst->os_state.in_door_return = True;
9457 tst->os_state.door_return_procedure = 0;
9458 break;
9459 case VKI_DOOR_GETPARAM:
9460 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9461 PRE_REG_READ_SIXTH_ONLY;
9462 VG_(unimplemented)("DOOR_GETPARAM");
9463 break;
9464 case VKI_DOOR_SETPARAM:
9465 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
9466 PRE_REG_READ_SIXTH_ONLY;
9467 if (!ML_(fd_allowed)(ARG1, "door_setparam", tid, False))
9468 SET_STATUS_Failure(VKI_EBADF);
9469 break;
9470 default:
9471 VG_(unimplemented)("Syswrap of the door call with subcode %ld.", SARG6);
9472 /*NOTREACHED*/
9473 break;
9476 #undef PRE_REG_READ_SIXTH_ONLY
9479 POST(sys_door)
9481 ThreadState *tst = VG_(get_ThreadState)(tid);
9483 vg_assert(SUCCESS || FAILURE);
9485 /* Alter the tst->os_state.in_door_return flag. */
9486 if (ARG6 == VKI_DOOR_RETURN) {
9487 vg_assert(tst->os_state.in_door_return == True);
9488 tst->os_state.in_door_return = False;
9490 /* Inform the tool that %esp/%rsp and %ebp/%rbp were (potentially)
9491 modified. */
9492 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_STACK_PTR,
9493 sizeof(UWord));
9494 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_FRAME_PTR,
9495 sizeof(UWord));
9497 else
9498 vg_assert(tst->os_state.in_door_return == False);
9500 if (FAILURE) {
9501 if (VG_(clo_track_fds)) {
9502 /* See the discussion in pre_check_and_close_fds() to understand this
9503 part. */
9504 Bool loss = False;
9505 switch (ARG6 /*subcode*/) {
9506 case VKI_DOOR_CALL:
9507 if (ERR == VKI_EFAULT || ERR == VKI_EBADF)
9508 loss = True;
9509 break;
9510 case VKI_DOOR_RETURN:
9511 if (ERR == VKI_EFAULT || ERR == VKI_EINVAL)
9512 loss = True;
9513 break;
9514 default:
9515 break;
9517 if (loss)
9518 VG_(message)(Vg_UserMsg, "The door call failed with an "
9519 "unexpected error and information "
9520 "about open file descriptors can be "
9521 "now imprecise.\n");
9524 return;
9527 vg_assert(SUCCESS);
9529 switch (ARG6 /*subcode*/) {
9530 case VKI_DOOR_CREATE:
9531 door_record_server(tid, ARG1, RES);
9532 break;
9533 case VKI_DOOR_REVOKE:
9534 door_record_revoke(tid, ARG1);
9535 if (VG_(clo_track_fds))
9536 ML_(record_fd_close)(ARG1);
9537 break;
9538 case VKI_DOOR_INFO:
9539 POST_MEM_WRITE(ARG2, sizeof(vki_door_info_t));
9540 break;
9541 case VKI_DOOR_CALL:
9543 /* Note that all returned values are stored in the rbuf, i.e.
9544    data_ptr and desc_ptr point into this buffer. */
9545 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
9547 if (params->rbuf) {
9548 Addr addr = (Addr)params->rbuf;
9549 if (!VG_(am_find_anon_segment)(addr)) {
9550 /* This segment is new and was mapped by the kernel. */
9551 UInt prot, flags;
9552 SizeT size;
9554 prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
9555 flags = VKI_MAP_ANONYMOUS;
9556 size = VG_PGROUNDUP(params->rsize);
9558 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_door), "
9559 "new segment: vaddr=%#lx, size=%#lx, "
9560 "prot=%#x, flags=%#x, fd=%ld, offset=%#llx\n",
9561 addr, size, prot, flags, (UWord)-1, (ULong)0);
9563 ML_(notify_core_and_tool_of_mmap)(addr, size, prot, flags,
9564 -1, 0);
9566 /* Note: We don't notify the debuginfo reader about this
9567 mapping because there is no debug information stored in
9568 this segment. */
9571 door_call_post_mem_params_rbuf(tid, ARG1, (void *) addr,
9572 params->rsize, params->desc_ptr,
9573 params->desc_num);
9576 if (params->desc_ptr) {
9577 POST_MEM_WRITE((Addr)params->desc_ptr,
9578 params->desc_num * sizeof(vki_door_desc_t));
9579 post_record_fds(tid, "door_call", params->desc_ptr,
9580 params->desc_num);
9583 break;
9584 case VKI_DOOR_BIND:
9585 break;
9586 case VKI_DOOR_UNBIND:
9587 break;
9588 case VKI_DOOR_UNREFSYS:
9589 break;
9590 case VKI_DOOR_UCRED:
9591 break;
9592 case VKI_DOOR_RETURN:
9594 struct vki_door_results *results
9595 = (struct vki_door_results*)VG_(get_SP)(tid);
9597 tst->os_state.door_return_procedure = (Addr)results->pc;
9599 POST_MEM_WRITE((Addr)results, sizeof(*results));
9600 if (results->data_ptr)
9601 door_return_post_mem_data(tid,
9602 tst->os_state.door_return_procedure,
9603 results->data_ptr,
9604 results->data_size);
9605 if (results->desc_ptr) {
9606 POST_MEM_WRITE((Addr)results->desc_ptr,
9607 results->desc_num * sizeof(vki_door_desc_t));
9608 post_record_fds(tid, "door_return", results->desc_ptr,
9609 results->desc_num);
9612 POST_MEM_WRITE((Addr)results->door_info,
9613 sizeof(*results->door_info));
9615 break;
9616 case VKI_DOOR_GETPARAM:
9617 break;
9618 case VKI_DOOR_SETPARAM:
9619 break;
9620 default:
9621 vg_assert(0);
9622 break;
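/* A minimal server-side sketch, for illustration only (not part of Valgrind
   and not compiled): door_create(3C) registers a user-space server
   procedure (DOOR_CREATE above), libc runs it on a private thread, and the
   procedure hands its results back with door_return(3C), which is the path
   that enters the DOOR_RETURN handling above. The function names and the
   attach path are made-up examples. */
#if 0   /* illustrative sketch only */
#include <door.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>

/* Runs on a libc-managed thread; door_return() does not return on success,
   the thread instead parks in the kernel waiting for the next invocation. */
static void example_server_proc(void *cookie, char *argp, size_t arg_size,
                                door_desc_t *dp, uint_t n_desc)
{
   static char reply[] = "pong";
   (void)cookie; (void)argp; (void)arg_size; (void)dp; (void)n_desc;
   door_return(reply, sizeof(reply), NULL, 0);
}

int example_door_server(const char *path)   /* e.g. "/tmp/example_door" */
{
   int dfd = door_create(example_server_proc, NULL, 0);
   if (dfd < 0)
      return -1;

   /* Publish the door in the file system so clients can open() it. */
   int pfd = open(path, O_CREAT | O_RDWR, 0644);
   if (pfd < 0 || fattach(dfd, path) != 0) {
      if (pfd >= 0)
         close(pfd);
      close(dfd);
      return -1;
   }
   close(pfd);
   return dfd;
}
#endif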
9626 PRE(sys_schedctl)
9628 /* caddr_t schedctl(void); */
9629 /* This syscall returns an address that points to struct sc_shared.
9630 This per-thread structure is used as an interface between the libc and
9631 the kernel. */
9632 PRINT("sys_schedctl ( )");
9633 PRE_REG_READ0(long, "schedctl");
9636 POST(sys_schedctl)
9638 Addr a = RES;
9639 ThreadState *tst = VG_(get_ThreadState)(tid);
9641 /* Stay sane. */
9642 vg_assert((tst->os_state.schedctl_data == 0) ||
9643 (tst->os_state.schedctl_data == a));
9644 tst->os_state.schedctl_data = a;
9646 /* Returned address points to a block in a mapped page. */
9647 if (!VG_(am_find_anon_segment)(a)) {
9648 Addr page = VG_PGROUNDDN(a);
9649 UInt prot = VKI_PROT_READ | VKI_PROT_WRITE;
9650 # if defined(SOLARIS_SCHEDCTL_PAGE_EXEC)
9651 prot |= VKI_PROT_EXEC;
9652 # endif /* SOLARIS_SCHEDCTL_PAGE_EXEC */
9653 UInt flags = VKI_MAP_ANONYMOUS;
9654 /* The kernel always allocates one page for the sc_shared struct. */
9655 SizeT size = VKI_PAGE_SIZE;
9657 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_schedctl), new segment: "
9658 "vaddr=%#lx, size=%#lx, prot=%#x, flags=%#x, fd=-1, "
9659 "offset=0\n", page, size, prot, flags);
9661 /* The kernel always places a redzone before and after the allocated page.
9662    Check this assertion now; the tool can later request to allocate
9663    a Valgrind segment and aspacemgr will place it adjacent. */
9664 const NSegment *seg = VG_(am_find_nsegment)(page - 1);
9665 vg_assert(seg == NULL || seg->kind == SkResvn);
9666 seg = VG_(am_find_nsegment)(page + VKI_PAGE_SIZE);
9667 vg_assert(seg == NULL || seg->kind == SkResvn);
9669 /* The address space manager works with whole pages. */
9670 VG_(am_notify_client_mmap)(page, size, prot, flags, -1, 0);
9672 /* Note: There is no need to notify debuginfo about the new mapping
9673    because it is only an anonymous mapping. */
9674 /* Note: schedctl data are cleaned in two places:
9675 - for the tool when the thread exits
9676 - for the core in child's post-fork handler clean_schedctl_data(). */
9679 /* The tool needs per-thread granularity, not whole pages. */
9680 VG_TRACK(new_mem_mmap, a, sizeof(struct vki_sc_shared), True, True, True, 0);
9681 POST_MEM_WRITE(a, sizeof(struct vki_sc_shared));
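/* A minimal user-level sketch, for illustration only (not part of Valgrind
   and not compiled), of how an application reaches this wrapper:
   schedctl_init(3C) issues the schedctl trap for the calling thread and
   returns a pointer into the kernel-mapped sc_shared page handled above;
   schedctl_start()/schedctl_stop() then give preemption-control hints
   through that page. The function name is a made-up example. */
#if 0   /* illustrative sketch only */
#include <schedctl.h>

void example_critical_section(void (*do_work)(void))
{
   /* Pointer into the per-thread sc_shared block; the first call per
      thread performs the schedctl system call. */
   schedctl_t *sc = schedctl_init();

   schedctl_start(sc);   /* hint: please do not preempt me now */
   do_work();
   schedctl_stop(sc);    /* hint ends; yield if preemption was deferred */
}
#endif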
9684 PRE(sys_pset)
9686 /* Kernel: int pset(int subcode, long arg1, long arg2, long arg3,
9687 long arg4); */
9688 switch (ARG1 /* subcode */) {
9689 case VKI_PSET_CREATE:
9690 /* Libc: int pset_create(psetid_t *newpset); */
9691 PRINT("sys_pset ( %ld, %#lx )", SARG1, ARG2);
9692 PRE_REG_READ2(long, SC2("pset", "create"), int, subcode,
9693 vki_psetid_t *, newpset);
9694 PRE_MEM_WRITE("pset(newpset)", ARG2, sizeof(vki_psetid_t));
9695 break;
9696 case VKI_PSET_DESTROY:
9697 /* Libc: int pset_destroy(psetid_t pset); */
9698 PRINT("sys_pset ( %ld, %ld )", SARG1, SARG2);
9699 PRE_REG_READ2(long, SC2("pset", "destroy"), int, subcode,
9700 vki_psetid_t, pset);
9701 break;
9702 case VKI_PSET_ASSIGN:
9703 /* Libc: int pset_assign(psetid_t pset, processorid_t cpu,
9704 psetid_t *opset); */
9705 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9706 PRE_REG_READ4(long, SC2("pset", "assign"), int, subcode,
9707 vki_psetid_t, pset, vki_processorid_t, cpu,
9708 vki_psetid_t *, opset);
9709 if (ARG4 != 0)
9710 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9711 break;
9712 case VKI_PSET_INFO:
9713 /* Libc: int pset_info(psetid_t pset, int *type, uint_t *numcpus,
9714 processorid_t *cpulist); */
9715 PRINT("sys_pset ( %ld, %ld, %#lx, %#lx, %#lx )", SARG1, SARG2, ARG3,
9716 ARG4, ARG5);
9717 PRE_REG_READ5(long, SC2("pset", "info"), int, subcode, vki_psetid_t, pset,
9718 int *, type, vki_uint_t *, numcpus,
9719 vki_processorid_t *, cpulist);
9720 if (ARG3 != 0)
9721 PRE_MEM_WRITE("pset(type)", ARG3, sizeof(int));
9722 if (ARG4 != 0)
9723 PRE_MEM_WRITE("pset(numcpus)", ARG4, sizeof(vki_uint_t));
9724 if ((ARG4 != 0) && (ARG5 != 0)) {
9725 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9726 if (ML_(safe_to_deref(numcpus, sizeof(vki_uint_t)))) {
9727 PRE_MEM_WRITE("pset(cpulist)", ARG5,
9728 *numcpus * sizeof(vki_processorid_t));
9729 /* If the cpulist buffer is not large enough, it will hold only as many
9730    entries as fit in the buffer. However, numcpus will contain the
9731    real number of cpus, which may be greater than the value originally
9732    passed in. Stash the original value in the unused ARG6. */
9733 ARG6 = *numcpus;
9736 break;
9737 case VKI_PSET_BIND:
9738 /* Libc: int pset_bind(psetid_t pset, idtype_t idtype, id_t id,
9739 psetid_t *opset); */
9740 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9741 SARG4, ARG5);
9742 PRE_REG_READ5(long, SC2("pset", "bind"), int, subcode, vki_psetid_t, pset,
9743 vki_idtype_t, idtype, vki_id_t, id, vki_psetid_t *, opset);
9744 if (ARG5 != 0)
9745 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9746 break;
9747 case VKI_PSET_BIND_LWP:
9748 /* Libc: int pset_bind_lwp(psetid_t pset, id_t id, pid_t pid,
9749 psetid_t *opset); */
9750 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9751 SARG4, ARG5);
9752 PRE_REG_READ5(long, SC2("pset", "bind_lwp"), int, subcode,
9753 vki_psetid_t, pset, vki_id_t, id, vki_pid_t, pid,
9754 vki_psetid_t *, opset);
9755 if (ARG5 != 0)
9756 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9757 break;
9758 case VKI_PSET_GETLOADAVG:
9759 /* Libc: int pset_getloadavg(psetid_t pset, double loadavg[],
9760 int nelem); */
9761 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9762 PRE_REG_READ4(long, SC2("pset", "getloadavg"), int, subcode,
9763 vki_psetid_t, pset, int *, buf, int, nelem);
9764 if (ARG3 != 0)
9765 PRE_MEM_WRITE("pset(buf)", ARG3, SARG4 * sizeof(int));
9766 break;
9767 case VKI_PSET_LIST:
9768 /* Libc: int pset_list(psetid_t *psetlist, uint_t *numpsets); */
9769 PRINT("sys_pset ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9770 PRE_REG_READ3(long, SC2("pset", "list"), int, subcode,
9771 vki_psetid_t *, psetlist, vki_uint_t *, numpsets);
9772 if (ARG3 != 0)
9773 PRE_MEM_WRITE("pset(numpsets)", ARG3, sizeof(vki_uint_t));
9774 if ((ARG2 != 0) && (ARG3 != 0)) {
9775 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9776 if (ML_(safe_to_deref(numpsets, sizeof(vki_uint_t)))) {
9777 PRE_MEM_WRITE("pset(psetlist)", ARG2,
9778 *numpsets * sizeof(vki_psetid_t));
9779 /* If the psetlist buffer is not large enough, it will hold only as many
9780    entries as fit in the buffer. However, numpsets will contain the
9781    real number of processor sets, which may be greater than the value
9782    originally passed in. Stash the original value in the unused ARG6. */
9783 ARG6 = *numpsets;
9786 break;
9787 # if defined(SOLARIS_PSET_GET_NAME)
9788 case VKI_PSET_GET_NAME:
9789 /* Libc: int pset_get_name(psetid_t psetid, char *buf, uint_t len); */
9790 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9791 PRE_REG_READ4(long, SC2("pset", "get_name"), int, subcode,
9792 vki_psetid_t, pset, char *, buf, vki_uint_t, len);
9793 PRE_MEM_WRITE("pset(buf)", ARG3, ARG4);
9794 break;
9795 # endif /* SOLARIS_PSET_GET_NAME */
9796 case VKI_PSET_SETATTR:
9797 /* Libc: int pset_setattr(psetid_t pset, uint_t attr); */
9798 PRINT("sys_pset ( %ld, %ld, %ld )", SARG1, SARG2, ARG3);
9799 PRE_REG_READ3(long, SC2("pset", "setattr"), int, subcode,
9800 vki_psetid_t, pset, vki_uint_t, attr);
9801 break;
9802 case VKI_PSET_GETATTR:
9803 /* Libc: int pset_getattr(psetid_t pset, uint_t *attr); */
9804 PRINT("sys_pset ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
9805 PRE_REG_READ3(long, SC2("pset", "getattr"), int, subcode,
9806 vki_psetid_t, pset, vki_uint_t *, attr);
9807 PRE_MEM_WRITE("pset(attr)", ARG3, sizeof(vki_uint_t));
9808 break;
9809 case VKI_PSET_ASSIGN_FORCED:
9810 /* Libc: int pset_assign_forced(psetid_t pset, processorid_t cpu,
9811 psetid_t *opset); */
9812 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9813 PRE_REG_READ4(long, SC2("pset", "assign_forced"), int, subcode,
9814 vki_psetid_t, pset, vki_processorid_t, cpu,
9815 vki_psetid_t *, opset);
9816 if (ARG4 != 0)
9817 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9818 break;
9819 default:
9820 VG_(unimplemented)("Syswrap of pset syscall with subcode %ld.", SARG1);
9821 /*NOTREACHED*/
9822 break;
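/* A minimal user-level sketch, for illustration only (not part of Valgrind
   and not compiled), of the two-call pset_info(2) pattern that the
   VKI_PSET_INFO handling above has to cope with: the first call learns the
   cpu count, the second call supplies a buffer, and the kernel may report
   a count larger than the buffer (hence the value stashed in ARG6). The
   function name is a made-up example. */
#if 0   /* illustrative sketch only */
#include <stdlib.h>
#include <sys/processor.h>
#include <sys/pset.h>
#include <sys/types.h>

int example_query_pset_cpus(psetid_t pset)
{
   int type;
   uint_t ncpus = 0;

   /* First call: cpulist == NULL, only the count is wanted. */
   if (pset_info(pset, &type, &ncpus, NULL) != 0)
      return -1;

   processorid_t *cpus = malloc(ncpus * sizeof(*cpus));
   if (cpus == NULL)
      return -1;

   /* Second call: the buffer holds at most ncpus entries; the kernel
      updates ncpus with the real (possibly larger) count. */
   if (pset_info(pset, &type, &ncpus, cpus) != 0) {
      free(cpus);
      return -1;
   }

   free(cpus);
   return (int)ncpus;
}
#endif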
9826 POST(sys_pset)
9828 switch (ARG1 /*subcode*/) {
9829 case VKI_PSET_CREATE:
9830 POST_MEM_WRITE(ARG2, sizeof(vki_psetid_t));
9831 break;
9832 case VKI_PSET_DESTROY:
9833 break;
9834 case VKI_PSET_ASSIGN:
9835 if (ARG4 != 0)
9836 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9837 break;
9838 case VKI_PSET_INFO:
9839 if (ARG3 != 0)
9840 POST_MEM_WRITE(ARG3, sizeof(int));
9841 if (ARG4 != 0)
9842 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
9843 if ((ARG4 != 0) && (ARG5 != 0)) {
9844 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9845 POST_MEM_WRITE(ARG5, MIN(*numcpus, ARG6) * sizeof(vki_processorid_t));
9847 break;
9848 case VKI_PSET_BIND:
9849 if (ARG5 != 0)
9850 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9851 break;
9852 case VKI_PSET_BIND_LWP:
9853 if (ARG5 != 0)
9854 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9855 break;
9856 case VKI_PSET_GETLOADAVG:
9857 if (ARG3 != 0)
9858 POST_MEM_WRITE(ARG3, MIN(SARG4, VKI_LOADAVG_NSTATS) * sizeof(int));
9859 break;
9860 case VKI_PSET_LIST:
9861 if (ARG3 != 0)
9862 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9863 if ((ARG2 != 0) && (ARG3 != 0)) {
9864 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9865 POST_MEM_WRITE(ARG2, MIN(*numpsets, ARG6) * sizeof(vki_psetid_t));
9867 break;
9868 # if defined(SOLARIS_PSET_GET_NAME)
9869 case VKI_PSET_GET_NAME:
9870 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
9871 break;
9872 # endif /* SOLARIS_PSET_GET_NAME */
9873 case VKI_PSET_SETATTR:
9874 break;
9875 case VKI_PSET_GETATTR:
9876 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9877 break;
9878 case VKI_PSET_ASSIGN_FORCED:
9879 if (ARG4 != 0)
9880 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9881 break;
9882 default:
9883 vg_assert(0);
9884 break;
9888 PRE(sys_resolvepath)
9890 /* int resolvepath(const char *path, char *buf, size_t bufsiz); */
9891 PRINT("sys_resolvepath ( %#lx(%s), %#lx, %lu )", ARG1, (HChar *) ARG1, ARG2,
9892 ARG3);
9893 PRE_REG_READ3(long, "resolvepath", const char *, path, char *, buf,
9894 vki_size_t, bufsiz);
9896 PRE_MEM_RASCIIZ("resolvepath(path)", ARG1);
9897 PRE_MEM_WRITE("resolvepath(buf)", ARG2, ARG3);
9900 POST(sys_resolvepath)
9902 POST_MEM_WRITE(ARG2, RES);
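/* A minimal caller sketch, for illustration only (not part of Valgrind and
   not compiled): resolvepath(2) returns the number of bytes placed in buf
   and does not NUL-terminate the result, which is why the POST wrapper
   above marks exactly RES bytes as written. The function name is a
   made-up example. */
#if 0   /* illustrative sketch only */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int example_resolvepath(const char *path)
{
   char buf[PATH_MAX];

   /* Leave room for the terminator we have to add ourselves. */
   int n = resolvepath(path, buf, sizeof(buf) - 1);
   if (n < 0)
      return -1;

   buf[n] = '\0';   /* resolvepath() does not NUL-terminate buf */
   printf("%s -> %s\n", path, buf);
   return 0;
}
#endif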
9905 PRE(sys_lwp_mutex_timedlock)
9907 /* int lwp_mutex_timedlock(lwp_mutex_t *lp, timespec_t *tsp,
9908 uintptr_t owner); */
9909 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9910 *flags |= SfMayBlock;
9911 PRINT("lwp_mutex_timedlock ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
9912 PRE_REG_READ3(long, "lwp_mutex_timedlock", lwp_mutex_t *, lp,
9913 timespec_t *, tsp, uintptr_t, owner);
9915 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_flag)", lp->vki_mutex_flag);
9916 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_type)", lp->vki_mutex_type);
9917 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_owner)",
9918 lp->vki_mutex_owner);
9919 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_ownerpid)",
9920 lp->vki_mutex_ownerpid);
9921 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_lockw)", lp->vki_mutex_lockw);
9922 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_lockw)",
9923 lp->vki_mutex_lockw);*/
9924 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_waiters)",
9925 lp->vki_mutex_waiters);
9926 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_waiters)",
9927 lp->vki_mutex_waiters);*/
9928 if (ARG2) {
9929 PRE_MEM_READ("lwp_mutex_timedlock(tsp)", ARG2, sizeof(vki_timespec_t));
9930 /*PRE_MEM_WRITE("lwp_mutex_timedlock(tsp)", ARG2,
9931 sizeof(vki_timespec_t));*/
9935 POST(sys_lwp_mutex_timedlock)
9937 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9938 POST_FIELD_WRITE(lp->vki_mutex_owner);
9939 POST_FIELD_WRITE(lp->vki_mutex_ownerpid);
9940 POST_FIELD_WRITE(lp->vki_mutex_lockw);
9941 POST_FIELD_WRITE(lp->vki_mutex_waiters);
9942 if (ARG2)
9943 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
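/* For orientation only: a blocking, process-shared or contended mutex
   acquisition in libc (e.g. pthread_mutex_timedlock(3C)) is assumed to be
   what ultimately issues this lwp_mutex_timedlock trap; the wrapper itself
   does not depend on that. A minimal user-level sketch (not part of
   Valgrind and not compiled); the function name is a made-up example. */
#if 0   /* illustrative sketch only */
#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Try to take a mutex, but give up after roughly one second. */
int example_timed_lock(pthread_mutex_t *mtx)
{
   struct timespec deadline;

   clock_gettime(CLOCK_REALTIME, &deadline);
   deadline.tv_sec += 1;

   int rc = pthread_mutex_timedlock(mtx, &deadline);
   if (rc != 0)
      return (rc == ETIMEDOUT) ? 1 : -1;   /* timed out vs. hard error */

   /* ... critical section ... */
   pthread_mutex_unlock(mtx);
   return 0;
}
#endif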
9946 PRE(sys_lwp_rwlock_sys)
9948 /* int lwp_rwlock_sys(int subcode, lwp_rwlock_t *rwlp, timespec_t *tsp); */
9949 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
9950 switch (ARG1 /*subcode*/) {
9951 case 0:
9952 case 1:
9953 case 2:
9954 case 3:
9955 *flags |= SfMayBlock;
9956 switch (ARG1 /*subcode*/) {
9957 case 0:
9958 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9959 PRE_REG_READ3(long, SC2("lwp_rwlock", "rdlock"), int, subcode,
9960 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9961 break;
9962 case 1:
9963 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9964 PRE_REG_READ3(long, SC2("lwp_rwlock", "wrlock"), int, subcode,
9965 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9966 break;
9967 case 2:
9968 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9969 PRE_REG_READ2(long, SC2("lwp_rwlock", "tryrdlock"), int, subcode,
9970 lwp_rwlock_t *, rwlp);
9971 break;
9972 case 3:
9973 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9974 PRE_REG_READ2(long, SC2("lwp_rwlock", "trywrlock"), int, subcode,
9975 lwp_rwlock_t *, rwlp);
9976 break;
9977 default:
9978 vg_assert(0);
9979 break;
9982 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_type)", rwlp->vki_rwlock_type);
9983 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
9984 rwlp->vki_rwlock_readers);
9985 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
9986 rwlp->vki_rwlock_readers);*/
9988 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
9989 rwlp->mutex.vki_mutex_type);
9990 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_owner)",
9991 rwlp->mutex.vki_mutex_owner);
9992 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_ownerpid)",
9993 rwlp->mutex.vki_mutex_ownerpid);
9994 /* The mutex_lockw member is not really read by the kernel for this
9995    syscall, but it seems better to mark it as read because, when locking
9996    an rwlock, the associated mutex has to be locked. */
9997 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_lockw)",
9998 rwlp->mutex.vki_mutex_lockw);
9999 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_lockw)",
10000 rwlp->mutex.vki_mutex_lockw);*/
10001 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_waiters)",
10002 rwlp->mutex.vki_mutex_waiters);
10003 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_waiters)",
10004 rwlp->mutex.vki_mutex_waiters);*/
10006 if ((ARG1 == 0 || ARG1 == 1) && ARG3)
10007 PRE_MEM_READ("lwp_rwlock(tsp)", ARG3, sizeof(vki_timespec_t));
10008 break;
10009 case 4:
10010 PRINT("sys_lwp_rwlock( %ld, %#lx )", SARG1, ARG2);
10011 PRE_REG_READ2(long, SC2("lwp_rwlock", "unlock"), int, subcode,
10012 lwp_rwlock_t *, rwlp);
10013 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
10014 rwlp->mutex.vki_mutex_type);
10015 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
10016 rwlp->vki_rwlock_readers);
10017 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
10018 rwlp->vki_rwlock_readers);*/
10019 break;
10020 default:
10021 VG_(unimplemented)("Syswrap of the lwp_rwlock_sys call with subcode %ld.",
10022 SARG1);
10023 /*NOTREACHED*/
10024 break;
10028 POST(sys_lwp_rwlock_sys)
10030 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
10031 switch (ARG1 /*subcode*/) {
10032 case 0:
10033 case 1:
10034 case 2:
10035 case 3:
10036 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
10037 POST_FIELD_WRITE(rwlp->vki_rwlock_owner);
10038 POST_FIELD_WRITE(rwlp->vki_rwlock_ownerpid);
10039 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_lockw);
10040 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_waiters);
10041 break;
10042 case 4:
10043 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
10044 break;
10045 default:
10046 vg_assert(0);
10047 break;
10051 PRE(sys_lwp_sema_timedwait)
10053 /* int lwp_sema_timedwait(lwp_sema_t *sema, timespec_t *timeout,
10054 int check_park); */
10055 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
10056 *flags |= SfMayBlock;
10057 PRINT("sys_lwp_sema_timewait ( %#lx, %#lx, %ld )", ARG1, ARG2, SARG3);
10058 PRE_REG_READ3(long, "lwp_sema_timedwait", lwp_sema_t *, sema,
10059 timespec_t *, timeout, int, check_park);
10061 PRE_FIELD_READ("lwp_sema_timedwait(sema->type)", sema->vki_sema_type);
10062 PRE_FIELD_READ("lwp_sema_timedwait(sema->count)", sema->vki_sema_count);
10063 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->count)",
10064 sema->vki_sema_count);*/
10065 PRE_FIELD_READ("lwp_sema_timedwait(sema->waiters)", sema->vki_sema_waiters);
10066 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->waiters)",
10067 sema->vki_sema_waiters);*/
10068 if (ARG2) {
10069 PRE_MEM_READ("lwp_sema_timedwait(timeout)", ARG2,
10070 sizeof(vki_timespec_t));
10071 /*PRE_MEM_WRITE("lwp_sema_timedwait(timeout)", ARG2,
10072 sizeof(vki_timespec_t));*/
10076 POST(sys_lwp_sema_timedwait)
10078 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
10079 POST_FIELD_WRITE(sema->vki_sema_count);
10080 POST_FIELD_WRITE(sema->vki_sema_waiters);
10081 if (ARG2)
10082 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
10085 PRE(sys_zone)
10087 /* Kernel: long zone(int cmd, void *arg1, void *arg2, void *arg3,
10088 void *arg4);
10090 switch (ARG1 /*cmd*/) {
10091 case VKI_ZONE_CREATE:
10092 /* Libc: zoneid_t zone_create(const char *name, const char *root,
10093 const struct priv_set *privs,
10094 const char *rctls, size_t rctlsz,
10095 const char *zfs, size_t zfssz,
10096 int *extended_error, int match,
10097 int doi, const bslabel_t *label,
10098 int flags);
10099 Kernel: zoneid_t zone_create(zone_def *zd);
10101 PRINT("sys_zone ( %ld, %#lx )", SARG1, ARG2);
10102 PRE_REG_READ2(long, SC2("zone", "create"), int, cmd,
10103 vki_zone_def *, zd);
10105 vki_zone_def *zd = (vki_zone_def *) ARG2;
10106 PRE_FIELD_READ("zone(zd.zone_name)", zd->zone_name);
10107 PRE_FIELD_READ("zone(zd.zone_root)", zd->zone_root);
10108 PRE_FIELD_READ("zone(zd.zone_privs)", zd->zone_privs);
10109 PRE_FIELD_READ("zone(zd.zone_privssz)", zd->zone_privssz);
10110 PRE_FIELD_READ("zone(zd.rctlbuf)", zd->rctlbuf);
10111 PRE_FIELD_READ("zone(zd.rctlbufsz)", zd->rctlbufsz);
10112 PRE_FIELD_READ("zone(zd.zfsbuf)", zd->zfsbuf);
10113 PRE_FIELD_READ("zone(zd.zfsbufsz)", zd->zfsbufsz);
10114 PRE_FIELD_READ("zone(zd.extended_error)", zd->extended_error);
10115 PRE_FIELD_READ("zone(zd.match)", zd->match);
10116 PRE_FIELD_READ("zone(zd.doi)", zd->doi);
10117 PRE_FIELD_READ("zone(zd.label)", zd->label);
10118 PRE_FIELD_READ("zone(zd.flags)", zd->flags);
10120 if (ML_(safe_to_deref((void *)ARG2, sizeof(vki_zone_def)))) {
10121 if (zd->zone_name)
10122 PRE_MEM_RASCIIZ("zone(zd.zone_name)", (Addr) zd->zone_name);
10123 if (zd->zone_root)
10124 PRE_MEM_RASCIIZ("zone(zd.zone_root)", (Addr) zd->zone_root);
10125 PRE_MEM_READ("zone(zd.zone_privs)", (Addr) zd->zone_privs,
10126 zd->zone_privssz);
10127 PRE_MEM_READ("zone(zd.rctlbuf)", (Addr) zd->rctlbuf,
10128 zd->rctlbufsz);
10129 PRE_MEM_READ("zone(zd.zfsbuf)",
10130 (Addr) zd->zfsbuf, zd->zfsbufsz);
10131 if (zd->label)
10132 PRE_MEM_READ("zone(zd.label)", (Addr) zd->label,
10133 sizeof(vki_bslabel_t));
10135 break;
10136 case VKI_ZONE_DESTROY:
10137 /* Libc: int zone_destroy(zoneid_t zoneid); */
10138 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10139 PRE_REG_READ2(long, SC2("zone", "destroy"), int, cmd,
10140 vki_zoneid_t, zoneid);
10141 break;
10142 case VKI_ZONE_GETATTR:
10143 /* Libc: ssize_t zone_getattr(zoneid_t zoneid, int attr,
10144 void *valp, size_t size);
10146 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %ld )",
10147 SARG1, SARG2, SARG3, ARG4, SARG5);
10148 PRE_REG_READ5(long, SC2("zone", "getattr"), int, cmd,
10149 vki_zoneid_t, zoneid, int, attr, void *, valp,
10150 vki_size_t, size);
10151 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
10152 break;
10153 case VKI_ZONE_ENTER:
10154 /* Libc: int zone_enter(zoneid_t zoneid); */
10155 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10156 PRE_REG_READ2(long, SC2("zone", "enter"), int, cmd,
10157 vki_zoneid_t, zoneid);
10158 break;
10159 case VKI_ZONE_LIST:
10160 /* Libc: int zone_list(zoneid_t *zonelist, uint_t *numzones); */
10161 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
10162 PRE_REG_READ3(long, SC2("zone", "list"), int, cmd,
10163 vki_zoneid_t *, zonelist, vki_uint_t *, numzones);
10165 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
10167 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
10168 if (ARG2)
10169 PRE_MEM_WRITE("zone(zonelist)", ARG2,
10170 *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
10172 break;
10173 case VKI_ZONE_SHUTDOWN:
10174 /* Libc: int zone_shutdown(zoneid_t zoneid); */
10175 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10176 PRE_REG_READ2(long, SC2("zone", "shutdown"), int, cmd,
10177 vki_zoneid_t, zoneid);
10178 break;
10179 case VKI_ZONE_LOOKUP:
10180 /* Libc: zoneid_t zone_lookup(const char *name); */
10181 PRINT("sys_zone ( %ld, %#lx(%s) )", SARG1, ARG2, (HChar *) ARG2);
10182 PRE_REG_READ2(long, SC2("zone", "lookup"), int, cmd,
10183 const char *, name);
10184 if (ARG2)
10185 PRE_MEM_RASCIIZ("zone(name)", ARG2);
10186 break;
10187 case VKI_ZONE_BOOT:
10188 /* Libc: int zone_boot(zoneid_t zoneid); */
10189 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
10190 PRE_REG_READ2(long, SC2("zone", "boot"), int, cmd,
10191 vki_zoneid_t, zoneid);
10192 break;
10193 case VKI_ZONE_SETATTR:
10194 /* Libc: int zone_setattr(zoneid_t zoneid, int attr, void *valp,
10195 size_t size);
10197 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %lu )",
10198 SARG1, SARG2, SARG3, ARG4, ARG5);
10199 PRE_REG_READ5(long, SC2("zone", "setattr"), int, cmd,
10200 vki_zoneid_t, zoneid, int, attr, void *, valp,
10201 vki_size_t, size);
10202 PRE_MEM_READ("zone(valp)", ARG4, ARG5);
10203 break;
10204 case VKI_ZONE_ADD_DATALINK:
10205 /* Libc: int zone_add_datalink(zoneid_t zoneid,
10206 datalink_id_t linkid);
10208 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10209 PRE_REG_READ3(long, SC2("zone", "add_datalink"), int, cmd,
10210 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
10211 break;
10212 case VKI_ZONE_DEL_DATALINK:
10213 /* Libc: int zone_remove_datalink(zoneid_t zoneid,
10214 datalink_id_t linkid);
10216 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10217 PRE_REG_READ3(long, SC2("zone", "del_datalink"), int, cmd,
10218 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
10219 break;
10220 case VKI_ZONE_CHECK_DATALINK:
10221 /* Libc: int zone_check_datalink(zoneid_t *zoneidp,
10222 datalink_id_t linkid);
10224 PRINT("sys_zone ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10225 PRE_REG_READ3(long, SC2("zone", "check_datalink"), int, cmd,
10226 vki_zoneid_t *, zoneidp, vki_datalink_id_t, linkid);
10227 PRE_MEM_WRITE("zone(zoneidp)", ARG2, sizeof(vki_zoneid_t));
10228 break;
10229 case VKI_ZONE_LIST_DATALINK:
10230 /* Libc: int zone_list_datalink(zoneid_t zoneid, int *dlnump,
10231 datalink_id_t *linkids);
10233 PRINT("sys_zone ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
10234 PRE_REG_READ4(long, SC2("zone", "list_datalink"), int, cmd,
10235 vki_zoneid_t, zoneid, int *, dlnump,
10236 vki_datalink_id_t *, linkids);
10238 PRE_MEM_WRITE("zone(dlnump)", ARG3, sizeof(int));
10239 if (ML_(safe_to_deref((void *) ARG3, sizeof(int)))) {
10240 if (ARG4)
10241 PRE_MEM_WRITE("zone(linkids)", ARG4,
10242 *(int *) ARG3 * sizeof(vki_datalink_id_t));
10244 break;
10245 #if defined(SOLARIS_ZONE_DEFUNCT)
10246 case VKI_ZONE_LIST_DEFUNCT:
10247 /* Libc: int zone_list_defunct(uint64_t *uniqidlist,
10248 uint_t *numzones);
10250 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
10251 PRE_REG_READ3(long, SC2("zone", "list_defunct"), int, cmd,
10252 vki_uint64_t *, uniqidlist, vki_uint_t *, numzones);
10254 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
10256 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
10257 if (ARG2)
10258 PRE_MEM_WRITE("zone(uniqidlist)", ARG2,
10259 *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
10261 break;
10262 case VKI_ZONE_GETATTR_DEFUNCT:
10263 /* Libc: ssize_t zone_getattr_defunct(uint64_t uniqid, int attr,
10264 void *valp, size_t size);
10265 Kernel: ssize_t zone_getattr_defunct(uint64_t *uniqid, int attr,
10266 void *valp, size_t size);
10268 PRINT("sys_zone ( %ld, %#lx, %ld, %#lx, %lu )",
10269 SARG1, ARG2, SARG3, ARG4, ARG5);
10270 PRE_REG_READ5(long, SC2("zone", "getattr_defunct"), int, cmd,
10271 vki_uint64_t *, uniqid, int, attr,
10272 void *, valp, vki_size_t, size);
10274 PRE_MEM_READ("zone(uniqid)", ARG2, sizeof(vki_uint64_t));
10275 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
10276 break;
10277 #endif /* SOLARIS_ZONE_DEFUNCT */
10278 default:
10279 VG_(unimplemented)("Syswrap of the zone call with cmd %ld.", SARG1);
10280 /*NOTREACHED*/
10281 break;
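/* A minimal user-level sketch, for illustration only (not part of Valgrind
   and not compiled), of the two-call pattern behind VKI_ZONE_LIST above:
   the first call with a NULL list obtains the zone count, the second call
   supplies a buffer of that size and the kernel updates the count. This
   assumes the zone_list() interface declared in <zone.h>; the function
   name is a made-up example. */
#if 0   /* illustrative sketch only */
#include <stdlib.h>
#include <sys/types.h>
#include <zone.h>

int example_count_zones(void)
{
   uint_t nzones = 0;

   /* First call: zonelist == NULL, only the count is wanted. */
   if (zone_list(NULL, &nzones) != 0)
      return -1;

   zoneid_t *zones = malloc(nzones * sizeof(*zones));
   if (zones == NULL)
      return -1;

   /* Second call: nzones says how many entries fit; the kernel updates
      it with the real number of zones. */
   if (zone_list(zones, &nzones) != 0) {
      free(zones);
      return -1;
   }

   free(zones);
   return (int)nzones;
}
#endif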
10286 POST(sys_zone)
10288 switch (ARG1 /*cmd*/) {
10289 case VKI_ZONE_CREATE:
10290 case VKI_ZONE_DESTROY:
10291 break;
10292 case VKI_ZONE_GETATTR:
10293 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
10294 break;
10295 case VKI_ZONE_ENTER:
10296 break;
10297 case VKI_ZONE_LIST:
10298 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
10299 break;
10300 case VKI_ZONE_SHUTDOWN:
10301 case VKI_ZONE_LOOKUP:
10302 case VKI_ZONE_BOOT:
10303 case VKI_ZONE_SETATTR:
10304 case VKI_ZONE_ADD_DATALINK:
10305 case VKI_ZONE_DEL_DATALINK:
10306 break;
10307 case VKI_ZONE_CHECK_DATALINK:
10308 POST_MEM_WRITE(ARG2, sizeof(vki_zoneid_t));
10309 break;
10310 case VKI_ZONE_LIST_DATALINK:
10311 POST_MEM_WRITE(ARG4, *(int *) ARG3 * sizeof(vki_datalink_id_t));
10312 break;
10313 #if defined(SOLARIS_ZONE_DEFUNCT)
10314 case VKI_ZONE_LIST_DEFUNCT:
10315 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
10316 break;
10317 case VKI_ZONE_GETATTR_DEFUNCT:
10318 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
10319 break;
10320 #endif /* SOLARIS_ZONE_DEFUNCT */
10321 default:
10322 vg_assert(0);
10323 break;
10327 PRE(sys_getcwd)
10329 /* int getcwd(char *buf, size_t size); */
10330 /* Note: Generic getcwd() syswrap can't be used because it expects
10331 a different return value. */
10332 PRINT("sys_getcwd ( %#lx, %lu )", ARG1, ARG2);
10333 PRE_REG_READ2(long, "getcwd", char *, buf, vki_size_t, size);
10334 PRE_MEM_WRITE("getcwd(buf)", ARG1, ARG2);
10337 POST(sys_getcwd)
10339 POST_MEM_WRITE(ARG1, VG_(strlen)((HChar*)ARG1) + 1);
10342 PRE(sys_so_socket)
10344 /* int so_socket(int family, int type, int protocol, char *devpath,
10345 int version); */
10346 PRINT("sys_so_socket ( %ld, %ld, %ld, %#lx(%s), %ld)", SARG1, SARG2, SARG3,
10347 ARG4, (HChar *) ARG4, SARG5);
10348 PRE_REG_READ5(long, "socket", int, family, int, type, int, protocol,
10349 char *, devpath, int, version);
10350 if (ARG4)
10351 PRE_MEM_RASCIIZ("socket(devpath)", ARG4);
10354 POST(sys_so_socket)
10356 SysRes r;
10357 r = ML_(generic_POST_sys_socket)(tid, VG_(mk_SysRes_Success)(RES));
10358 SET_STATUS_from_SysRes(r);
10361 PRE(sys_so_socketpair)
10363 /* int so_socketpair(int sv[2]); */
10364 /* This syscall is used to connect two already created sockets together. */
10365 PRINT("sys_so_socketpair ( %#lx )", ARG1);
10366 PRE_REG_READ1(long, "socketpair", int *, sv);
10367 PRE_MEM_READ("socketpair(sv)", ARG1, 2 * sizeof(int));
10368 /*PRE_MEM_WRITE("socketpair(sv)", ARG1, 2 * sizeof(int));*/
10369 if (ML_(safe_to_deref)((void*)ARG1, 2 * sizeof(int))) {
10370 int *fds = (int*)ARG1;
10371 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, False))
10372 SET_STATUS_Failure(VKI_EBADF);
10373 else if (!ML_(fd_allowed)(fds[1], "socketpair", tid, False))
10374 SET_STATUS_Failure(VKI_EBADF);
10378 POST(sys_so_socketpair)
10380 /* The kernel can return new file descriptors; in such a case we have to
10381    validate them. */
10382 int *fds = (int*)ARG1;
10383 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
10384 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, True))
10385 SET_STATUS_Failure(VKI_EMFILE);
10386 if (!ML_(fd_allowed)(fds[1], "socketpair", tid, True))
10387 SET_STATUS_Failure(VKI_EMFILE);
10388 if (FAILURE) {
10389 /* One or both of the file descriptors weren't allowed, close newly
10390 created file descriptors but don't close the already recorded
10391 ones. */
10392 if (!ML_(fd_recorded)(fds[0]))
10393 VG_(close)(fds[0]);
10394 if (!ML_(fd_recorded)(fds[1]))
10395 VG_(close)(fds[1]);
10397 else if (VG_(clo_track_fds)) {
10398 /* Everything went better than expected, so record the newly created file
10399    descriptors. Note: If the kernel actually returns the original file
10400    descriptors, then ML_(record_fd_open_nameless) notices that these
10401    file descriptors have already been recorded. */
10402 ML_(record_fd_open_nameless)(tid, fds[0]);
10403 ML_(record_fd_open_nameless)(tid, fds[1]);
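/* Recap of the protocol above, with a small illustration.  Unlike plain
   socketpair(), the two descriptors in sv[] already exist when
   so_socketpair() is entered (created earlier, presumably via so_socket()),
   and the kernel may either wire up those same descriptors or return two new
   ones.  Hence PRE checks the incoming fds with ML_(fd_allowed), and POST
   validates the outgoing fds again, closes only descriptors that were never
   recorded if validation fails, and otherwise (re)records both; re-recording
   an already known fd is harmless.  Hypothetical client-side view, for
   illustration only:

      int sv[2] = { fd_a, fd_b };           two sockets obtained earlier
      if (syscall(SYS_so_socketpair, sv) == 0)
         ... sv[0] and sv[1] now form a connected pair ...
*/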
10407 PRE(sys_bind)
10409 /* int bind(int s, struct sockaddr *name, socklen_t namelen,
10410 int version); */
10411 PRINT("sys_bind ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10412 PRE_REG_READ4(long, "bind", int, s, struct sockaddr *, name,
10413 vki_socklen_t, namelen, int, version);
10414 ML_(generic_PRE_sys_bind)(tid, ARG1, ARG2, ARG3);
10417 PRE(sys_listen)
10419 /* int listen(int s, int backlog, int version); */
10420 PRINT("sys_listen ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10421 PRE_REG_READ3(long, "listen", int, s, int, backlog, int, version);
10424 PRE(sys_accept)
10426 #if defined(SOLARIS_NEW_ACCEPT_SYSCALL)
10427 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
10428 int version, int flags); */
10429 *flags |= SfMayBlock;
10430 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
10431 SARG5);
10432 PRE_REG_READ5(long, "accept", int, s, struct sockaddr *, addr,
10433 socklen_t *, addrlen, int, version, int, flags);
10434 #else
10435 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
10436 int version); */
10437 *flags |= SfMayBlock;
10438 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
10439 PRE_REG_READ4(long, "accept", int, s, struct sockaddr *, addr,
10440 socklen_t *, addrlen, int, version);
10441 #endif /* SOLARIS_NEW_ACCEPT_SYSCALL */
10442 ML_(generic_PRE_sys_accept)(tid, ARG1, ARG2, ARG3);
10445 POST(sys_accept)
10447 SysRes r;
10448 r = ML_(generic_POST_sys_accept)(tid, VG_(mk_SysRes_Success)(RES),
10449 ARG1, ARG2, ARG3);
10450 SET_STATUS_from_SysRes(r);
10453 PRE(sys_connect)
10455 /* int connect(int s, struct sockaddr *name, socklen_t namelen,
10456 int version); */
10457 *flags |= SfMayBlock;
10458 PRINT("sys_connect ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10459 PRE_REG_READ4(long, "connect", int, s, struct sockaddr *, name,
10460 vki_socklen_t, namelen, int, version);
10461 ML_(generic_PRE_sys_connect)(tid, ARG1, ARG2, ARG3);
10464 PRE(sys_shutdown)
10466 /* Kernel: int shutdown(int sock, int how, int version);
10467 Libc: int shutdown(int sock, int how); */
10469 *flags |= SfMayBlock;
10470 PRINT("sys_shutdown ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
10471 PRE_REG_READ3(int, "shutdown", int, sock, int, how, int, version);
10473 /* Be strict. */
10474 if (!ML_(fd_allowed)(ARG1, "shutdown", tid, False))
10475 SET_STATUS_Failure(VKI_EBADF);
10478 PRE(sys_recv)
10480 /* ssize_t recv(int s, void *buf, size_t len, int flags); */
10481 *flags |= SfMayBlock;
10482 PRINT("sys_recv ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10483 PRE_REG_READ4(long, "recv", int, s, void *, buf, vki_size_t, len,
10484 int, flags);
10485 ML_(generic_PRE_sys_recv)(tid, ARG1, ARG2, ARG3);
10488 POST(sys_recv)
10490 ML_(generic_POST_sys_recv)(tid, RES, ARG1, ARG2, ARG3);
10493 PRE(sys_recvfrom)
10495 /* ssize_t recvfrom(int s, void *buf, size_t len, int flags,
10496 struct sockaddr *from, socklen_t *fromlen); */
10497 *flags |= SfMayBlock;
10498 PRINT("sys_recvfrom ( %ld, %#lx, %lu, %ld, %#lx, %#lx )", SARG1, ARG2, ARG3,
10499 SARG4, ARG5, ARG6);
10500 PRE_REG_READ6(long, "recvfrom", int, s, void *, buf, vki_size_t, len,
10501 int, flags, struct sockaddr *, from, socklen_t *, fromlen);
10502 ML_(generic_PRE_sys_recvfrom)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10505 POST(sys_recvfrom)
10507 ML_(generic_POST_sys_recvfrom)(tid, VG_(mk_SysRes_Success)(RES),
10508 ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10511 PRE(sys_recvmsg)
10513 /* ssize_t recvmsg(int s, struct msghdr *msg, int flags); */
10514 *flags |= SfMayBlock;
10515 PRINT("sys_recvmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10516 PRE_REG_READ3(long, "recvmsg", int, s, struct msghdr *, msg, int, flags);
10517 ML_(generic_PRE_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
10520 POST(sys_recvmsg)
10522 ML_(generic_POST_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2, RES);
10525 PRE(sys_send)
10527 /* ssize_t send(int s, const void *msg, size_t len, int flags); */
10528 *flags |= SfMayBlock;
10529 PRINT("sys_send ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
10530 PRE_REG_READ4(long, "send", int, s, const void *, msg, vki_size_t, len,
10531 int, flags);
10532 ML_(generic_PRE_sys_send)(tid, ARG1, ARG2, ARG3);
10535 PRE(sys_sendmsg)
10537 /* ssize_t sendmsg(int s, const struct msghdr *msg, int flags); */
10538 *flags |= SfMayBlock;
10539 PRINT("sys_sendmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10540 PRE_REG_READ3(long, "sendmsg", int, s, const struct msghdr *, msg,
10541 int, flags);
10542 ML_(generic_PRE_sys_sendmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
10545 PRE(sys_sendto)
10547 /* ssize_t sendto(int s, const void *msg, size_t len, int flags,
10548 const struct sockaddr *to, int tolen); */
10549 *flags |= SfMayBlock;
10550 PRINT("sys_sendto ( %ld, %#lx, %lu, %ld, %#lx, %ld )", SARG1, ARG2, ARG3,
10551 SARG4, ARG5, SARG6);
10552 PRE_REG_READ6(long, "sendto", int, s, const void *, msg, vki_size_t, len,
10553 int, flags, const struct sockaddr *, to, int, tolen);
10554 ML_(generic_PRE_sys_sendto)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10557 PRE(sys_getpeername)
10559 /* Kernel: int getpeername(int s, struct sockaddr *name,
10560 socklen_t *namelen, int version);
10561 Libc: int getpeername(int s, struct sockaddr *name,
10562 socklen_t *namelen); */
10564 *flags |= SfMayBlock;
10565 PRINT("sys_getpeername ( %ld, %#lx, %#lx, %ld )",
10566 SARG1, ARG2, ARG3, SARG4);
10567 PRE_REG_READ4(long, "getpeername", int, s, struct vki_sockaddr *, name,
10568 vki_socklen_t *, namelen, int, version);
10569 ML_(buf_and_len_pre_check)(tid, ARG2, ARG3, "getpeername(name)",
10570 "getpeername(namelen)");
10572 /* Be strict. */
10573 if (!ML_(fd_allowed)(ARG1, "getpeername", tid, False))
10574 SET_STATUS_Failure(VKI_EBADF);
10577 POST(sys_getpeername)
10579 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES),
10580 ARG2, ARG3, "getpeername(namelen)");
10583 PRE(sys_getsockname)
10585 /* int getsockname(int s, struct sockaddr *name, socklen_t *namelen,
10586 int version); */
10587 PRINT("sys_getsockname ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
10588 PRE_REG_READ4(long, "getsockname", int, s, struct sockaddr *, name,
10589 socklen_t *, namelen, int, version);
10590 ML_(generic_PRE_sys_getsockname)(tid, ARG1, ARG2, ARG3);
10593 POST(sys_getsockname)
10595 ML_(generic_POST_sys_getsockname)(tid, VG_(mk_SysRes_Success)(RES),
10596 ARG1, ARG2, ARG3);
10599 PRE(sys_getsockopt)
10601 /* int getsockopt(int s, int level, int optname, void *optval,
10602 socklen_t *optlen, int version); */
10603 PRINT("sys_getsockopt ( %ld, %ld, %ld, %#lx, %#lx, %ld )", SARG1, SARG2,
10604 SARG3, ARG4, ARG5, SARG6);
10605 PRE_REG_READ6(long, "getsockopt", int, s, int, level, int, optname,
10606 void *, optval, socklen_t *, optlen, int, version);
10607 if (ARG4)
10608 ML_(buf_and_len_pre_check)(tid, ARG4, ARG5, "getsockopt(optval)",
10609 "getsockopt(optlen)");
10612 POST(sys_getsockopt)
10614 if (ARG4)
10615 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES), ARG4,
10616 ARG5, "getsockopt(optlen_out)");
10619 PRE(sys_setsockopt)
10621 /* int setsockopt(int s, int level, int optname, const void *optval,
10622 socklen_t optlen, int version); */
10623 PRINT("sys_setsockopt ( %ld, %ld, %ld, %#lx, %lu, %ld )", SARG1, SARG2,
10624 SARG3, ARG4, ARG5, SARG6);
10625 PRE_REG_READ6(long, "setsockopt", int, s, int, level, int, optname,
10626 const void *, optval, vki_socklen_t, optlen, int, version);
10627 ML_(generic_PRE_sys_setsockopt)(tid, ARG1, ARG2, ARG3, ARG4, ARG5);
10630 PRE(sys_lwp_mutex_register)
10632 /* int lwp_mutex_register(lwp_mutex_t *mp, caddr_t uaddr); */
10633 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t*)ARG1;
10634 PRINT("sys_lwp_mutex_register ( %#lx, %#lx )", ARG1, ARG2);
10635 PRE_REG_READ2(long, "lwp_mutex_register", lwp_mutex_t *, mp,
10636 void *, uaddr);
10637 PRE_FIELD_READ("lwp_mutex_register(mp->mutex_type)", mp->vki_mutex_type);
10640 PRE(sys_uucopy)
10642 /* int uucopy(const void *s1, void *s2, size_t n); */
10643 PRINT("sys_uucopy ( %#lx, %#lx, %lu )", ARG1, ARG2, ARG3);
10644 PRE_REG_READ3(long, "uucopy", const void *, s1, void *, s2, vki_size_t, n);
10646 /* Stay away from V segments. */
10647 if (!ML_(valid_client_addr)(ARG1, ARG3, tid, "uucopy(s1)")) {
10648 SET_STATUS_Failure(VKI_EFAULT);
10650 if (!ML_(valid_client_addr)(ARG2, ARG3, tid, "uucopy(s2)")) {
10651 SET_STATUS_Failure(VKI_EFAULT);
10654 if (FAILURE)
10655 return;
10657 /* XXX This is actually incorrect, we should be able to copy undefined
10658 values through to their new destination. */
10659 PRE_MEM_READ("uucopy(s1)", ARG1, ARG3);
10660 PRE_MEM_WRITE("uucopy(s2)", ARG2, ARG3);
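/* A possible refinement of the XXX above (sketch only, not implemented
   here): check just the addressability of the s1 range instead of requiring
   it to be fully defined, and let the tool propagate the definedness of s1
   to s2 when the POST handler runs, so that copying uninitialised bytes
   through uucopy() is not itself reported as an error. */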
10663 POST(sys_uucopy)
10665 POST_MEM_WRITE(ARG2, ARG3);
10668 PRE(sys_umount2)
10670 /* int umount2(const char *file, int mflag); */
10671 *flags |= SfMayBlock;
10672 PRINT("sys_umount2 ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
10673 PRE_REG_READ2(long, "umount2", const char *, file, int, mflag);
10674 PRE_MEM_RASCIIZ("umount2(file)", ARG1);
10677 PRE(fast_gethrtime)
10679 PRINT("fast_gethrtime ( )");
10680 PRE_REG_READ0(long, "gethrtime");
10683 PRE(fast_gethrvtime)
10685 PRINT("fast_gethrvtime ( )");
10686 PRE_REG_READ0(long, "gethrvtime");
10689 PRE(fast_gethrestime)
10691 /* Used by gettimeofday(3C). */
10692 PRINT("fast_gethrestime ( )");
10693 PRE_REG_READ0(long, "gethrestime");
10696 PRE(fast_getlgrp)
10698 /* Fasttrap number shared between gethomelgroup() and getcpuid(). */
10699 PRINT("fast_getlgrp ( )");
10700 PRE_REG_READ0(long, "getlgrp");
10703 #if defined(SOLARIS_GETHRT_FASTTRAP)
10704 PRE(fast_gethrt)
10706 /* Used by gethrtime(3C) when tsp & tscp HWCAPs are present. */
10707 PRINT("fast_gethrt ( )");
10708 PRE_REG_READ0(long, "gethrt");
10711 POST(fast_gethrt)
10713 if (RES == 0)
10714 return;
10716 VG_(change_mapping_ownership)(RES, False);
10718 #endif /* SOLARIS_GETHRT_FASTTRAP */
10720 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
10721 PRE(fast_getzoneoffset)
10723 /* Returns kernel's time zone offset data. */
10724 PRINT("fast_getzoneoffset ( )");
10725 PRE_REG_READ0(long, "get_zone_offset");
10728 POST(fast_getzoneoffset)
10730 if (RES == 0)
10731 return;
10733 VG_(change_mapping_ownership)(RES, False);
10735 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
10737 #undef PRE
10738 #undef POST
10740 /* ---------------------------------------------------------------------
10741 The Solaris syscall table
10742 ------------------------------------------------------------------ */
10744 /* Add a Solaris-specific, arch-independent wrapper to a syscall table. */
10745 #define SOLX_(sysno, name) \
10746 WRAPPER_ENTRY_X_(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10747 #define SOLXY(sysno, name) \
10748 WRAPPER_ENTRY_XY(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10750 #if defined(VGP_x86_solaris)
10751 /* Add an x86-solaris specific wrapper to a syscall table. */
10752 #define PLAX_(sysno, name) \
10753 WRAPPER_ENTRY_X_(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10754 #define PLAXY(sysno, name) \
10755 WRAPPER_ENTRY_XY(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10757 #elif defined(VGP_amd64_solaris)
10758 /* Add an amd64-solaris specific wrapper to a syscall table. */
10759 #define PLAX_(sysno, name) \
10760 WRAPPER_ENTRY_X_(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10761 #define PLAXY(sysno, name) \
10762 WRAPPER_ENTRY_XY(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10764 #else
10765 # error "Unknown platform"
10766 #endif
10769 /* GEN : handlers are in syswrap-generic.c
10770    SOL : handlers are in this file
10771    X_  : PRE handler only
10772    XY  : PRE and POST handlers */
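/* For example, SOLXY(__NR_getcwd, sys_getcwd) below registers both
   PRE(sys_getcwd) and POST(sys_getcwd) from this file for __NR_getcwd,
   while SOLX_(__NR_bind, sys_bind) registers the PRE handler only.
   (Illustration of the naming scheme; the exact expansion of
   WRAPPER_ENTRY_X_ / WRAPPER_ENTRY_XY is defined elsewhere, presumably in
   priv_types_n_macros.h.) */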
10775 static SyscallTableEntry syscall_table[] = {
10776 SOLX_(__NR_exit, sys_exit), /* 1 */
10777 #if defined(SOLARIS_SPAWN_SYSCALL)
10778 SOLX_(__NR_spawn, sys_spawn), /* 2 */
10779 #endif /* SOLARIS_SPAWN_SYSCALL */
10780 GENXY(__NR_read, sys_read), /* 3 */
10781 GENX_(__NR_write, sys_write), /* 4 */
10782 #if defined(SOLARIS_OLD_SYSCALLS)
10783 SOLXY(__NR_open, sys_open), /* 5 */
10784 #endif /* SOLARIS_OLD_SYSCALLS */
10785 SOLXY(__NR_close, sys_close), /* 6 */
10786 SOLX_(__NR_linkat, sys_linkat), /* 7 */
10787 #if defined(SOLARIS_OLD_SYSCALLS)
10788 GENX_(__NR_link, sys_link), /* 9 */
10789 GENX_(__NR_unlink, sys_unlink), /* 10 */
10790 #endif /* SOLARIS_OLD_SYSCALLS */
10791 SOLX_(__NR_symlinkat, sys_symlinkat), /* 11 */
10792 GENX_(__NR_chdir, sys_chdir), /* 12 */
10793 SOLX_(__NR_time, sys_time), /* 13 */
10794 #if defined(SOLARIS_OLD_SYSCALLS)
10795 GENX_(__NR_chmod, sys_chmod), /* 15 */
10796 GENX_(__NR_chown, sys_chown), /* 16 */
10797 #endif /* SOLARIS_OLD_SYSCALLS */
10798 SOLX_(__NR_brk, sys_brk), /* 17 */
10799 #if defined(SOLARIS_OLD_SYSCALLS)
10800 SOLXY(__NR_stat, sys_stat), /* 18 */
10801 #endif /* SOLARIS_OLD_SYSCALLS */
10802 SOLX_(__NR_lseek, sys_lseek), /* 19 */
10803 GENX_(__NR_getpid, sys_getpid), /* 20 */
10804 SOLXY(__NR_mount, sys_mount), /* 21 */
10805 SOLXY(__NR_readlinkat, sys_readlinkat), /* 22 */
10806 GENX_(__NR_setuid, sys_setuid), /* 23 */
10807 GENX_(__NR_getuid, sys_getuid), /* 24 */
10808 SOLX_(__NR_stime, sys_stime), /* 25 */
10809 GENX_(__NR_alarm, sys_alarm), /* 27 */
10810 #if defined(SOLARIS_OLD_SYSCALLS)
10811 SOLXY(__NR_fstat, sys_fstat), /* 28 */
10812 #endif /* SOLARIS_OLD_SYSCALLS */
10813 GENX_(__NR_pause, sys_pause), /* 29 */
10814 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
10815 SOLXY(__NR_frealpathat, sys_frealpathat), /* 30 */
10816 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
10817 SOLX_(__NR_stty, sys_stty), /* 31 */
10818 SOLXY(__NR_gtty, sys_gtty), /* 32 */
10819 #if defined(SOLARIS_OLD_SYSCALLS)
10820 GENX_(__NR_access, sys_access), /* 33 */
10821 #endif /* SOLARIS_OLD_SYSCALLS */
10822 GENX_(__NR_kill, sys_kill), /* 37 */
10823 SOLX_(__NR_pgrpsys, sys_pgrpsys), /* 39 */
10824 SOLXY(__NR_pipe, sys_pipe), /* 42 */
10825 GENXY(__NR_times, sys_times), /* 43 */
10826 SOLX_(__NR_faccessat, sys_faccessat), /* 45 */
10827 GENX_(__NR_setgid, sys_setgid), /* 46 */
10828 GENX_(__NR_getgid, sys_getgid), /* 47 */
10829 SOLXY(__NR_mknodat, sys_mknodat), /* 48 */
10830 SOLXY(__NR_sysi86, sys_sysi86), /* 50 */
10831 SOLXY(__NR_shmsys, sys_shmsys), /* 52 */
10832 SOLXY(__NR_semsys, sys_semsys), /* 53 */
10833 SOLXY(__NR_ioctl, sys_ioctl), /* 54 */
10834 SOLX_(__NR_fchownat, sys_fchownat), /* 56 */
10835 SOLX_(__NR_fdsync, sys_fdsync), /* 58 */
10836 SOLX_(__NR_execve, sys_execve), /* 59 */
10837 GENX_(__NR_umask, sys_umask), /* 60 */
10838 GENX_(__NR_chroot, sys_chroot), /* 61 */
10839 SOLXY(__NR_fcntl, sys_fcntl), /* 62 */
10840 SOLX_(__NR_renameat, sys_renameat), /* 64 */
10841 SOLX_(__NR_unlinkat, sys_unlinkat), /* 65 */
10842 SOLXY(__NR_fstatat, sys_fstatat), /* 66 */
10843 #if defined(VGP_x86_solaris)
10844 PLAXY(__NR_fstatat64, sys_fstatat64), /* 67 */
10845 #endif /* VGP_x86_solaris */
10846 SOLXY(__NR_openat, sys_openat), /* 68 */
10847 #if defined(VGP_x86_solaris)
10848 PLAXY(__NR_openat64, sys_openat64), /* 69 */
10849 #endif /* VGP_x86_solaris */
10850 SOLXY(__NR_tasksys, sys_tasksys), /* 70 */
10851 SOLXY(__NR_getpagesizes, sys_getpagesizes), /* 73 */
10852 SOLXY(__NR_lwp_park, sys_lwp_park), /* 77 */
10853 SOLXY(__NR_sendfilev, sys_sendfilev), /* 78 */
10854 #if defined(SOLARIS_LWP_NAME_SYSCALL)
10855 SOLXY(__NR_lwp_name, sys_lwp_name), /* 79 */
10856 #endif /* SOLARIS_LWP_NAME_SYSCALL */
10857 #if defined(SOLARIS_OLD_SYSCALLS)
10858 GENX_(__NR_rmdir, sys_rmdir), /* 79 */
10859 GENX_(__NR_mkdir, sys_mkdir), /* 80 */
10860 #endif /* SOLARIS_OLD_SYSCALLS */
10861 GENXY(__NR_getdents, sys_getdents), /* 81 */
10862 SOLXY(__NR_privsys, sys_privsys), /* 82 */
10863 SOLXY(__NR_ucredsys, sys_ucredsys), /* 83 */
10864 SOLXY(__NR_sysfs, sys_sysfs), /* 84 */
10865 SOLXY(__NR_getmsg, sys_getmsg), /* 85 */
10866 SOLX_(__NR_putmsg, sys_putmsg), /* 86 */
10867 #if defined(SOLARIS_OLD_SYSCALLS)
10868 SOLXY(__NR_lstat, sys_lstat), /* 88 */
10869 GENX_(__NR_symlink, sys_symlink), /* 89 */
10870 GENX_(__NR_readlink, sys_readlink), /* 90 */
10871 #endif /* SOLARIS_OLD_SYSCALLS */
10872 GENX_(__NR_setgroups, sys_setgroups), /* 91 */
10873 GENXY(__NR_getgroups, sys_getgroups), /* 92 */
10874 #if defined(SOLARIS_OLD_SYSCALLS)
10875 GENX_(__NR_fchmod, sys_fchmod), /* 93 */
10876 GENX_(__NR_fchown, sys_fchown), /* 94 */
10877 #endif /* SOLARIS_OLD_SYSCALLS */
10878 SOLXY(__NR_sigprocmask, sys_sigprocmask), /* 95 */
10879 SOLX_(__NR_sigsuspend, sys_sigsuspend), /* 96 */
10880 GENXY(__NR_sigaltstack, sys_sigaltstack), /* 97 */
10881 SOLXY(__NR_sigaction, sys_sigaction), /* 98 */
10882 SOLXY(__NR_sigpending, sys_sigpending), /* 99 */
10883 SOLX_(__NR_context, sys_getsetcontext), /* 100 */
10884 SOLX_(__NR_fchmodat, sys_fchmodat), /* 101 */
10885 SOLX_(__NR_mkdirat, sys_mkdirat), /* 102 */
10886 SOLXY(__NR_statvfs, sys_statvfs), /* 103 */
10887 SOLXY(__NR_fstatvfs, sys_fstatvfs), /* 104 */
10888 SOLXY(__NR_nfssys, sys_nfssys), /* 106 */
10889 SOLXY(__NR_waitid, sys_waitid), /* 107 */
10890 SOLX_(__NR_sigsendsys, sys_sigsendsys), /* 108 */
10891 #if defined(SOLARIS_UTIMESYS_SYSCALL)
10892 SOLX_(__NR_utimesys, sys_utimesys), /* 110 */
10893 #endif /* SOLARIS_UTIMESYS_SYSCALL */
10894 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
10895 SOLX_(__NR_utimensat, sys_utimensat), /* 110 */
10896 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
10897 SOLXY(__NR_sigresend, sys_sigresend), /* 111 */
10898 SOLXY(__NR_priocntlsys, sys_priocntlsys), /* 112 */
10899 SOLX_(__NR_pathconf, sys_pathconf), /* 113 */
10900 SOLX_(__NR_mmap, sys_mmap), /* 115 */
10901 GENXY(__NR_mprotect, sys_mprotect), /* 116 */
10902 GENXY(__NR_munmap, sys_munmap), /* 117 */
10903 GENX_(__NR_fchdir, sys_fchdir), /* 120 */
10904 GENXY(__NR_readv, sys_readv), /* 121 */
10905 GENX_(__NR_writev, sys_writev), /* 122 */
10906 #if defined(SOLARIS_UUIDSYS_SYSCALL)
10907 SOLXY(__NR_uuidsys, sys_uuidsys), /* 124 */
10908 #endif /* SOLARIS_UUIDSYS_SYSCALL */
10909 SOLX_(__NR_mmapobj, sys_mmapobj), /* 127 */
10910 GENX_(__NR_setrlimit, sys_setrlimit), /* 128 */
10911 GENXY(__NR_getrlimit, sys_getrlimit), /* 129 */
10912 #if defined(SOLARIS_OLD_SYSCALLS)
10913 GENX_(__NR_lchown, sys_lchown), /* 130 */
10914 #endif /* SOLARIS_OLD_SYSCALLS */
10915 SOLX_(__NR_memcntl, sys_memcntl), /* 131 */
10916 SOLXY(__NR_getpmsg, sys_getpmsg), /* 132 */
10917 SOLX_(__NR_putpmsg, sys_putpmsg), /* 133 */
10918 #if defined(SOLARIS_OLD_SYSCALLS)
10919 SOLX_(__NR_rename, sys_rename), /* 134 */
10920 #endif /* SOLARIS_OLD_SYSCALLS */
10921 SOLXY(__NR_uname, sys_uname), /* 135 */
10922 SOLX_(__NR_setegid, sys_setegid), /* 136 */
10923 SOLX_(__NR_sysconfig, sys_sysconfig), /* 137 */
10924 SOLXY(__NR_systeminfo, sys_systeminfo), /* 139 */
10925 SOLX_(__NR_seteuid, sys_seteuid), /* 141 */
10926 SOLX_(__NR_forksys, sys_forksys), /* 142 */
10927 #if defined(SOLARIS_GETRANDOM_SYSCALL)
10928 SOLXY(__NR_getrandom, sys_getrandom), /* 143 */
10929 #endif /* SOLARIS_GETRANDOM_SYSCALL */
10930 SOLXY(__NR_sigtimedwait, sys_sigtimedwait), /* 144 */
10931 SOLX_(__NR_yield, sys_yield), /* 146 */
10932 SOLXY(__NR_lwp_sema_post, sys_lwp_sema_post), /* 148 */
10933 SOLXY(__NR_lwp_sema_trywait, sys_lwp_sema_trywait), /* 149 */
10934 SOLX_(__NR_lwp_detach, sys_lwp_detach), /* 150 */
10935 SOLXY(__NR_modctl, sys_modctl), /* 152 */
10936 SOLX_(__NR_fchroot, sys_fchroot), /* 153 */
10937 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
10938 SOLX_(__NR_system_stats, sys_system_stats), /* 154 */
10939 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
10940 SOLXY(__NR_gettimeofday, sys_gettimeofday), /* 156 */
10941 GENXY(__NR_getitimer, sys_getitimer), /* 157 */
10942 GENXY(__NR_setitimer, sys_setitimer), /* 158 */
10943 SOLX_(__NR_lwp_create, sys_lwp_create), /* 159 */
10944 SOLX_(__NR_lwp_exit, sys_lwp_exit), /* 160 */
10945 SOLX_(__NR_lwp_suspend, sys_lwp_suspend), /* 161 */
10946 SOLX_(__NR_lwp_continue, sys_lwp_continue), /* 162 */
10947 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
10948 SOLXY(__NR_lwp_sigqueue, sys_lwp_sigqueue), /* 163 */
10949 #else
10950 SOLXY(__NR_lwp_kill, sys_lwp_kill), /* 163 */
10951 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
10952 SOLX_(__NR_lwp_self, sys_lwp_self), /* 164 */
10953 SOLX_(__NR_lwp_sigmask, sys_lwp_sigmask), /* 165 */
10954 SOLX_(__NR_lwp_private, sys_lwp_private), /* 166 */
10955 SOLXY(__NR_lwp_wait, sys_lwp_wait), /* 167 */
10956 SOLXY(__NR_lwp_mutex_wakeup, sys_lwp_mutex_wakeup), /* 168 */
10957 SOLXY(__NR_lwp_cond_wait, sys_lwp_cond_wait), /* 170 */
10958 SOLXY(__NR_lwp_cond_signal, sys_lwp_cond_signal), /* 171 */
10959 SOLX_(__NR_lwp_cond_broadcast, sys_lwp_cond_broadcast), /* 172 */
10960 SOLXY(__NR_pread, sys_pread), /* 173 */
10961 SOLX_(__NR_pwrite, sys_pwrite), /* 174 */
10962 #if defined(VGP_x86_solaris)
10963 PLAX_(__NR_llseek, sys_llseek32), /* 175 */
10964 #endif /* VGP_x86_solaris */
10965 SOLXY(__NR_lgrpsys, sys_lgrpsys), /* 180 */
10966 SOLXY(__NR_rusagesys, sys_rusagesys), /* 181 */
10967 SOLXY(__NR_port, sys_port), /* 182 */
10968 SOLXY(__NR_pollsys, sys_pollsys), /* 183 */
10969 SOLXY(__NR_labelsys, sys_labelsys), /* 184 */
10970 SOLXY(__NR_acl, sys_acl), /* 185 */
10971 SOLXY(__NR_auditsys, sys_auditsys), /* 186 */
10972 SOLX_(__NR_p_online, sys_p_online), /* 189 */
10973 SOLX_(__NR_sigqueue, sys_sigqueue), /* 190 */
10974 SOLXY(__NR_clock_gettime, sys_clock_gettime), /* 191 */
10975 SOLX_(__NR_clock_settime, sys_clock_settime), /* 192 */
10976 SOLXY(__NR_clock_getres, sys_clock_getres), /* 193 */
10977 SOLXY(__NR_timer_create, sys_timer_create), /* 194 */
10978 SOLX_(__NR_timer_delete, sys_timer_delete), /* 195 */
10979 SOLXY(__NR_timer_settime, sys_timer_settime), /* 196 */
10980 SOLXY(__NR_timer_gettime, sys_timer_gettime), /* 197 */
10981 SOLX_(__NR_timer_getoverrun, sys_timer_getoverrun), /* 198 */
10982 GENXY(__NR_nanosleep, sys_nanosleep), /* 199 */
10983 SOLXY(__NR_facl, sys_facl), /* 200 */
10984 SOLXY(__NR_door, sys_door), /* 201 */
10985 GENX_(__NR_setreuid, sys_setreuid), /* 202 */
10986 GENX_(__NR_setregid, sys_setregid), /* 203 */
10987 SOLXY(__NR_schedctl, sys_schedctl), /* 206 */
10988 SOLXY(__NR_pset, sys_pset), /* 207 */
10989 SOLXY(__NR_resolvepath, sys_resolvepath), /* 209 */
10990 SOLXY(__NR_lwp_mutex_timedlock, sys_lwp_mutex_timedlock), /* 210 */
10991 SOLXY(__NR_lwp_sema_timedwait, sys_lwp_sema_timedwait), /* 211 */
10992 SOLXY(__NR_lwp_rwlock_sys, sys_lwp_rwlock_sys), /* 212 */
10993 #if defined(VGP_x86_solaris)
10994 GENXY(__NR_getdents64, sys_getdents64), /* 213 */
10995 PLAX_(__NR_mmap64, sys_mmap64), /* 214 */
10996 #if defined(SOLARIS_OLD_SYSCALLS)
10997 PLAXY(__NR_stat64, sys_stat64), /* 215 */
10998 PLAXY(__NR_lstat64, sys_lstat64), /* 216 */
10999 PLAXY(__NR_fstat64, sys_fstat64), /* 217 */
11000 #endif /* SOLARIS_OLD_SYSCALLS */
11001 PLAXY(__NR_statvfs64, sys_statvfs64), /* 218 */
11002 PLAXY(__NR_fstatvfs64, sys_fstatvfs64), /* 219 */
11003 #endif /* VGP_x86_solaris */
11004 #if defined(VGP_x86_solaris)
11005 PLAX_(__NR_setrlimit64, sys_setrlimit64), /* 220 */
11006 PLAXY(__NR_getrlimit64, sys_getrlimit64), /* 221 */
11007 PLAXY(__NR_pread64, sys_pread64), /* 222 */
11008 PLAX_(__NR_pwrite64, sys_pwrite64), /* 223 */
11009 #if defined(SOLARIS_OLD_SYSCALLS)
11010 PLAXY(__NR_open64, sys_open64), /* 225 */
11011 #endif /* SOLARIS_OLD_SYSCALLS */
11012 #endif /* VGP_x86_solaris */
11013 SOLXY(__NR_zone, sys_zone), /* 227 */
11014 SOLXY(__NR_getcwd, sys_getcwd), /* 229 */
11015 SOLXY(__NR_so_socket, sys_so_socket), /* 230 */
11016 SOLXY(__NR_so_socketpair, sys_so_socketpair), /* 231 */
11017 SOLX_(__NR_bind, sys_bind), /* 232 */
11018 SOLX_(__NR_listen, sys_listen), /* 233 */
11019 SOLXY(__NR_accept, sys_accept), /* 234 */
11020 SOLX_(__NR_connect, sys_connect), /* 235 */
11021 SOLX_(__NR_shutdown, sys_shutdown), /* 236 */
11022 SOLXY(__NR_recv, sys_recv), /* 237 */
11023 SOLXY(__NR_recvfrom, sys_recvfrom), /* 238 */
11024 SOLXY(__NR_recvmsg, sys_recvmsg), /* 239 */
11025 SOLX_(__NR_send, sys_send), /* 240 */
11026 SOLX_(__NR_sendmsg, sys_sendmsg), /* 241 */
11027 SOLX_(__NR_sendto, sys_sendto), /* 242 */
11028 SOLXY(__NR_getpeername, sys_getpeername), /* 243 */
11029 SOLXY(__NR_getsockname, sys_getsockname), /* 244 */
11030 SOLXY(__NR_getsockopt, sys_getsockopt), /* 245 */
11031 SOLX_(__NR_setsockopt, sys_setsockopt), /* 246 */
11032 SOLX_(__NR_lwp_mutex_register, sys_lwp_mutex_register), /* 252 */
11033 SOLXY(__NR_uucopy, sys_uucopy), /* 254 */
11034 SOLX_(__NR_umount2, sys_umount2) /* 255 */
11037 static SyscallTableEntry fasttrap_table[] = {
11038 SOLX_(__NR_gethrtime, fast_gethrtime), /* 3 */
11039 SOLX_(__NR_gethrvtime, fast_gethrvtime), /* 4 */
11040 SOLX_(__NR_gethrestime, fast_gethrestime), /* 5 */
11041 SOLX_(__NR_getlgrp, fast_getlgrp) /* 6 */
11042 #if defined(SOLARIS_GETHRT_FASTTRAP)
11044 SOLXY(__NR_gethrt, fast_gethrt) /* 7 */
11045 #endif /* SOLARIS_GETHRT_FASTTRAP */
11046 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
11048 SOLXY(__NR_getzoneoffset, fast_getzoneoffset) /* 8 */
11049 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
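/* Note that every fasttrap PRE handler above uses PRE_REG_READ0: these
   entry points take no arguments.  They are looked up through the separate
   VG_SOLARIS_SYSCALL_CLASS_FASTTRAP class in the function below rather than
   through the classic syscall table. */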
11053 SyscallTableEntry *ML_(get_solaris_syscall_entry)(UInt sysno)
11055 const UInt syscall_table_size
11056 = sizeof(syscall_table) / sizeof(syscall_table[0]);
11057 const UInt fasttrap_table_size
11058 = sizeof(fasttrap_table) / sizeof(fasttrap_table[0]);
11060 SyscallTableEntry *table;
11061 Int size;
11063 switch (VG_SOLARIS_SYSNO_CLASS(sysno)) {
11064 case VG_SOLARIS_SYSCALL_CLASS_CLASSIC:
11065 table = syscall_table;
11066 size = syscall_table_size;
11067 break;
11068 case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP:
11069 table = fasttrap_table;
11070 size = fasttrap_table_size;
11071 break;
11072 default:
11073 vg_assert(0);
11074 break;
11076 sysno = VG_SOLARIS_SYSNO_INDEX(sysno);
11077 if (sysno < size) {
11078 SyscallTableEntry *sys = &table[sysno];
11079 if (!sys->before)
11080 return NULL; /* no entry */
11081 return sys;
11084 /* Can't find a wrapper. */
11085 return NULL;
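/* Hypothetical caller, for illustration only (the actual dispatch lives in
   syswrap-main.c):

      SyscallTableEntry *ent = ML_(get_solaris_syscall_entry)(sysno);
      if (ent == NULL) {
         // unknown or unwrapped syscall, typically failed with VKI_ENOSYS
      } else {
         // run ent->before, then the syscall itself, then the matching
         // POST handler if one was registered
      }
*/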
11088 #endif // defined(VGO_solaris)
11090 /*--------------------------------------------------------------------*/
11091 /*--- end ---*/
11092 /*--------------------------------------------------------------------*/