/*--------------------------------------------------------------------*/
/*--- Darwin-specific syscalls, etc.        syswrap-amd64-darwin.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2005-2013 Apple Inc.
      Greg Parker  gparker@apple.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#if defined(VGP_amd64_darwin)

#include "config.h"                // DARWIN_VERS
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_libcsetjmp.h"   // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"     // For VG_(sigframe_destroy)()
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"   /* for decls of generic wrappers */
#include "priv_syswrap-darwin.h"    /* for decls of darwin-ish wrappers */
#include "priv_syswrap-main.h"

#include <mach/mach.h>
static void x86_thread_state64_from_vex(x86_thread_state64_t *mach,
                                        VexGuestAMD64State *vex)
{
   mach->__rax    = vex->guest_RAX;
   mach->__rbx    = vex->guest_RBX;
   mach->__rcx    = vex->guest_RCX;
   mach->__rdx    = vex->guest_RDX;
   mach->__rdi    = vex->guest_RDI;
   mach->__rsi    = vex->guest_RSI;
   mach->__rbp    = vex->guest_RBP;
   mach->__rsp    = vex->guest_RSP;
   mach->__rflags = LibVEX_GuestAMD64_get_rflags(vex);
   mach->__rip    = vex->guest_RIP;
   mach->__r8     = vex->guest_R8;
   mach->__r9     = vex->guest_R9;
   mach->__r10    = vex->guest_R10;
   mach->__r11    = vex->guest_R11;
   mach->__r12    = vex->guest_R12;
   mach->__r13    = vex->guest_R13;
   mach->__r14    = vex->guest_R14;
   mach->__r15    = vex->guest_R15;
   /* GrP fixme
   mach->__cs = vex->guest_CS;
   mach->__fs = vex->guest_FS;
   mach->__gs = vex->guest_GS;
   */
}
static void x86_float_state64_from_vex(x86_float_state64_t *mach,
                                       VexGuestAMD64State *vex)
{
   // DDD: #warning GrP fixme fp state
   // JRS: what about the YMMHI bits?  Are they important?
   VG_(memcpy)(&mach->__fpu_xmm0,  &vex->guest_YMM0,  sizeof(mach->__fpu_xmm0));
   VG_(memcpy)(&mach->__fpu_xmm1,  &vex->guest_YMM1,  sizeof(mach->__fpu_xmm1));
   VG_(memcpy)(&mach->__fpu_xmm2,  &vex->guest_YMM2,  sizeof(mach->__fpu_xmm2));
   VG_(memcpy)(&mach->__fpu_xmm3,  &vex->guest_YMM3,  sizeof(mach->__fpu_xmm3));
   VG_(memcpy)(&mach->__fpu_xmm4,  &vex->guest_YMM4,  sizeof(mach->__fpu_xmm4));
   VG_(memcpy)(&mach->__fpu_xmm5,  &vex->guest_YMM5,  sizeof(mach->__fpu_xmm5));
   VG_(memcpy)(&mach->__fpu_xmm6,  &vex->guest_YMM6,  sizeof(mach->__fpu_xmm6));
   VG_(memcpy)(&mach->__fpu_xmm7,  &vex->guest_YMM7,  sizeof(mach->__fpu_xmm7));
   VG_(memcpy)(&mach->__fpu_xmm8,  &vex->guest_YMM8,  sizeof(mach->__fpu_xmm8));
   VG_(memcpy)(&mach->__fpu_xmm9,  &vex->guest_YMM9,  sizeof(mach->__fpu_xmm9));
   VG_(memcpy)(&mach->__fpu_xmm10, &vex->guest_YMM10, sizeof(mach->__fpu_xmm10));
   VG_(memcpy)(&mach->__fpu_xmm11, &vex->guest_YMM11, sizeof(mach->__fpu_xmm11));
   VG_(memcpy)(&mach->__fpu_xmm12, &vex->guest_YMM12, sizeof(mach->__fpu_xmm12));
   VG_(memcpy)(&mach->__fpu_xmm13, &vex->guest_YMM13, sizeof(mach->__fpu_xmm13));
   VG_(memcpy)(&mach->__fpu_xmm14, &vex->guest_YMM14, sizeof(mach->__fpu_xmm14));
   VG_(memcpy)(&mach->__fpu_xmm15, &vex->guest_YMM15, sizeof(mach->__fpu_xmm15));
}
void thread_state_from_vex(thread_state_t mach_generic,
                           thread_state_flavor_t flavor,
                           mach_msg_type_number_t count,
                           VexGuestArchState *vex_generic)
{
   VexGuestAMD64State *vex = (VexGuestAMD64State *)vex_generic;

   switch (flavor) {
   case x86_THREAD_STATE64:
      vg_assert(count == x86_THREAD_STATE64_COUNT);
      x86_thread_state64_from_vex((x86_thread_state64_t *)mach_generic, vex);
      break;

   case x86_FLOAT_STATE64:
      vg_assert(count == x86_FLOAT_STATE64_COUNT);
      x86_float_state64_from_vex((x86_float_state64_t *)mach_generic, vex);
      break;

   case x86_THREAD_STATE:
      ((x86_thread_state_t *)mach_generic)->tsh.flavor = flavor;
      ((x86_thread_state_t *)mach_generic)->tsh.count = count;
      x86_thread_state64_from_vex(&((x86_thread_state_t *)mach_generic)->uts.ts64, vex);
      break;

   case x86_FLOAT_STATE:
      ((x86_float_state_t *)mach_generic)->fsh.flavor = flavor;
      ((x86_float_state_t *)mach_generic)->fsh.count = count;
      x86_float_state64_from_vex(&((x86_float_state_t *)mach_generic)->ufs.fs64, vex);
      break;

   case x86_EXCEPTION_STATE:
      VG_(printf)("thread_state_from_vex: TODO, want exception state\n");
      vg_assert(0);

   default:
      VG_(printf)("thread_state_from_vex: flavor:%#x\n", flavor);
      vg_assert(0);
   }
}
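
/* Illustrative sketch (not part of the build): how a caller might use
   thread_state_from_vex to capture a thread's guest registers into a
   Mach-format buffer.  The function name 'example_capture_state' is
   hypothetical; only thread_state_from_vex and the flavor/count
   constants come from the code above. */
#if 0
static void example_capture_state(ThreadState *tst)
{
   x86_thread_state64_t ts;
   thread_state_from_vex((thread_state_t)&ts, x86_THREAD_STATE64,
                         x86_THREAD_STATE64_COUNT, &tst->arch.vex);
   // ts now mirrors the guest's integer state, e.g. ts.__rip, ts.__rsp.
}
#endif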
static void x86_thread_state64_to_vex(const x86_thread_state64_t *mach,
                                      VexGuestAMD64State *vex)
{
   LibVEX_GuestAMD64_initialise(vex);
   vex->guest_RAX = mach->__rax;
   vex->guest_RBX = mach->__rbx;
   vex->guest_RCX = mach->__rcx;
   vex->guest_RDX = mach->__rdx;
   vex->guest_RDI = mach->__rdi;
   vex->guest_RSI = mach->__rsi;
   vex->guest_RBP = mach->__rbp;
   vex->guest_RSP = mach->__rsp;
   // DDD: #warning GrP fixme eflags
   vex->guest_RIP = mach->__rip;
   vex->guest_R8  = mach->__r8;
   vex->guest_R9  = mach->__r9;
   vex->guest_R10 = mach->__r10;
   vex->guest_R11 = mach->__r11;
   vex->guest_R12 = mach->__r12;
   vex->guest_R13 = mach->__r13;
   vex->guest_R14 = mach->__r14;
   vex->guest_R15 = mach->__r15;
   /* GrP fixme
   vex->guest_CS = mach->__cs;
   vex->guest_FS = mach->__fs;
   vex->guest_GS = mach->__gs;
   */
}
static void x86_float_state64_to_vex(const x86_float_state64_t *mach,
                                     VexGuestAMD64State *vex)
{
   // DDD: #warning GrP fixme fp state
   // JRS: what about the YMMHI bits?  Are they important?
   VG_(memcpy)(&vex->guest_YMM0,  &mach->__fpu_xmm0,  sizeof(mach->__fpu_xmm0));
   VG_(memcpy)(&vex->guest_YMM1,  &mach->__fpu_xmm1,  sizeof(mach->__fpu_xmm1));
   VG_(memcpy)(&vex->guest_YMM2,  &mach->__fpu_xmm2,  sizeof(mach->__fpu_xmm2));
   VG_(memcpy)(&vex->guest_YMM3,  &mach->__fpu_xmm3,  sizeof(mach->__fpu_xmm3));
   VG_(memcpy)(&vex->guest_YMM4,  &mach->__fpu_xmm4,  sizeof(mach->__fpu_xmm4));
   VG_(memcpy)(&vex->guest_YMM5,  &mach->__fpu_xmm5,  sizeof(mach->__fpu_xmm5));
   VG_(memcpy)(&vex->guest_YMM6,  &mach->__fpu_xmm6,  sizeof(mach->__fpu_xmm6));
   VG_(memcpy)(&vex->guest_YMM7,  &mach->__fpu_xmm7,  sizeof(mach->__fpu_xmm7));
   VG_(memcpy)(&vex->guest_YMM8,  &mach->__fpu_xmm8,  sizeof(mach->__fpu_xmm8));
   VG_(memcpy)(&vex->guest_YMM9,  &mach->__fpu_xmm9,  sizeof(mach->__fpu_xmm9));
   VG_(memcpy)(&vex->guest_YMM10, &mach->__fpu_xmm10, sizeof(mach->__fpu_xmm10));
   VG_(memcpy)(&vex->guest_YMM11, &mach->__fpu_xmm11, sizeof(mach->__fpu_xmm11));
   VG_(memcpy)(&vex->guest_YMM12, &mach->__fpu_xmm12, sizeof(mach->__fpu_xmm12));
   VG_(memcpy)(&vex->guest_YMM13, &mach->__fpu_xmm13, sizeof(mach->__fpu_xmm13));
   VG_(memcpy)(&vex->guest_YMM14, &mach->__fpu_xmm14, sizeof(mach->__fpu_xmm14));
   VG_(memcpy)(&vex->guest_YMM15, &mach->__fpu_xmm15, sizeof(mach->__fpu_xmm15));
}
void thread_state_to_vex(const thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         VexGuestArchState *vex_generic)
{
   VexGuestAMD64State *vex = (VexGuestAMD64State *)vex_generic;

   switch (flavor) {
   case x86_THREAD_STATE64:
      vg_assert(count == x86_THREAD_STATE64_COUNT);
      x86_thread_state64_to_vex((const x86_thread_state64_t *)mach_generic, vex);
      break;
   case x86_FLOAT_STATE64:
      vg_assert(count == x86_FLOAT_STATE64_COUNT);
      x86_float_state64_to_vex((const x86_float_state64_t *)mach_generic, vex);
      break;

   default:
      vg_assert(0);
      break;
   }
}
ThreadState *build_thread(const thread_state_t state,
                          thread_state_flavor_t flavor,
                          mach_msg_type_number_t count)
{
   ThreadId tid = VG_(alloc_ThreadState)();
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(flavor == x86_THREAD_STATE64);
   vg_assert(count == x86_THREAD_STATE64_COUNT);

   // Initialize machine registers

   thread_state_to_vex(state, flavor, count, &tst->arch.vex);

   I_die_here;
   // GrP fixme signals, sig_mask, tmp_sig_mask, os_state.parent

   find_stack_segment(tid, tst->arch.vex.guest_RSP);

   return tst;
}
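
/* Note: I_die_here fires before the stack segment is found, so any
   Mach-side thread creation that reaches build_thread currently aborts;
   the find_stack_segment() call and return above are unreachable until
   the fixme items are addressed. */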
// Edit the thread state to send to the real kernel.
// The real thread will run start_thread_NORETURN(tst)
// on a separate non-client stack.
void hijack_thread_state(thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         ThreadState *tst)
{
   x86_thread_state64_t *mach = (x86_thread_state64_t *)mach_generic;
   char *stack;

   vg_assert(flavor == x86_THREAD_STATE64);
   vg_assert(count == x86_THREAD_STATE64_COUNT);

   stack = (char *)allocstack(tst->tid);
   stack -= 64+320;                       // make room for top frame
   memset(stack, 0, 64+320);              // ...and clear it
   *(uintptr_t *)stack = 0;               // push fake return address

   mach->__rdi = (uintptr_t)tst;          // arg1 = tst
   mach->__rip = (uintptr_t)&start_thread_NORETURN;
   mach->__rsp = (uintptr_t)stack;
}
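
/* The net effect: when the kernel resumes this thread it executes
   start_thread_NORETURN(tst) on the freshly allocated Valgrind stack.
   The zeroed "return address" at the top of the frame is never popped,
   since start_thread_NORETURN does not return. */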
/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f.*/
__attribute__((noreturn))
void call_on_new_stack_0_1 ( Addr stack,
                             Addr retaddr,
                             void (*f)(Word),
                             Word arg1 );
// %rdi == stack (must be 16-byte aligned)
// %rsi == retaddr
// %rdx == f
// %rcx == arg1
asm(
".globl _call_on_new_stack_0_1\n"
"_call_on_new_stack_0_1:\n"
"   movq  %rsp, %rbp\n"     // remember old stack pointer
"   movq  %rdi, %rsp\n"     // set new stack
"   movq  %rcx, %rdi\n"     // set arg1
"   pushq %rsi\n"           // retaddr to new stack
"   pushq %rdx\n"           // f to new stack
"   movq  $0, %rax\n"       // zero all other GP regs
"   movq  $0, %rbx\n"
"   movq  $0, %rcx\n"
"   movq  $0, %rdx\n"
"   movq  $0, %rsi\n"
"   movq  $0, %rbp\n"
"   movq  $0, %r8\n"
"   movq  $0, %r9\n"
"   movq  $0, %r10\n"
"   movq  $0, %r11\n"
"   movq  $0, %r12\n"
"   movq  $0, %r13\n"
"   movq  $0, %r14\n"
"   movq  $0, %r15\n"
"   ret\n"                  // jump to f
"   ud2\n"                  // should never get here
);
asm(
".globl _pthread_hijack_asm\n"
"_pthread_hijack_asm:\n"
"   movq %rsp,%rbp\n"
"   push $0\n"              // alignment pad
"   push %rbp\n"            // original sp
                            // other values stay where they are in registers
"   push $0\n"              // fake return address
"   jmp _pthread_hijack\n"
);
void pthread_hijack(Addr self, Addr kport, Addr func, Addr func_arg,
                    Addr stacksize, Addr flags, Addr sp)
{
   vki_sigset_t blockall;
   ThreadState *tst = (ThreadState *)func_arg;
   VexGuestAMD64State *vex = &tst->arch.vex;

   // VG_(printf)("pthread_hijack pthread %p, machthread %p, func %p, arg %p, stack %p, flags %p, stack %p\n", self, kport, func, func_arg, stacksize, flags, sp);

   // Wait for parent thread's permission.
   // The parent thread holds V's lock on our behalf.
   semaphore_wait(tst->os_state.child_go);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   LibVEX_GuestAMD64_initialise(vex);
   vex->guest_RIP = pthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = func;
   vex->guest_RCX = tst->os_state.func_arg;
   vex->guest_R8  = stacksize;
   vex->guest_R9  = flags;
   vex->guest_RSP = sp;

   // Record thread's stack and Mach port and pthread struct
   tst->os_state.pthread = self;
   tst->os_state.lwpid = kport;
   record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "thread-%p");

   if ((flags & 0x01000000) == 0) {
      // kernel allocated stack - needs mapping
      Addr stack = VG_PGROUNDUP(sp) - stacksize;
      tst->client_stack_highest_byte = stack+stacksize-1;
      tst->client_stack_szB = stacksize;

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);
   } else {
      // client allocated stack
      find_stack_segment(tst->tid, sp);
   }
   ML_(sync_mappings)("after", "pthread_hijack", 0);

   // DDD: should this be here rather than in POST(sys_bsdthread_create)?
   // But we don't have ptid here...
   //VG_TRACK ( pre_thread_ll_create, ptid, tst->tid );

   // Tell parent thread's POST(sys_bsdthread_create) that we're done
   // initializing registers and mapping memory.
   semaphore_signal(tst->os_state.child_done);
   // LOCK IS GONE BELOW THIS POINT

   // Go!
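   /* call_on_new_stack_0_1 switches onto this thread's Valgrind stack
      and jumps to start_thread_NORETURN; retaddr is 0 because
      start_thread_NORETURN never returns. */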
   call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                         start_thread_NORETURN, (Word)tst);

   /*NOTREACHED*/
   vg_assert(0);
}
asm(
".globl _wqthread_hijack_asm\n"
"_wqthread_hijack_asm:\n"
"   movq %rsp,%r9\n"        // original sp
                            // other values stay where they are in registers
"   push $0\n"              // fake return address
"   jmp _wqthread_hijack\n"
);
/*  wqthread note: The kernel may create or destroy pthreads in the
    wqthread pool at any time with no userspace interaction,
    and wqthread_start may be entered at any time with no userspace
    interaction.
    To handle this in valgrind, we create and destroy a valgrind
    thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem,
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestAMD64State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only
      way to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the
      lock.  At least that's clear for the 'reuse' case.  The
      non-reuse case?  Dunno, perhaps it's a new thread the kernel
      pulled out of a hat.  In any case we still need to take a
      lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   if (0) VG_(printf)(
             "wqthread_hijack: self %#lx, kport %#lx, "
             "stackaddr %#lx, workitem %#lx, reuse/flags %x, sp %#lx\n",
             self, kport, stackaddr, workitem, reuse, sp);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   /* For 10.7 and earlier, |reuse| appeared to be used as a simple
      boolean.  In 10.8 and later its name changed to |flags| and has
      various other bits OR-d into it too, so it's necessary to fish
      out just the relevant parts.  Hence: */
#  if DARWIN_VERS <= DARWIN_10_7
   Bool is_reuse = reuse != 0;
#  elif DARWIN_VERS == DARWIN_10_8 || DARWIN_VERS == DARWIN_10_9
   Bool is_reuse = (reuse & 0x20000 /* == WQ_FLAG_THREAD_REUSE */) != 0;
#  elif DARWIN_VERS == DARWIN_10_10
   // XXX FIXME is this correct?
   Bool is_reuse = (reuse & 0x20000 /* == WQ_FLAG_THREAD_REUSE */) != 0;
#  else
#    error "Unsupported Darwin version"
#  endif

   if (is_reuse) {

      /* For whatever reason, tst->os_state.pthread appears to have a
         constant offset of 96 on 10.7, but zero on 10.6 and 10.5.  No
         idea why. */
#     if DARWIN_VERS <= DARWIN_10_6
      UWord magic_delta = 0;
#     elif DARWIN_VERS == DARWIN_10_7 || DARWIN_VERS == DARWIN_10_8
      UWord magic_delta = 0x60;
#     elif DARWIN_VERS == DARWIN_10_9 || DARWIN_VERS == DARWIN_10_10
      UWord magic_delta = 0xE0;
#     else
#       error "magic_delta: to be computed on new OS version"
        // magic_delta = tst->os_state.pthread - self
#     endif

      // This thread already exists; we're merely re-entering
      // after leaving via workq_ops(WQOPS_THREAD_RETURN).
      // Don't allocate any V thread resources.
      // Do reset thread registers.
      ThreadId tid = VG_(lwpid_to_vgtid)(kport);
      vg_assert(VG_(is_valid_tid)(tid));
      vg_assert(mach_thread_self() == kport);

      tst = VG_(get_ThreadState)(tid);

      if (0) VG_(printf)("wqthread_hijack reuse %s: tid %d, tst %p, "
                         "tst->os_state.pthread %#lx\n",
                         tst->os_state.pthread == self ? "SAME" : "DIFF",
                         tid, tst, tst->os_state.pthread);

      vex = &tst->arch.vex;
      vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
      // This is a new thread.
      tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());
      vex = &tst->arch.vex;
      allocstack(tst->tid);
      LibVEX_GuestAMD64_initialise(vex);
   }

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   vex->guest_RIP = wqthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = stackaddr;
   vex->guest_RCX = workitem;
   vex->guest_R8  = reuse;
   vex->guest_R9  = 0;
   vex->guest_RSP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;
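
   /* Layout implied by the mappings in the new-thread case below: the
      pthread struct lives just above the stack top at
      [stack+stacksize, stack+stacksize+pthread_structsize), the stack
      itself occupies [stack, stack+stacksize), and an inaccessible
      guard page sits immediately below at stack-VKI_PAGE_SIZE. */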
   if (is_reuse) {
      // Continue V's thread back in the scheduler.
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   }
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");

      // kernel allocated stack - needs mapping
      tst->client_stack_highest_byte = stack+stacksize-1;
      tst->client_stack_szB = stacksize;

      // GrP fixme scheduler lock?!

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}
#endif // defined(VGP_amd64_darwin)


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/