/*--------------------------------------------------------------------*/
/*--- Darwin-specific syscalls, etc.        syswrap-amd64-darwin.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2005-2017 Apple Inc.
      Greg Parker  gparker@apple.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_amd64_darwin)
#include "config.h"                // DARWIN_VERS
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_*)
#include "pub_core_transtab.h"     // VG_(discard_translations)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_sigframe.h"     // For VG_(sigframe_destroy)()
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"  /* for decls of generic wrappers */
#include "priv_syswrap-darwin.h"   /* for decls of darwin-ish wrappers */
#include "priv_syswrap-main.h"

#include <mach/mach.h>

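/* Helpers for converting between the VEX guest state and the Mach
   thread-state structures (x86_thread_state64_t and friends).  Presumably
   these back whatever parts of the Darwin syswrap code have to hand register
   state to the kernel in Mach format or rebuild guest state from it; see
   also build_thread() and hijack_thread_state() further down. */
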
static void x86_thread_state64_from_vex(x86_thread_state64_t *mach,
                                        VexGuestAMD64State *vex)
{
   mach->__rax = vex->guest_RAX;
   mach->__rbx = vex->guest_RBX;
   mach->__rcx = vex->guest_RCX;
   mach->__rdx = vex->guest_RDX;
   mach->__rdi = vex->guest_RDI;
   mach->__rsi = vex->guest_RSI;
   mach->__rbp = vex->guest_RBP;
   mach->__rsp = vex->guest_RSP;
   mach->__rflags = LibVEX_GuestAMD64_get_rflags(vex);
   mach->__rip = vex->guest_RIP;
   mach->__r8  = vex->guest_R8;
   mach->__r9  = vex->guest_R9;
   mach->__r10 = vex->guest_R10;
   mach->__r11 = vex->guest_R11;
   mach->__r12 = vex->guest_R12;
   mach->__r13 = vex->guest_R13;
   mach->__r14 = vex->guest_R14;
   mach->__r15 = vex->guest_R15;
   /* GrP fixme
   mach->__cs = vex->guest_CS;
   mach->__fs = vex->guest_FS;
   mach->__gs = vex->guest_GS;
   */
}

static void x86_float_state64_from_vex(x86_float_state64_t *mach,
                                       VexGuestAMD64State *vex)
{
   // DDD: #warning GrP fixme fp state
   // JRS: what about the YMMHI bits?  Are they important?
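   /* Note: sizeof(mach->__fpu_xmm0) is 16 bytes, so each copy below moves
      only the low (XMM) lane of the 256-bit guest_YMM register; the YMM
      high halves have no home in x86_float_state64_t (AVX state is a
      separate thread-state flavor), which is presumably what the question
      above is getting at. */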
   VG_(memcpy)(&mach->__fpu_xmm0,  &vex->guest_YMM0,  sizeof(mach->__fpu_xmm0));
   VG_(memcpy)(&mach->__fpu_xmm1,  &vex->guest_YMM1,  sizeof(mach->__fpu_xmm1));
   VG_(memcpy)(&mach->__fpu_xmm2,  &vex->guest_YMM2,  sizeof(mach->__fpu_xmm2));
   VG_(memcpy)(&mach->__fpu_xmm3,  &vex->guest_YMM3,  sizeof(mach->__fpu_xmm3));
   VG_(memcpy)(&mach->__fpu_xmm4,  &vex->guest_YMM4,  sizeof(mach->__fpu_xmm4));
   VG_(memcpy)(&mach->__fpu_xmm5,  &vex->guest_YMM5,  sizeof(mach->__fpu_xmm5));
   VG_(memcpy)(&mach->__fpu_xmm6,  &vex->guest_YMM6,  sizeof(mach->__fpu_xmm6));
   VG_(memcpy)(&mach->__fpu_xmm7,  &vex->guest_YMM7,  sizeof(mach->__fpu_xmm7));
   VG_(memcpy)(&mach->__fpu_xmm8,  &vex->guest_YMM8,  sizeof(mach->__fpu_xmm8));
   VG_(memcpy)(&mach->__fpu_xmm9,  &vex->guest_YMM9,  sizeof(mach->__fpu_xmm9));
   VG_(memcpy)(&mach->__fpu_xmm10, &vex->guest_YMM10, sizeof(mach->__fpu_xmm10));
   VG_(memcpy)(&mach->__fpu_xmm11, &vex->guest_YMM11, sizeof(mach->__fpu_xmm11));
   VG_(memcpy)(&mach->__fpu_xmm12, &vex->guest_YMM12, sizeof(mach->__fpu_xmm12));
   VG_(memcpy)(&mach->__fpu_xmm13, &vex->guest_YMM13, sizeof(mach->__fpu_xmm13));
   VG_(memcpy)(&mach->__fpu_xmm14, &vex->guest_YMM14, sizeof(mach->__fpu_xmm14));
   VG_(memcpy)(&mach->__fpu_xmm15, &vex->guest_YMM15, sizeof(mach->__fpu_xmm15));
}

void thread_state_from_vex(thread_state_t mach_generic,
                           thread_state_flavor_t flavor,
                           mach_msg_type_number_t count,
                           VexGuestArchState *vex_generic)
{
   VexGuestAMD64State *vex = (VexGuestAMD64State *)vex_generic;

   switch (flavor) {
   case x86_THREAD_STATE64:
      vg_assert(count == x86_THREAD_STATE64_COUNT);
      x86_thread_state64_from_vex((x86_thread_state64_t *)mach_generic, vex);
      break;

   case x86_FLOAT_STATE64:
      vg_assert(count == x86_FLOAT_STATE64_COUNT);
      x86_float_state64_from_vex((x86_float_state64_t *)mach_generic, vex);
      break;
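
   /* The two "combined" flavors below start with an x86_state_hdr at offset 0
      of both x86_thread_state_t and x86_float_state_t, so writing the header
      through an x86_float_state_t cast lands in the right place for the
      thread-state case as well (presumably why the same cast is used for
      both). */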
   case x86_THREAD_STATE:
      ((x86_float_state_t *)mach_generic)->fsh.flavor = flavor;
      ((x86_float_state_t *)mach_generic)->fsh.count = count;
      x86_thread_state64_from_vex(&((x86_thread_state_t *)mach_generic)->uts.ts64, vex);
      break;

   case x86_FLOAT_STATE:
      ((x86_float_state_t *)mach_generic)->fsh.flavor = flavor;
      ((x86_float_state_t *)mach_generic)->fsh.count = count;
      x86_float_state64_from_vex(&((x86_float_state_t *)mach_generic)->ufs.fs64, vex);
      break;

   case x86_EXCEPTION_STATE:
      VG_(printf)("thread_state_from_vex: TODO, want exception state\n");
      vg_assert(0);

   default:
      VG_(printf)("thread_state_from_vex: flavor:%#x\n", flavor);
      vg_assert(0);
   }
}

static void x86_thread_state64_to_vex(const x86_thread_state64_t *mach,
                                      VexGuestAMD64State *vex)
{
   LibVEX_GuestAMD64_initialise(vex);
   vex->guest_RAX = mach->__rax;
   vex->guest_RBX = mach->__rbx;
   vex->guest_RCX = mach->__rcx;
   vex->guest_RDX = mach->__rdx;
   vex->guest_RDI = mach->__rdi;
   vex->guest_RSI = mach->__rsi;
   vex->guest_RBP = mach->__rbp;
   vex->guest_RSP = mach->__rsp;
   // DDD: #warning GrP fixme eflags
   vex->guest_RIP = mach->__rip;
   vex->guest_R8  = mach->__r8;
   vex->guest_R9  = mach->__r9;
   vex->guest_R10 = mach->__r10;
   vex->guest_R11 = mach->__r11;
   vex->guest_R12 = mach->__r12;
   vex->guest_R13 = mach->__r13;
   vex->guest_R14 = mach->__r14;
   vex->guest_R15 = mach->__r15;
   /* GrP fixme
   vex->guest_CS = mach->__cs;
   vex->guest_FS = mach->__fs;
   vex->guest_GS = mach->__gs;
   */
}

static void x86_float_state64_to_vex(const x86_float_state64_t *mach,
                                     VexGuestAMD64State *vex)
{
   // DDD: #warning GrP fixme fp state
   // JRS: what about the YMMHI bits?  Are they important?
   VG_(memcpy)(&vex->guest_YMM0,  &mach->__fpu_xmm0,  sizeof(mach->__fpu_xmm0));
   VG_(memcpy)(&vex->guest_YMM1,  &mach->__fpu_xmm1,  sizeof(mach->__fpu_xmm1));
   VG_(memcpy)(&vex->guest_YMM2,  &mach->__fpu_xmm2,  sizeof(mach->__fpu_xmm2));
   VG_(memcpy)(&vex->guest_YMM3,  &mach->__fpu_xmm3,  sizeof(mach->__fpu_xmm3));
   VG_(memcpy)(&vex->guest_YMM4,  &mach->__fpu_xmm4,  sizeof(mach->__fpu_xmm4));
   VG_(memcpy)(&vex->guest_YMM5,  &mach->__fpu_xmm5,  sizeof(mach->__fpu_xmm5));
   VG_(memcpy)(&vex->guest_YMM6,  &mach->__fpu_xmm6,  sizeof(mach->__fpu_xmm6));
   VG_(memcpy)(&vex->guest_YMM7,  &mach->__fpu_xmm7,  sizeof(mach->__fpu_xmm7));
   VG_(memcpy)(&vex->guest_YMM8,  &mach->__fpu_xmm8,  sizeof(mach->__fpu_xmm8));
   VG_(memcpy)(&vex->guest_YMM9,  &mach->__fpu_xmm9,  sizeof(mach->__fpu_xmm9));
   VG_(memcpy)(&vex->guest_YMM10, &mach->__fpu_xmm10, sizeof(mach->__fpu_xmm10));
   VG_(memcpy)(&vex->guest_YMM11, &mach->__fpu_xmm11, sizeof(mach->__fpu_xmm11));
   VG_(memcpy)(&vex->guest_YMM12, &mach->__fpu_xmm12, sizeof(mach->__fpu_xmm12));
   VG_(memcpy)(&vex->guest_YMM13, &mach->__fpu_xmm13, sizeof(mach->__fpu_xmm13));
   VG_(memcpy)(&vex->guest_YMM14, &mach->__fpu_xmm14, sizeof(mach->__fpu_xmm14));
   VG_(memcpy)(&vex->guest_YMM15, &mach->__fpu_xmm15, sizeof(mach->__fpu_xmm15));
}

void thread_state_to_vex(const thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         VexGuestArchState *vex_generic)
{
   VexGuestAMD64State *vex = (VexGuestAMD64State *)vex_generic;

   switch (flavor) {
   case x86_THREAD_STATE64:
      vg_assert(count == x86_THREAD_STATE64_COUNT);
      x86_thread_state64_to_vex((const x86_thread_state64_t*)mach_generic, vex);
      break;
   case x86_FLOAT_STATE64:
      vg_assert(count == x86_FLOAT_STATE64_COUNT);
      x86_float_state64_to_vex((const x86_float_state64_t*)mach_generic, vex);
      break;

   default:
      vg_assert(0);
      break;
   }
}

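/* Presumably used to create a Valgrind thread for a client thread made via
   the raw Mach thread-creation path rather than bsdthread_create(); note
   that the tail of this function is unimplemented (I_die_here below). */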
ThreadState *build_thread(const thread_state_t state,
                          thread_state_flavor_t flavor,
                          mach_msg_type_number_t count)
{
   ThreadId tid = VG_(alloc_ThreadState)();
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(flavor == x86_THREAD_STATE64);
   vg_assert(count == x86_THREAD_STATE64_COUNT);

   // Initialize machine registers

   thread_state_to_vex(state, flavor, count, &tst->arch.vex);

   I_die_here;
   // GrP fixme signals, sig_mask, tmp_sig_mask, os_state.parent

   find_stack_segment(tid, tst->arch.vex.guest_RSP);

   return tst;
}

// Edit the thread state to send to the real kernel.
// The real thread will run start_thread_NORETURN(tst)
// on a separate non-client stack.
void hijack_thread_state(thread_state_t mach_generic,
                         thread_state_flavor_t flavor,
                         mach_msg_type_number_t count,
                         ThreadState *tst)
{
   x86_thread_state64_t *mach = (x86_thread_state64_t *)mach_generic;
   char *stack;

   vg_assert(flavor == x86_THREAD_STATE64);
   vg_assert(count == x86_THREAD_STATE64_COUNT);

   stack = (char *)allocstack(tst->tid);
   stack -= 64+320;                       // make room for top frame
   memset(stack, 0, 64+320);              // ...and clear it
   *(uintptr_t *)stack = 0;               // push fake return address

   mach->__rdi = (uintptr_t)tst;          // arg1 = tst
   mach->__rip = (uintptr_t)&start_thread_NORETURN;
   mach->__rsp = (uintptr_t)stack;
}

/* Call f(arg1), but first switch stacks, using 'stack' as the new
   stack, and use 'retaddr' as f's return-to address.  Also, clear all
   the integer registers before entering f. */
__attribute__((noreturn))
void call_on_new_stack_0_1 ( Addr stack,
                             Addr retaddr,
                             void (*f)(Word),
                             Word arg1 );
// %rdi == stack (must be 16-byte aligned)
// %rsi == retaddr
// %rdx == f
// %rcx == arg1
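// The trick below: after switching %rsp to the new stack, retaddr and then f
// are pushed, so the final "ret" pops f's address and jumps to it while
// leaving retaddr behind on the new stack as f's own return address.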
asm(
".globl _call_on_new_stack_0_1\n"
"_call_on_new_stack_0_1:\n"
"   movq  %rsp, %rbp\n"     // remember old stack pointer
"   movq  %rdi, %rsp\n"     // set new stack
"   movq  %rcx, %rdi\n"     // set arg1
"   pushq %rsi\n"           // retaddr to new stack
"   pushq %rdx\n"           // f to new stack
"   movq  $0, %rax\n"       // zero all other GP regs
"   movq  $0, %rbx\n"
"   movq  $0, %rcx\n"
"   movq  $0, %rdx\n"
"   movq  $0, %rsi\n"
"   movq  $0, %rbp\n"
"   movq  $0, %r8\n"
"   movq  $0, %r9\n"
"   movq  $0, %r10\n"
"   movq  $0, %r11\n"
"   movq  $0, %r12\n"
"   movq  $0, %r13\n"
"   movq  $0, %r14\n"
"   movq  $0, %r15\n"
"   ret\n"                  // jump to f
"   ud2\n"                  // should never get here
);

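// pthread_hijack() takes seven arguments; the first six travel in
// %rdi..%r9, so the stub below saves the original %rsp and pushes it
// (plus an alignment pad and a fake return address) so that it arrives
// as the seventh, stack-passed 'sp' argument.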
asm(
".globl _pthread_hijack_asm\n"
"_pthread_hijack_asm:\n"
"   movq %rsp,%rbp\n"
"   push $0\n"    // alignment pad
"   push %rbp\n"  // original sp
                  // other values stay where they are in registers
"   push $0\n"    // fake return address
"   jmp _pthread_hijack\n"
);

void pthread_hijack(Addr self, Addr kport, Addr func, Addr func_arg,
                    Addr stacksize, Addr flags, Addr sp)
{
   vki_sigset_t blockall;
   ThreadState *tst = (ThreadState *)func_arg;
   VexGuestAMD64State *vex = &tst->arch.vex;

   // VG_(printf)("pthread_hijack pthread %p, machthread %p, func %p, arg %p, stack %p, flags %p, stack %p\n", self, kport, func, func_arg, stacksize, flags, sp);

   // Wait for parent thread's permission.
   // The parent thread holds V's lock on our behalf.
   semaphore_wait(tst->os_state.child_go);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   LibVEX_GuestAMD64_initialise(vex);
   vex->guest_RIP = pthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = func;
   vex->guest_RCX = tst->os_state.func_arg;
   vex->guest_R8  = stacksize;
   vex->guest_R9  = flags;
   vex->guest_RSP = sp;
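   // Assumption: guest_GS_CONST models the thread's %gs base, which on
   // 10.12+ the kernel points at the pthread's TSD area; hence
   // self + pthread_tsd_offset below.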
#if DARWIN_VERS >= DARWIN_10_12
   vex->guest_GS_CONST = self + pthread_tsd_offset;
#endif

   // Record thread's stack and Mach port and pthread struct
   tst->os_state.pthread = self;
   tst->os_state.lwpid = kport;
   record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "thread-%p");
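
   // The 0x01000000 bit appears to be PTHREAD_START_CUSTOM, i.e. the client
   // supplied its own stack; when it is clear the kernel allocated the stack
   // and we must report those mappings to the tool ourselves.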
   if ((flags & 0x01000000) == 0) {
      // kernel allocated stack - needs mapping
      Addr stack = VG_PGROUNDUP(sp) - stacksize;
      tst->client_stack_highest_byte = stack+stacksize-1;
      tst->client_stack_szB = stacksize;
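
      // Layout implied by the notifications below: a guard page below the
      // stack, 'stacksize' bytes of stack, and the pthread struct sitting
      // just above the stack's top.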
      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);
   } else {
      // client allocated stack
      find_stack_segment(tst->tid, sp);
   }
   ML_(sync_mappings)("after", "pthread_hijack", 0);

   // DDD: should this be here rather than in POST(sys_bsdthread_create)?
   // But we don't have ptid here...
   //VG_TRACK ( pre_thread_ll_create, ptid, tst->tid );

   // Tell parent thread's POST(sys_bsdthread_create) that we're done
   // initializing registers and mapping memory.
   semaphore_signal(tst->os_state.child_done);
   // LOCK IS GONE BELOW THIS POINT

   // Go!
   call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                         start_thread_NORETURN, (Word)tst);

   /*NOTREACHED*/
   vg_assert(0);
}

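// wqthread_hijack() takes six register-passed arguments.  The kernel enters
// this stub with the first five already in %rdi..%r8 (the asm's own comment
// notes the other values stay in registers), so it only needs to capture the
// original %rsp into %r9 as the 'sp' argument and push a fake return address
// before jumping to wqthread_hijack().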
asm(
".globl _wqthread_hijack_asm\n"
"_wqthread_hijack_asm:\n"
"   movq %rsp,%r9\n"  // original sp
                      // other values stay where they are in registers
"   push $0\n"        // fake return address
"   jmp _wqthread_hijack\n"
);

/*  wqthread note: The kernel may create or destroy pthreads in the
    wqthread pool at any time with no userspace interaction,
    and wqthread_start may be entered at any time with no userspace
    interaction.
    To handle this in valgrind, we create and destroy a valgrind
    thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem,
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestAMD64State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only
      way to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the
      lock.  At least that's clear for the 'reuse' case.  The
      non-reuse case?  Dunno, perhaps it's a new thread the kernel
      pulled out of a hat.  In any case we still need to take a
      lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   if (0) VG_(printf)(
             "wqthread_hijack: self %#lx, kport %#lx, "
             "stackaddr %#lx, workitem %#lx, reuse/flags %x, sp %#lx\n",
             self, kport, stackaddr, workitem, (UInt)reuse, sp);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   /* For 10.7 and earlier, |reuse| appeared to be used as a simple
      boolean.  In 10.8 and later its name changed to |flags| and has
      various other bits OR-d into it too, so it's necessary to fish
      out just the relevant parts.  Hence: */
#  if DARWIN_VERS <= DARWIN_10_7
   Bool is_reuse = reuse != 0;
#  elif DARWIN_VERS > DARWIN_10_7
   Bool is_reuse = (reuse & 0x20000 /* == WQ_FLAG_THREAD_REUSE */) != 0;
#  else
#    error "Unsupported Darwin version"
#  endif

   if (is_reuse) {

      /* For whatever reason, tst->os_state.pthread appears to have a
         constant offset of 96 on 10.7, but zero on 10.6 and 10.5.  No
         idea why. */
#     if DARWIN_VERS <= DARWIN_10_6
      UWord magic_delta = 0;
#     elif DARWIN_VERS == DARWIN_10_7 || DARWIN_VERS == DARWIN_10_8
      UWord magic_delta = 0x60;
#     elif DARWIN_VERS == DARWIN_10_9 \
           || DARWIN_VERS == DARWIN_10_10 \
           || DARWIN_VERS == DARWIN_10_11 \
           || DARWIN_VERS == DARWIN_10_12 \
           || DARWIN_VERS == DARWIN_10_13
      UWord magic_delta = 0xE0;
#     else
#       error "magic_delta: to be computed on new OS version"
        // magic_delta = tst->os_state.pthread - self
#     endif

      // This thread already exists; we're merely re-entering
      // after leaving via workq_ops(WQOPS_THREAD_RETURN).
      // Don't allocate any V thread resources.
      // Do reset thread registers.
      ThreadId tid = VG_(lwpid_to_vgtid)(kport);
      vg_assert(VG_(is_valid_tid)(tid));
      vg_assert(mach_thread_self() == kport);

      tst = VG_(get_ThreadState)(tid);

      if (0) VG_(printf)("wqthread_hijack reuse %s: tid %u, tst %p, "
                         "tst->os_state.pthread %#lx, self %#lx\n",
                         tst->os_state.pthread == self ? "SAME" : "DIFF",
                         tid, (void *)tst, tst->os_state.pthread, self);

      vex = &tst->arch.vex;
      vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
      // This is a new thread.
      tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());
      vex = &tst->arch.vex;
      allocstack(tst->tid);
      LibVEX_GuestAMD64_initialise(vex);
   }

   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace,
   // which requires valid register data.
   vex->guest_RIP = wqthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = stackaddr;
   vex->guest_RCX = workitem;
   vex->guest_R8  = reuse;
   vex->guest_R9  = 0;
   vex->guest_RSP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;

   if (is_reuse) {
      // Continue V's thread back in the scheduler.
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   }
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");

      // kernel allocated stack - needs mapping
      tst->client_stack_highest_byte = stack+stacksize-1;
      tst->client_stack_szB = stacksize;

      // GrP fixme scheduler lock?!

      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize,
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_a_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0,
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}

#endif // defined(VGP_amd64_darwin)


/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/