2 /*--------------------------------------------------------------------*/
3 /*--- Thread scheduling. scheduler.c ---*/
4 /*--------------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2000-2017 Julian Seward
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, see <http://www.gnu.org/licenses/>.
26 The GNU General Public License is contained in the file COPYING.
32 Valgrind tries to emulate the kernel's threading as closely as
33 possible. The client does all threading via the normal syscalls
34 (on Linux: clone, etc). Valgrind emulates this by creating exactly
35 the same process structure as would be created without Valgrind.
36 There are no extra threads.
38 The main difference is that Valgrind only allows one client thread
39 to run at once. This is controlled with the CPU Big Lock,
40 "the_BigLock". Any time a thread wants to run client code or
41 manipulate any shared state (which is anything other than its own
42 ThreadState entry), it must hold the_BigLock.
44 When a thread is about to block in a blocking syscall, it releases
45 the_BigLock, and re-takes it when it becomes runnable again (either
46 because the syscall finished, or we took a signal).
48 VG_(scheduler) therefore runs in each thread. It returns only when
49 the thread is exiting, either because it exited itself, or it was
50 told to exit by another thread.
52 This file is almost entirely OS-independent. The details of how
53 the OS handles threading and signalling are abstracted away and
54 implemented elsewhere. [Some of the functions have worked their
55 way back for the moment, until we do an OS port in earnest...]
59 #include "pub_core_basics.h"
60 #include "pub_core_debuglog.h"
61 #include "pub_core_vki.h"
62 #include "pub_core_vkiscnums.h" // __NR_sched_yield
63 #include "pub_core_threadstate.h"
64 #include "pub_core_clientstate.h"
65 #include "pub_core_aspacemgr.h"
66 #include "pub_core_clreq.h" // for VG_USERREQ__*
67 #include "pub_core_dispatch.h"
68 #include "pub_core_errormgr.h" // For VG_(get_n_errs_found)()
69 #include "pub_core_extension.h"
70 #include "pub_core_gdbserver.h" // for VG_(gdbserver)/VG_(gdbserver_activity)
71 #include "pub_core_libcbase.h"
72 #include "pub_core_libcassert.h"
73 #include "pub_core_libcprint.h"
74 #include "pub_core_libcproc.h"
75 #include "pub_core_libcsignal.h"
76 #if defined(VGO_darwin)
77 #include "pub_core_mach.h"
79 #include "pub_core_machine.h"
80 #include "pub_core_mallocfree.h"
81 #include "pub_core_options.h"
82 #include "pub_core_replacemalloc.h"
83 #include "pub_core_sbprofile.h"
84 #include "pub_core_signals.h"
85 #include "pub_core_stacks.h"
86 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
87 #include "pub_core_syscall.h"
88 #include "pub_core_syswrap.h"
89 #include "pub_core_tooliface.h"
90 #include "pub_core_translate.h" // For VG_(translate)()
91 #include "pub_core_transtab.h"
92 #include "pub_core_debuginfo.h" // VG_(di_notify_pdb_debuginfo)
93 #include "priv_sched-lock.h"
94 #include "pub_core_scheduler.h" // self
95 #include "pub_core_redir.h"
96 #include "libvex_emnote.h" // VexEmNote
99 /* ---------------------------------------------------------------------
100 Types and globals for the scheduler.
101 ------------------------------------------------------------------ */
103 /* ThreadId and ThreadState are defined elsewhere*/
105 /* If False, a fault is Valgrind-internal (ie, a bug) */
106 Bool
VG_(in_generated_code
) = False
;
108 /* 64-bit counter for the number of basic blocks done. */
109 static ULong bbs_done
= 0;
111 /* Counter to see if vgdb activity is to be verified.
112 When nr of bbs done reaches vgdb_next_poll, scheduler will
113 poll for gdbserver activity. VG_(force_vgdb_poll) and
114 VG_(disable_vgdb_poll) allows the valgrind core (e.g. m_gdbserver)
115 to control when the next poll will be done. */
116 static ULong vgdb_next_poll
;
119 static void do_client_request ( ThreadId tid
);
120 static void scheduler_sanity ( ThreadId tid
);
121 static void mostly_clear_thread_record ( ThreadId tid
);
124 static ULong n_scheduling_events_MINOR
= 0;
125 static ULong n_scheduling_events_MAJOR
= 0;
127 /* Stats: number of XIndirs looked up in the fast cache, the number of hits in
128 ways 1, 2 and 3, and the number of misses. The number of hits in way 0 isn't
129 recorded because it can be computed from these five numbers. */
130 static ULong stats__n_xIndirs
= 0;
131 static ULong stats__n_xIndir_hits1
= 0;
132 static ULong stats__n_xIndir_hits2
= 0;
133 static ULong stats__n_xIndir_hits3
= 0;
134 static ULong stats__n_xIndir_misses
= 0;
136 /* And 32-bit temp bins for the above, so that 32-bit platforms don't
137 have to do 64 bit incs on the hot path through
138 VG_(disp_cp_xindir). */
139 /*global*/ UInt
VG_(stats__n_xIndirs_32
) = 0;
140 /*global*/ UInt
VG_(stats__n_xIndir_hits1_32
) = 0;
141 /*global*/ UInt
VG_(stats__n_xIndir_hits2_32
) = 0;
142 /*global*/ UInt
VG_(stats__n_xIndir_hits3_32
) = 0;
143 /*global*/ UInt
VG_(stats__n_xIndir_misses_32
) = 0;
145 /* Sanity checking counts. */
146 static UInt sanity_fast_count
= 0;
147 static UInt sanity_slow_count
= 0;
149 void VG_(print_scheduler_stats
)(void)
151 VG_(message
)(Vg_DebugMsg
,
152 "scheduler: %'llu event checks.\n", bbs_done
);
155 = stats__n_xIndirs
- stats__n_xIndir_hits1
- stats__n_xIndir_hits2
156 - stats__n_xIndir_hits3
- stats__n_xIndir_misses
;
157 VG_(message
)(Vg_DebugMsg
,
158 "scheduler: %'llu indir transfers, "
159 "%'llu misses (1 in %llu) ..\n",
160 stats__n_xIndirs
, stats__n_xIndir_misses
,
161 stats__n_xIndirs
/ (stats__n_xIndir_misses
162 ? stats__n_xIndir_misses
: 1));
163 VG_(message
)(Vg_DebugMsg
,
164 "scheduler: .. of which: %'llu hit0, %'llu hit1, "
165 "%'llu hit2, %'llu hit3, %'llu missed\n",
167 stats__n_xIndir_hits1
,
168 stats__n_xIndir_hits2
,
169 stats__n_xIndir_hits3
,
170 stats__n_xIndir_misses
);
172 VG_(message
)(Vg_DebugMsg
,
173 "scheduler: %'llu/%'llu major/minor sched events.\n",
174 n_scheduling_events_MAJOR
, n_scheduling_events_MINOR
);
175 VG_(message
)(Vg_DebugMsg
,
176 " sanity: %u cheap, %u expensive checks.\n",
177 sanity_fast_count
, sanity_slow_count
);
181 * Mutual exclusion object used to serialize threads.
183 static struct sched_lock
*the_BigLock
;
186 /* ---------------------------------------------------------------------
187 Helper functions for the scheduler.
188 ------------------------------------------------------------------ */
190 static void maybe_progress_report ( UInt reporting_interval_seconds
)
192 /* This is when the next report is due, in user cpu milliseconds since
193 process start. This is a global variable so this won't be thread-safe
194 if Valgrind is ever made multithreaded. For now it's fine. */
195 static UInt next_report_due_at
= 0;
197 /* First of all, figure out whether another report is due. It
199 UInt user_ms
= VG_(get_user_milliseconds
)();
200 if (LIKELY(user_ms
< next_report_due_at
))
203 Bool first_ever_call
= next_report_due_at
== 0;
205 /* A report is due. First, though, set the time for the next report. */
206 next_report_due_at
+= 1000 * reporting_interval_seconds
;
208 /* If it's been an excessively long time since the last check, we
209 might have gone more than one reporting interval forward. Guard
211 while (next_report_due_at
<= user_ms
)
212 next_report_due_at
+= 1000 * reporting_interval_seconds
;
214 /* Also we don't want to report anything on the first call, but we
215 have to wait till this point to leave, so that we set up the
216 next-call time correctly. */
220 /* Print the report. */
221 UInt user_cpu_seconds
= user_ms
/ 1000;
222 UInt wallclock_seconds
= VG_(read_millisecond_timer
)() / 1000;
223 Double millionEvCs
= ((Double
)bbs_done
) / 1000000.0;
224 Double thousandTIns
= ((Double
)VG_(get_bbs_translated
)()) / 1000.0;
225 Double thousandTOuts
= ((Double
)VG_(get_bbs_discarded_or_dumped
)()) / 1000.0;
226 UInt nThreads
= VG_(count_living_threads
)();
228 if (VG_(clo_verbosity
) > 0) {
229 VG_(dmsg
)("PROGRESS: U %'us, W %'us, %.1f%% CPU, EvC %.2fM, "
230 "TIn %.1fk, TOut %.1fk, #thr %u\n",
231 user_cpu_seconds
, wallclock_seconds
,
233 * (Double
)(user_cpu_seconds
)
234 / (Double
)(wallclock_seconds
== 0 ? 1 : wallclock_seconds
),
236 thousandTIns
, thousandTOuts
, nThreads
);
241 void print_sched_event ( ThreadId tid
, const HChar
* what
)
243 VG_(message
)(Vg_DebugMsg
, " SCHED[%u]: %s\n", tid
, what
);
246 /* For showing SB profiles, if the user asks to see them. */
248 void maybe_show_sb_profile ( void )
250 /* DO NOT MAKE NON-STATIC */
251 static ULong bbs_done_lastcheck
= 0;
253 vg_assert(VG_(clo_profyle_interval
) > 0);
254 Long delta
= (Long
)(bbs_done
- bbs_done_lastcheck
);
255 vg_assert(delta
>= 0);
256 if ((ULong
)delta
>= VG_(clo_profyle_interval
)) {
257 bbs_done_lastcheck
= bbs_done
;
258 VG_(get_and_show_SB_profile
)(bbs_done
);
263 const HChar
* name_of_sched_event ( UInt event
)
266 case VEX_TRC_JMP_INVALICACHE
: return "INVALICACHE";
267 case VEX_TRC_JMP_FLUSHDCACHE
: return "FLUSHDCACHE";
268 case VEX_TRC_JMP_NOREDIR
: return "NOREDIR";
269 case VEX_TRC_JMP_SIGILL
: return "SIGILL";
270 case VEX_TRC_JMP_SIGTRAP
: return "SIGTRAP";
271 case VEX_TRC_JMP_SIGSEGV
: return "SIGSEGV";
272 case VEX_TRC_JMP_SIGBUS
: return "SIGBUS";
273 case VEX_TRC_JMP_SIGFPE_INTOVF
:
274 case VEX_TRC_JMP_SIGFPE_INTDIV
: return "SIGFPE";
275 case VEX_TRC_JMP_EMWARN
: return "EMWARN";
276 case VEX_TRC_JMP_EMFAIL
: return "EMFAIL";
277 case VEX_TRC_JMP_CLIENTREQ
: return "CLIENTREQ";
278 case VEX_TRC_JMP_YIELD
: return "YIELD";
279 case VEX_TRC_JMP_NODECODE
: return "NODECODE";
280 case VEX_TRC_JMP_MAPFAIL
: return "MAPFAIL";
281 case VEX_TRC_JMP_EXTENSION
: return "EXTENSION";
282 case VEX_TRC_JMP_SYS_SYSCALL
: return "SYSCALL";
283 case VEX_TRC_JMP_SYS_INT32
: return "INT32";
284 case VEX_TRC_JMP_SYS_INT128
: return "INT128";
285 case VEX_TRC_JMP_SYS_INT129
: return "INT129";
286 case VEX_TRC_JMP_SYS_INT130
: return "INT130";
287 case VEX_TRC_JMP_SYS_INT145
: return "INT145";
288 case VEX_TRC_JMP_SYS_INT210
: return "INT210";
289 case VEX_TRC_JMP_SYS_SYSENTER
: return "SYSENTER";
290 case VEX_TRC_JMP_BORING
: return "VEX_BORING";
292 case VG_TRC_BORING
: return "VG_BORING";
293 case VG_TRC_INNER_FASTMISS
: return "FASTMISS";
294 case VG_TRC_INNER_COUNTERZERO
: return "COUNTERZERO";
295 case VG_TRC_FAULT_SIGNAL
: return "FAULTSIGNAL";
296 case VG_TRC_INVARIANT_FAILED
: return "INVFAILED";
297 case VG_TRC_CHAIN_ME_TO_SLOW_EP
: return "CHAIN_ME_SLOW";
298 case VG_TRC_CHAIN_ME_TO_FAST_EP
: return "CHAIN_ME_FAST";
299 default: return "??UNKNOWN??";
303 /* Allocate a completely empty ThreadState record. */
304 ThreadId
VG_(alloc_ThreadState
) ( void )
307 for (i
= 1; i
< VG_N_THREADS
; i
++) {
308 if (VG_(threads
)[i
].status
== VgTs_Empty
) {
309 VG_(threads
)[i
].status
= VgTs_Init
;
310 VG_(threads
)[i
].exitreason
= VgSrc_None
;
311 if (VG_(threads
)[i
].thread_name
)
312 VG_(free
)(VG_(threads
)[i
].thread_name
);
313 VG_(threads
)[i
].thread_name
= NULL
;
317 VG_(printf
)("Use --max-threads=INT to specify a larger number of threads\n"
318 "and rerun valgrind\n");
319 VG_(core_panic
)("Max number of threads is too low");
324 Mark a thread as Runnable. This will block until the_BigLock is
325 available, so that we get exclusive access to all the shared
326 structures and the CPU. Up until we get the_BigLock, we must not
327 touch any shared state.
329 When this returns, we'll actually be running.
331 void VG_(acquire_BigLock
)(ThreadId tid
, const HChar
* who
)
336 if (VG_(clo_trace_sched
)) {
337 HChar buf
[VG_(strlen
)(who
) + 30];
338 VG_(sprintf
)(buf
, "waiting for lock (%s)", who
);
339 print_sched_event(tid
, buf
);
343 /* First, acquire the_BigLock. We can't do anything else safely
344 prior to this point. Even doing debug printing prior to this
345 point is, technically, wrong. */
346 VG_(acquire_BigLock_LL
)(NULL
);
348 tst
= VG_(get_ThreadState
)(tid
);
350 vg_assert(tst
->status
!= VgTs_Runnable
);
352 tst
->status
= VgTs_Runnable
;
354 if (VG_(running_tid
) != VG_INVALID_THREADID
)
355 VG_(printf
)("tid %u found %u running\n", tid
, VG_(running_tid
));
356 vg_assert(VG_(running_tid
) == VG_INVALID_THREADID
);
357 VG_(running_tid
) = tid
;
359 { Addr gsp
= VG_(get_SP
)(tid
);
360 if (NULL
!= VG_(tdict
).track_new_mem_stack_w_ECU
)
361 VG_(unknown_SP_update_w_ECU
)(gsp
, gsp
, 0/*unknown origin*/);
363 VG_(unknown_SP_update
)(gsp
, gsp
);
366 if (VG_(clo_trace_sched
)) {
367 HChar buf
[VG_(strlen
)(who
) + 30];
368 VG_(sprintf
)(buf
, " acquired lock (%s)", who
);
369 print_sched_event(tid
, buf
);
374 Set a thread into a sleeping state, and give up exclusive access to
375 the CPU. On return, the thread must be prepared to block until it
376 is ready to run again (generally this means blocking in a syscall,
377 but it may mean that we remain in a Runnable state and we're just
378 yielding the CPU to another thread).
380 void VG_(release_BigLock
)(ThreadId tid
, ThreadStatus sleepstate
,
383 ThreadState
*tst
= VG_(get_ThreadState
)(tid
);
385 vg_assert(tst
->status
== VgTs_Runnable
);
387 vg_assert(sleepstate
== VgTs_WaitSys
||
388 sleepstate
== VgTs_Yielding
);
390 tst
->status
= sleepstate
;
392 vg_assert(VG_(running_tid
) == tid
);
393 VG_(running_tid
) = VG_INVALID_THREADID
;
395 if (VG_(clo_trace_sched
)) {
396 const HChar
*status
= VG_(name_of_ThreadStatus
)(sleepstate
);
397 HChar buf
[VG_(strlen
)(who
) + VG_(strlen
)(status
) + 30];
398 VG_(sprintf
)(buf
, "releasing lock (%s) -> %s", who
, status
);
399 print_sched_event(tid
, buf
);
402 /* Release the_BigLock; this will reschedule any runnable
404 VG_(release_BigLock_LL
)(NULL
);
407 static void init_BigLock(void)
409 vg_assert(!the_BigLock
);
410 the_BigLock
= ML_(create_sched_lock
)();
413 static void deinit_BigLock(void)
415 ML_(destroy_sched_lock
)(the_BigLock
);
419 /* See pub_core_scheduler.h for description */
420 void VG_(acquire_BigLock_LL
) ( const HChar
* who
)
422 ML_(acquire_sched_lock
)(the_BigLock
);
425 /* See pub_core_scheduler.h for description */
426 void VG_(release_BigLock_LL
) ( const HChar
* who
)
428 ML_(release_sched_lock
)(the_BigLock
);
431 Bool
VG_(owns_BigLock_LL
) ( ThreadId tid
)
433 return (ML_(get_sched_lock_owner
)(the_BigLock
)
434 == VG_(threads
)[tid
].os_state
.lwpid
);
438 /* Clear out the ThreadState and release the semaphore. Leaves the
439 ThreadState in VgTs_Zombie state, so that it doesn't get
440 reallocated until the caller is really ready. */
441 void VG_(exit_thread
)(ThreadId tid
)
443 vg_assert(VG_(is_valid_tid
)(tid
));
444 vg_assert(VG_(is_running_thread
)(tid
));
445 vg_assert(VG_(is_exiting
)(tid
));
447 mostly_clear_thread_record(tid
);
448 VG_(running_tid
) = VG_INVALID_THREADID
;
450 /* There should still be a valid exitreason for this thread */
451 vg_assert(VG_(threads
)[tid
].exitreason
!= VgSrc_None
);
453 if (VG_(clo_trace_sched
))
454 print_sched_event(tid
, "release lock in VG_(exit_thread)");
456 VG_(release_BigLock_LL
)(NULL
);
459 /* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
460 out of the syscall and onto doing the next thing, whatever that is.
461 If it isn't blocked in a syscall, has no effect on the thread. */
462 void VG_(get_thread_out_of_syscall
)(ThreadId tid
)
464 vg_assert(VG_(is_valid_tid
)(tid
));
465 vg_assert(!VG_(is_running_thread
)(tid
));
467 if (VG_(threads
)[tid
].status
== VgTs_WaitSys
) {
468 if (VG_(clo_trace_signals
)) {
469 VG_(message
)(Vg_DebugMsg
,
470 "get_thread_out_of_syscall zaps tid %u lwp %d\n",
471 tid
, VG_(threads
)[tid
].os_state
.lwpid
);
473 # if defined(VGO_darwin)
475 // GrP fixme use mach primitives on darwin?
476 // GrP fixme thread_abort_safely?
477 // GrP fixme race for thread with WaitSys set but not in syscall yet?
478 extern kern_return_t
thread_abort(mach_port_t
);
479 thread_abort(VG_(threads
)[tid
].os_state
.lwpid
);
483 __attribute__((unused
))
484 Int r
= VG_(tkill
)(VG_(threads
)[tid
].os_state
.lwpid
, VG_SIGVGKILL
);
485 /* JRS 2009-Mar-20: should we assert for r==0 (tkill succeeded)?
486 I'm really not sure. Here's a race scenario which argues
487 that we shoudn't; but equally I'm not sure the scenario is
488 even possible, because of constraints caused by the question
489 of who holds the BigLock when.
491 Target thread tid does sys_read on a socket and blocks. This
492 function gets called, and we observe correctly that tid's
493 status is WaitSys but then for whatever reason this function
494 goes very slowly for a while. Then data arrives from
495 wherever, tid's sys_read returns, tid exits. Then we do
496 tkill on tid, but tid no longer exists; tkill returns an
497 error code and the assert fails. */
498 /* vg_assert(r == 0); */
505 Yield the CPU for a short time to let some other thread run.
507 void VG_(vg_yield
)(void)
509 ThreadId tid
= VG_(running_tid
);
511 vg_assert(tid
!= VG_INVALID_THREADID
);
512 vg_assert(VG_(threads
)[tid
].os_state
.lwpid
== VG_(gettid
)());
514 VG_(release_BigLock
)(tid
, VgTs_Yielding
, "VG_(vg_yield)");
517 Tell the kernel we're yielding.
519 # if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
520 VG_(do_syscall0
)(__NR_sched_yield
);
521 # elif defined(VGO_solaris)
522 VG_(do_syscall0
)(__NR_yield
);
527 VG_(acquire_BigLock
)(tid
, "VG_(vg_yield)");
531 /* Set the standard set of blocked signals, used whenever we're not
532 running a client syscall. */
533 static void block_signals(void)
537 VG_(sigfillset
)(&mask
);
539 /* Don't block these because they're synchronous */
540 VG_(sigdelset
)(&mask
, VKI_SIGSEGV
);
541 VG_(sigdelset
)(&mask
, VKI_SIGBUS
);
542 VG_(sigdelset
)(&mask
, VKI_SIGFPE
);
543 VG_(sigdelset
)(&mask
, VKI_SIGILL
);
544 VG_(sigdelset
)(&mask
, VKI_SIGTRAP
);
545 VG_(sigdelset
)(&mask
, VKI_SIGSYS
);
547 /* Can't block these anyway */
548 VG_(sigdelset
)(&mask
, VKI_SIGSTOP
);
549 VG_(sigdelset
)(&mask
, VKI_SIGKILL
);
551 VG_(sigprocmask
)(VKI_SIG_SETMASK
, &mask
, NULL
);
554 static void os_state_clear(ThreadState
*tst
)
556 tst
->os_state
.lwpid
= 0;
557 tst
->os_state
.threadgroup
= 0;
558 tst
->os_state
.stk_id
= NULL_STK_ID
;
559 # if defined(VGO_linux)
560 /* no other fields to clear */
561 # elif defined(VGO_freebsd)
562 /* no other fields to clear */
563 # elif defined(VGO_darwin)
564 tst
->os_state
.post_mach_trap_fn
= NULL
;
565 tst
->os_state
.pthread
= 0;
566 tst
->os_state
.func_arg
= 0;
567 VG_(memset
)(&tst
->os_state
.child_go
, 0, sizeof(tst
->os_state
.child_go
));
568 VG_(memset
)(&tst
->os_state
.child_done
, 0, sizeof(tst
->os_state
.child_done
));
569 tst
->os_state
.wq_jmpbuf_valid
= False
;
570 tst
->os_state
.remote_port
= 0;
571 tst
->os_state
.msgh_id
= 0;
572 VG_(memset
)(&tst
->os_state
.mach_args
, 0, sizeof(tst
->os_state
.mach_args
));
573 # elif defined(VGO_solaris)
574 # if defined(VGP_x86_solaris)
575 tst
->os_state
.thrptr
= 0;
577 tst
->os_state
.ustack
= NULL
;
578 tst
->os_state
.in_door_return
= False
;
579 tst
->os_state
.door_return_procedure
= 0;
580 tst
->os_state
.oldcontext
= NULL
;
581 tst
->os_state
.schedctl_data
= 0;
582 tst
->os_state
.daemon_thread
= False
;
588 static void os_state_init(ThreadState
*tst
)
590 tst
->os_state
.valgrind_stack_base
= 0;
591 tst
->os_state
.valgrind_stack_init_SP
= 0;
596 void mostly_clear_thread_record ( ThreadId tid
)
598 vki_sigset_t savedmask
;
600 vg_assert(tid
< VG_N_THREADS
);
601 VG_(cleanup_thread
)(&VG_(threads
)[tid
].arch
);
602 VG_(threads
)[tid
].tid
= tid
;
604 /* Leave the thread in Zombie, so that it doesn't get reallocated
605 until the caller is finally done with the thread stack. */
606 VG_(threads
)[tid
].status
= VgTs_Zombie
;
608 VG_(sigemptyset
)(&VG_(threads
)[tid
].sig_mask
);
609 VG_(sigemptyset
)(&VG_(threads
)[tid
].tmp_sig_mask
);
611 os_state_clear(&VG_(threads
)[tid
]);
613 /* start with no altstack */
614 VG_(threads
)[tid
].altstack
.ss_sp
= (void *)0xdeadbeef;
615 VG_(threads
)[tid
].altstack
.ss_size
= 0;
616 VG_(threads
)[tid
].altstack
.ss_flags
= VKI_SS_DISABLE
;
618 VG_(clear_out_queued_signals
)(tid
, &savedmask
);
620 VG_(threads
)[tid
].sched_jmpbuf_valid
= False
;
624 Called in the child after fork. If the parent has multiple
625 threads, then we've inherited a VG_(threads) array describing them,
626 but only the thread which called fork() is actually alive in the
627 child. This functions needs to clean up all those other thread
630 Whichever tid in the parent which called fork() becomes the
631 master_tid in the child. That's because the only living slot in
632 VG_(threads) in the child after fork is VG_(threads)[tid], and it
633 would be too hard to try to re-number the thread and relocate the
634 thread state down to VG_(threads)[1].
636 This function also needs to reinitialize the_BigLock, since
637 otherwise we may end up sharing its state with the parent, which
638 would be deeply confusing.
640 static void sched_fork_cleanup(ThreadId me
)
643 vg_assert(VG_(running_tid
) == me
);
645 # if defined(VGO_darwin)
646 // GrP fixme hack reset Mach ports
650 VG_(threads
)[me
].os_state
.lwpid
= VG_(gettid
)();
651 VG_(threads
)[me
].os_state
.threadgroup
= VG_(getpid
)();
653 /* clear out all the unused thread slots */
654 for (tid
= 1; tid
< VG_N_THREADS
; tid
++) {
656 mostly_clear_thread_record(tid
);
657 VG_(threads
)[tid
].status
= VgTs_Empty
;
658 VG_(clear_syscallInfo
)(tid
);
662 /* re-init and take the sema */
665 VG_(acquire_BigLock_LL
)(NULL
);
669 /* First phase of initialisation of the scheduler. Initialise the
670 bigLock, zeroise the VG_(threads) structure and decide on the
671 ThreadId of the root thread.
673 ThreadId
VG_(scheduler_init_phase1
) ( void )
678 VG_(debugLog
)(1,"sched","sched_init_phase1\n");
680 if (VG_(clo_fair_sched
) != disable_fair_sched
681 && !ML_(set_sched_lock_impl
)(sched_lock_ticket
)
682 && VG_(clo_fair_sched
) == enable_fair_sched
)
684 VG_(printf
)("Error: fair scheduling is not supported on this system.\n");
688 if (VG_(clo_verbosity
) > 1) {
689 VG_(message
)(Vg_DebugMsg
,
690 "Scheduler: using %s scheduler lock implementation.\n",
691 ML_(get_sched_lock_name
)());
696 for (i
= 0 /* NB; not 1 */; i
< VG_N_THREADS
; i
++) {
697 /* Paranoia .. completely zero it out. */
698 VG_(memset
)( & VG_(threads
)[i
], 0, sizeof( VG_(threads
)[i
] ) );
700 VG_(threads
)[i
].sig_queue
= NULL
;
702 os_state_init(&VG_(threads
)[i
]);
703 mostly_clear_thread_record(i
);
705 VG_(threads
)[i
].status
= VgTs_Empty
;
706 VG_(threads
)[i
].client_stack_szB
= 0;
707 VG_(threads
)[i
].client_stack_highest_byte
= (Addr
)NULL
;
708 VG_(threads
)[i
].err_disablement_level
= 0;
709 VG_(threads
)[i
].thread_name
= NULL
;
712 tid_main
= VG_(alloc_ThreadState
)();
714 /* Bleh. Unfortunately there are various places in the system that
715 assume that the main thread has a ThreadId of 1.
716 - Helgrind (possibly)
717 - stack overflow message in default_action() in m_signals.c
718 - definitely a lot more places
720 vg_assert(tid_main
== 1);
726 /* Second phase of initialisation of the scheduler. Given the root
727 ThreadId computed by first phase of initialisation, fill in stack
728 details and acquire bigLock. Initialise the scheduler. This is
729 called at startup. The caller subsequently initialises the guest
730 state components of this main thread.
732 void VG_(scheduler_init_phase2
) ( ThreadId tid_main
,
736 VG_(debugLog
)(1,"sched","sched_init_phase2: tid_main=%u, "
737 "cls_end=0x%lx, cls_sz=%lu\n",
738 tid_main
, clstack_end
, clstack_size
);
740 vg_assert(VG_IS_PAGE_ALIGNED(clstack_end
+1));
741 vg_assert(VG_IS_PAGE_ALIGNED(clstack_size
));
743 VG_(threads
)[tid_main
].client_stack_highest_byte
745 VG_(threads
)[tid_main
].client_stack_szB
748 VG_(atfork
)(NULL
, NULL
, sched_fork_cleanup
);
752 /* ---------------------------------------------------------------------
753 Helpers for running translations.
754 ------------------------------------------------------------------ */
756 /* Use gcc's built-in setjmp/longjmp. longjmp must not restore signal
757 mask state, but does need to pass "val" through. jumped must be a
759 #define SCHEDSETJMP(tid, jumped, stmt) \
761 ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid); \
763 (jumped) = VG_MINIMAL_SETJMP(_qq_tst->sched_jmpbuf); \
764 if ((jumped) == ((UWord)0)) { \
765 vg_assert(!_qq_tst->sched_jmpbuf_valid); \
766 _qq_tst->sched_jmpbuf_valid = True; \
768 } else if (VG_(clo_trace_sched)) \
769 VG_(printf)("SCHEDSETJMP(line %d) tid %u, jumped=%lu\n", \
770 __LINE__, tid, jumped); \
771 vg_assert(_qq_tst->sched_jmpbuf_valid); \
772 _qq_tst->sched_jmpbuf_valid = False; \
776 /* Do various guest state alignment checks prior to running a thread.
777 Specifically, check that what we have matches Vex's guest state
778 layout requirements. See libvex.h for details, but in short the
779 requirements are: There must be no holes in between the primary
780 guest state, its two copies, and the spill area. In short, all 4
781 areas must be aligned on the LibVEX_GUEST_STATE_ALIGN boundary and
782 be placed back-to-back without holes in between. */
783 static void do_pre_run_checks ( volatile ThreadState
* tst
)
785 Addr a_vex
= (Addr
) & tst
->arch
.vex
;
786 Addr a_vexsh1
= (Addr
) & tst
->arch
.vex_shadow1
;
787 Addr a_vexsh2
= (Addr
) & tst
->arch
.vex_shadow2
;
788 Addr a_spill
= (Addr
) & tst
->arch
.vex_spill
;
789 UInt sz_vex
= (UInt
) sizeof tst
->arch
.vex
;
790 UInt sz_vexsh1
= (UInt
) sizeof tst
->arch
.vex_shadow1
;
791 UInt sz_vexsh2
= (UInt
) sizeof tst
->arch
.vex_shadow2
;
792 UInt sz_spill
= (UInt
) sizeof tst
->arch
.vex_spill
;
795 VG_(printf
)("gst %p %u, sh1 %p %u, "
796 "sh2 %p %u, spill %p %u\n",
797 (void*)a_vex
, sz_vex
,
798 (void*)a_vexsh1
, sz_vexsh1
,
799 (void*)a_vexsh2
, sz_vexsh2
,
800 (void*)a_spill
, sz_spill
);
802 vg_assert(sz_vex
% LibVEX_GUEST_STATE_ALIGN
== 0);
803 vg_assert(sz_vexsh1
% LibVEX_GUEST_STATE_ALIGN
== 0);
804 vg_assert(sz_vexsh2
% LibVEX_GUEST_STATE_ALIGN
== 0);
805 vg_assert(sz_spill
% LibVEX_GUEST_STATE_ALIGN
== 0);
807 vg_assert(a_vex
% LibVEX_GUEST_STATE_ALIGN
== 0);
808 vg_assert(a_vexsh1
% LibVEX_GUEST_STATE_ALIGN
== 0);
809 vg_assert(a_vexsh2
% LibVEX_GUEST_STATE_ALIGN
== 0);
810 vg_assert(a_spill
% LibVEX_GUEST_STATE_ALIGN
== 0);
812 /* Check that the guest state and its two shadows have the same
813 size, and that there are no holes in between. The latter is
814 important because Memcheck assumes that it can reliably access
815 the shadows by indexing off a pointer to the start of the
816 primary guest state area. */
817 vg_assert(sz_vex
== sz_vexsh1
);
818 vg_assert(sz_vex
== sz_vexsh2
);
819 vg_assert(a_vex
+ 1 * sz_vex
== a_vexsh1
);
820 vg_assert(a_vex
+ 2 * sz_vex
== a_vexsh2
);
821 /* Also check there's no hole between the second shadow area and
823 vg_assert(sz_spill
== LibVEX_N_SPILL_BYTES
);
824 vg_assert(a_vex
+ 3 * sz_vex
== a_spill
);
826 # if defined(VGA_x86)
827 /* x86 XMM regs must form an array, ie, have no holes in
830 (offsetof(VexGuestX86State
,guest_XMM7
)
831 - offsetof(VexGuestX86State
,guest_XMM0
))
832 == (8/*#regs*/-1) * 16/*bytes per reg*/
834 vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestX86State
,guest_XMM0
)));
835 vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestX86State
,guest_FPREG
)));
836 vg_assert(8 == offsetof(VexGuestX86State
,guest_EAX
));
837 vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State
,guest_EAX
)));
838 vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State
,guest_EIP
)));
841 # if defined(VGA_amd64)
842 /* amd64 YMM regs must form an array, ie, have no holes in
845 (offsetof(VexGuestAMD64State
,guest_YMM16
)
846 - offsetof(VexGuestAMD64State
,guest_YMM0
))
847 == (17/*#regs*/-1) * 32/*bytes per reg*/
849 vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State
,guest_YMM0
)));
850 vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State
,guest_FPREG
)));
851 vg_assert(16 == offsetof(VexGuestAMD64State
,guest_RAX
));
852 vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State
,guest_RAX
)));
853 vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State
,guest_RIP
)));
856 # if defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
857 /* ppc guest_state vector regs must be 16 byte aligned for
858 loads/stores. This is important! */
859 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex
.guest_VSR0
));
860 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow1
.guest_VSR0
));
861 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow2
.guest_VSR0
));
862 /* be extra paranoid .. */
863 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex
.guest_VSR1
));
864 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow1
.guest_VSR1
));
865 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow2
.guest_VSR1
));
868 # if defined(VGA_arm)
869 /* arm guest_state VFP regs must be 8 byte aligned for
870 loads/stores. Let's use 16 just to be on the safe side. */
871 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex
.guest_D0
));
872 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow1
.guest_D0
));
873 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow2
.guest_D0
));
874 /* be extra paranoid .. */
875 vg_assert(VG_IS_8_ALIGNED(& tst
->arch
.vex
.guest_D1
));
876 vg_assert(VG_IS_8_ALIGNED(& tst
->arch
.vex_shadow1
.guest_D1
));
877 vg_assert(VG_IS_8_ALIGNED(& tst
->arch
.vex_shadow2
.guest_D1
));
880 # if defined(VGA_arm64)
881 vg_assert(VG_IS_8_ALIGNED(& tst
->arch
.vex
.guest_X0
));
882 vg_assert(VG_IS_8_ALIGNED(& tst
->arch
.vex_shadow1
.guest_X0
));
883 vg_assert(VG_IS_8_ALIGNED(& tst
->arch
.vex_shadow2
.guest_X0
));
884 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex
.guest_Q0
));
885 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow1
.guest_Q0
));
886 vg_assert(VG_IS_16_ALIGNED(& tst
->arch
.vex_shadow2
.guest_Q0
));
889 # if defined(VGA_s390x)
890 /* no special requirements */
893 # if defined(VGA_mips32) || defined(VGA_mips64)
894 /* no special requirements */
898 // NO_VGDB_POLL value ensures vgdb is not polled, while
899 // VGDB_POLL_ASAP ensures that the next scheduler call
900 // will cause a poll.
901 #define NO_VGDB_POLL 0xffffffffffffffffULL
902 #define VGDB_POLL_ASAP 0x0ULL
904 void VG_(disable_vgdb_poll
) (void )
906 vgdb_next_poll
= NO_VGDB_POLL
;
908 void VG_(force_vgdb_poll
) ( void )
910 vgdb_next_poll
= VGDB_POLL_ASAP
;
913 /* Run the thread tid for a while, and return a VG_TRC_* value
914 indicating why VG_(disp_run_translations) stopped, and possibly an
915 auxiliary word. Also, only allow the thread to run for at most
916 *dispatchCtrP events. If (as is the normal case) use_alt_host_addr
917 is False, we are running ordinary redir'd translations, and we
918 should therefore start by looking up the guest next IP in TT. If
919 it is True then we ignore the guest next IP and just run from
920 alt_host_addr, which presumably points at host code for a no-redir
923 Return results are placed in two_words. two_words[0] is set to the
924 TRC. In the case where that is VG_TRC_CHAIN_ME_TO_{SLOW,FAST}_EP,
925 the address to patch is placed in two_words[1].
928 void run_thread_for_a_while ( /*OUT*/HWord
* two_words
,
929 /*MOD*/Int
* dispatchCtrP
,
932 Bool use_alt_host_addr
)
934 volatile HWord jumped
= 0;
935 volatile ThreadState
* tst
= NULL
; /* stop gcc complaining */
936 volatile Int done_this_time
= 0;
937 volatile HWord host_code_addr
= 0;
940 vg_assert(VG_(is_valid_tid
)(tid
));
941 vg_assert(VG_(is_running_thread
)(tid
));
942 vg_assert(!VG_(is_exiting
)(tid
));
943 vg_assert(*dispatchCtrP
> 0);
945 tst
= VG_(get_ThreadState
)(tid
);
946 do_pre_run_checks( tst
);
949 /* Futz with the XIndir stats counters. */
950 vg_assert(VG_(stats__n_xIndirs_32
) == 0);
951 vg_assert(VG_(stats__n_xIndir_hits1_32
) == 0);
952 vg_assert(VG_(stats__n_xIndir_hits2_32
) == 0);
953 vg_assert(VG_(stats__n_xIndir_hits3_32
) == 0);
954 vg_assert(VG_(stats__n_xIndir_misses_32
) == 0);
956 /* Clear return area. */
957 two_words
[0] = two_words
[1] = 0;
959 /* Figure out where we're starting from. */
960 if (use_alt_host_addr
) {
961 /* unusual case -- no-redir translation */
962 host_code_addr
= alt_host_addr
;
964 /* normal case -- redir translation */
965 Addr host_from_fast_cache
= 0;
966 Bool found_in_fast_cache
967 = VG_(lookupInFastCache
)( &host_from_fast_cache
,
968 (Addr
)tst
->arch
.vex
.VG_INSTR_PTR
);
969 if (found_in_fast_cache
) {
970 host_code_addr
= host_from_fast_cache
;
973 /* not found in VG_(tt_fast). Searching here the transtab
974 improves the performance compared to returning directly
976 Bool found
= VG_(search_transtab
)(&res
, NULL
, NULL
,
977 (Addr
)tst
->arch
.vex
.VG_INSTR_PTR
,
981 host_code_addr
= res
;
983 /* At this point, we know that we intended to start at a
984 normal redir translation, but it was not found. In
985 which case we can return now claiming it's not
987 two_words
[0] = VG_TRC_INNER_FASTMISS
; /* hmm, is that right? */
992 /* We have either a no-redir or a redir translation. */
993 vg_assert(host_code_addr
!= 0); /* implausible */
995 /* there should be no undealt-with signals */
996 //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);
998 /* Set up event counter stuff for the run. */
999 tst
->arch
.vex
.host_EvC_COUNTER
= *dispatchCtrP
;
1000 tst
->arch
.vex
.host_EvC_FAILADDR
1001 = (HWord
)VG_(fnptr_to_fnentry
)( &VG_(disp_cp_evcheck_fail
) );
1003 /* Invalidate any in-flight LL/SC transactions, in the case that we're
1004 using the fallback LL/SC implementation. See bugs 344524 and 369459. */
1005 # if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
1006 || defined(VGP_nanomips_linux)
1007 tst
->arch
.vex
.guest_LLaddr
= (RegWord
)(-1);
1008 # elif defined(VGP_arm64_linux) || defined(VGP_arm64_freebsd)
1009 tst
->arch
.vex
.guest_LLSC_SIZE
= 0;
1014 Int i
, err
= VG_(sigprocmask
)(VKI_SIG_SETMASK
, NULL
, &m
);
1015 vg_assert(err
== 0);
1016 VG_(printf
)("tid %u: entering code with unblocked signals: ", tid
);
1017 for (i
= 1; i
<= _VKI_NSIG
; i
++)
1018 if (!VG_(sigismember
)(&m
, i
))
1019 VG_(printf
)("%d ", i
);
1023 /* Set up return-value area. */
1025 // Tell the tool this thread is about to run client code
1026 VG_TRACK( start_client_code
, tid
, bbs_done
);
1028 vg_assert(VG_(in_generated_code
) == False
);
1029 VG_(in_generated_code
) = True
;
1034 VG_(disp_run_translations
)(
1036 (volatile void*)&tst
->arch
.vex
,
1041 vg_assert(VG_(in_generated_code
) == True
);
1042 VG_(in_generated_code
) = False
;
1044 if (jumped
!= (HWord
)0) {
1045 /* We get here if the client took a fault that caused our signal
1046 handler to longjmp. */
1047 vg_assert(two_words
[0] == 0 && two_words
[1] == 0); // correct?
1048 two_words
[0] = VG_TRC_FAULT_SIGNAL
;
1053 /* Merge the 32-bit XIndir/miss counters into the 64 bit versions,
1054 and zero out the 32-bit ones in preparation for the next run of
1056 stats__n_xIndirs
+= (ULong
)VG_(stats__n_xIndirs_32
);
1057 VG_(stats__n_xIndirs_32
) = 0;
1058 stats__n_xIndir_hits1
+= (ULong
)VG_(stats__n_xIndir_hits1_32
);
1059 VG_(stats__n_xIndir_hits1_32
) = 0;
1060 stats__n_xIndir_hits2
+= (ULong
)VG_(stats__n_xIndir_hits2_32
);
1061 VG_(stats__n_xIndir_hits2_32
) = 0;
1062 stats__n_xIndir_hits3
+= (ULong
)VG_(stats__n_xIndir_hits3_32
);
1063 VG_(stats__n_xIndir_hits3_32
) = 0;
1064 stats__n_xIndir_misses
+= (ULong
)VG_(stats__n_xIndir_misses_32
);
1065 VG_(stats__n_xIndir_misses_32
) = 0;
1067 /* Inspect the event counter. */
1068 vg_assert((Int
)tst
->arch
.vex
.host_EvC_COUNTER
>= -1);
1069 vg_assert(tst
->arch
.vex
.host_EvC_FAILADDR
1070 == (HWord
)VG_(fnptr_to_fnentry
)( &VG_(disp_cp_evcheck_fail
)) );
1072 /* The number of events done this time is the difference between
1073 the event counter originally and what it is now. Except -- if
1074 it has gone negative (to -1) then the transition 0 to -1 doesn't
1075 correspond to a real executed block, so back it out. It's like
1076 this because the event checks decrement the counter first and
1077 check it for negativeness second, hence the 0 to -1 transition
1078 causes a bailout and the block it happens in isn't executed. */
1080 Int dispatchCtrAfterwards
= (Int
)tst
->arch
.vex
.host_EvC_COUNTER
;
1081 done_this_time
= *dispatchCtrP
- dispatchCtrAfterwards
;
1082 if (dispatchCtrAfterwards
== -1) {
1085 /* If the generated code drives the counter below -1, something
1086 is seriously wrong. */
1087 vg_assert(dispatchCtrAfterwards
>= 0);
1091 vg_assert(done_this_time
>= 0);
1092 bbs_done
+= (ULong
)done_this_time
;
1094 *dispatchCtrP
-= done_this_time
;
1095 vg_assert(*dispatchCtrP
>= 0);
1097 // Tell the tool this thread has stopped running client code
1098 VG_TRACK( stop_client_code
, tid
, bbs_done
);
1100 if (bbs_done
>= vgdb_next_poll
) {
1101 if (VG_(clo_vgdb_poll
))
1102 vgdb_next_poll
= bbs_done
+ (ULong
)VG_(clo_vgdb_poll
);
1104 /* value was changed due to gdbserver invocation via ptrace */
1105 vgdb_next_poll
= NO_VGDB_POLL
;
1106 if (VG_(gdbserver_activity
) (tid
))
1107 VG_(gdbserver
) (tid
);
1110 /* TRC value and possible auxiliary patch-address word are already
1111 in two_words[0] and [1] respectively, as a result of the call to
1112 VG_(run_innerloop). */
1114 if (two_words
[0] == VG_TRC_CHAIN_ME_TO_SLOW_EP
1115 || two_words
[0] == VG_TRC_CHAIN_ME_TO_FAST_EP
) {
1116 vg_assert(two_words
[1] != 0); /* we have a legit patch addr */
1118 vg_assert(two_words
[1] == 0); /* nobody messed with it */
1123 /* ---------------------------------------------------------------------
1124 The scheduler proper.
1125 ------------------------------------------------------------------ */
1127 static void handle_tt_miss ( ThreadId tid
)
1130 Addr ip
= VG_(get_IP
)(tid
);
1132 /* Trivial event. Miss in the fast-cache. Do a full
1134 found
= VG_(search_transtab
)( NULL
, NULL
, NULL
,
1135 ip
, True
/*upd_fast_cache*/ );
1136 if (UNLIKELY(!found
)) {
1137 /* Not found; we need to request a translation. */
1138 if (VG_(translate
)( tid
, ip
, /*debug*/False
, 0/*not verbose*/,
1139 bbs_done
, True
/*allow redirection*/ )) {
1140 found
= VG_(search_transtab
)( NULL
, NULL
, NULL
,
1142 vg_assert2(found
, "handle_tt_miss: missing tt_fast entry");
1145 // If VG_(translate)() fails, it's because it had to throw a
1146 // signal because the client jumped to a bad address. That
1147 // means that either a signal has been set up for delivery,
1148 // or the thread has been marked for termination. Either
1149 // way, we just need to go back into the scheduler loop.
1155 void handle_chain_me ( ThreadId tid
, void* place_to_chain
, Bool toFastEP
)
1158 Addr ip
= VG_(get_IP
)(tid
);
1159 SECno to_sNo
= INV_SNO
;
1160 TTEno to_tteNo
= INV_TTE
;
1162 found
= VG_(search_transtab
)( NULL
, &to_sNo
, &to_tteNo
,
1163 ip
, False
/*dont_upd_fast_cache*/ );
1165 /* Not found; we need to request a translation. */
1166 if (VG_(translate
)( tid
, ip
, /*debug*/False
, 0/*not verbose*/,
1167 bbs_done
, True
/*allow redirection*/ )) {
1168 found
= VG_(search_transtab
)( NULL
, &to_sNo
, &to_tteNo
,
1170 vg_assert2(found
, "handle_chain_me: missing tt_fast entry");
1172 // If VG_(translate)() fails, it's because it had to throw a
1173 // signal because the client jumped to a bad address. That
1174 // means that either a signal has been set up for delivery,
1175 // or the thread has been marked for termination. Either
1176 // way, we just need to go back into the scheduler loop.
1181 vg_assert(to_sNo
!= INV_SNO
);
1182 vg_assert(to_tteNo
!= INV_TTE
);
1184 /* So, finally we know where to patch through to. Do the patching
1185 and update the various admin tables that allow it to be undone
1186 in the case that the destination block gets deleted. */
1187 VG_(tt_tc_do_chaining
)( place_to_chain
,
1188 to_sNo
, to_tteNo
, toFastEP
);
1191 static void handle_syscall(ThreadId tid
, UInt trc
)
1193 ThreadState
* volatile tst
= VG_(get_ThreadState
)(tid
);
1194 volatile UWord jumped
;
1196 /* Syscall may or may not block; either way, it will be
1197 complete by the time this call returns, and we'll be
1198 runnable again. We could take a signal while the
1201 if (VG_(clo_sanity_level
) >= 3) {
1202 HChar buf
[50]; // large enough
1203 VG_(sprintf
)(buf
, "(BEFORE SYSCALL, tid %u)", tid
);
1204 Bool ok
= VG_(am_do_sync_check
)(buf
, __FILE__
, __LINE__
);
1208 SCHEDSETJMP(tid
, jumped
, VG_(client_syscall
)(tid
, trc
));
1210 if (VG_(clo_sanity_level
) >= 3) {
1211 HChar buf
[50]; // large enough
1212 VG_(sprintf
)(buf
, "(AFTER SYSCALL, tid %u)", tid
);
1213 Bool ok
= VG_(am_do_sync_check
)(buf
, __FILE__
, __LINE__
);
1217 if (!VG_(is_running_thread
)(tid
))
1218 VG_(printf
)("tid %u not running; VG_(running_tid)=%u, tid %u status %u\n",
1219 tid
, VG_(running_tid
), tid
, tst
->status
);
1220 vg_assert(VG_(is_running_thread
)(tid
));
1222 if (jumped
!= (UWord
)0) {
1224 VG_(poll_signals
)(tid
);
1228 static void handle_extension(ThreadId tid
)
1230 volatile UWord jumped
;
1231 enum ExtensionError err
;
1233 SCHEDSETJMP(tid
, jumped
, err
= VG_(client_extension
)(tid
));
1234 vg_assert(VG_(is_running_thread
)(tid
));
1236 if (jumped
!= (UWord
)0) {
1238 VG_(poll_signals
)(tid
);
1239 } else if (err
!= ExtErr_OK
) {
1240 Addr addr
= VG_(get_IP
)(tid
);
1243 VG_(synth_sigill
)(tid
, addr
);
1246 VG_(core_panic
)("scheduler: bad return code from extension");
1251 /* tid just requested a jump to the noredir version of its current
1252 program counter. So make up that translation if needed, run it,
1253 and return the resulting thread return code in two_words[]. */
1255 void handle_noredir_jump ( /*OUT*/HWord
* two_words
,
1256 /*MOD*/Int
* dispatchCtrP
,
1259 /* Clear return area. */
1260 two_words
[0] = two_words
[1] = 0;
1263 Addr ip
= VG_(get_IP
)(tid
);
1265 Bool found
= VG_(search_unredir_transtab
)( &hcode
, ip
);
1267 /* Not found; we need to request a translation. */
1268 if (VG_(translate
)( tid
, ip
, /*debug*/False
, 0/*not verbose*/, bbs_done
,
1269 False
/*NO REDIRECTION*/ )) {
1271 found
= VG_(search_unredir_transtab
)( &hcode
, ip
);
1272 vg_assert2(found
, "unredir translation missing after creation?!");
1274 // If VG_(translate)() fails, it's because it had to throw a
1275 // signal because the client jumped to a bad address. That
1276 // means that either a signal has been set up for delivery,
1277 // or the thread has been marked for termination. Either
1278 // way, we just need to go back into the scheduler loop.
1279 two_words
[0] = VG_TRC_BORING
;
1286 vg_assert(hcode
!= 0);
1288 /* Otherwise run it and return the resulting VG_TRC_* value. */
1289 vg_assert(*dispatchCtrP
> 0); /* so as to guarantee progress */
1290 run_thread_for_a_while( two_words
, dispatchCtrP
, tid
,
1291 hcode
, True
/*use hcode*/ );
1296 Run a thread until it wants to exit.
1298 We assume that the caller has already called VG_(acquire_BigLock) for
1299 us, so we own the VCPU. Also, all signals are blocked.
1301 VgSchedReturnCode
VG_(scheduler
) ( ThreadId tid
)
1303 /* Holds the remaining size of this thread's "timeslice". */
1304 Int dispatch_ctr
= 0;
1306 ThreadState
*tst
= VG_(get_ThreadState
)(tid
);
1307 static Bool vgdb_startup_action_done
= False
;
1309 if (VG_(clo_trace_sched
))
1310 print_sched_event(tid
, "entering VG_(scheduler)");
1312 /* Do vgdb initialization (but once). Only the first (main) task
1313 starting up will do the below.
1314 Initialize gdbserver earlier than at the first
1315 thread VG_(scheduler) is causing problems:
1316 * at the end of VG_(scheduler_init_phase2) :
1317 The main thread is in VgTs_Init state, but in a not yet
1318 consistent state => the thread cannot be reported to gdb
1319 (e.g. causes an assert in LibVEX_GuestX86_get_eflags when giving
1320 back the guest registers to gdb).
1321 * at end of valgrind_main, just
1322 before VG_(main_thread_wrapper_NORETURN)(1) :
1323 The main thread is still in VgTs_Init state but in a
1324 more advanced state. However, the thread state is not yet
1325 completely initialized : a.o., the os_state is not yet fully
1326 set => the thread is then not properly reported to gdb,
1327 which is then confused (causing e.g. a duplicate thread be
1328 shown, without thread id).
1329 * it would be possible to initialize gdbserver "lower" in the
1330 call stack (e.g. in VG_(main_thread_wrapper_NORETURN)) but
1331 these are platform dependent and the place at which
1332 the thread state is completely initialized is not
1333 specific anymore to the main thread (so a similar "do it only
1334 once" would be needed).
1336 => a "once only" initialization here is the best compromise. */
1337 if (!vgdb_startup_action_done
) {
1338 vg_assert(tid
== 1); // it must be the main thread.
1339 vgdb_startup_action_done
= True
;
1340 if (VG_(clo_vgdb
) != Vg_VgdbNo
) {
1341 /* If we have to poll, ensures we do an initial poll at first
1342 scheduler call. Otherwise, ensure no poll (unless interrupted
1344 if (VG_(clo_vgdb_poll
))
1345 VG_(force_vgdb_poll
) ();
1347 VG_(disable_vgdb_poll
) ();
1349 VG_(gdbserver_prerun_action
) (1);
1351 VG_(disable_vgdb_poll
) ();
1355 if (SimHintiS(SimHint_no_nptl_pthread_stackcache
, VG_(clo_sim_hints
))
1357 /* We disable the stack cache the first time we see a thread other
1358 than the main thread appearing. At this moment, we are sure the pthread
1359 lib loading is done/variable was initialised by pthread lib/... */
1360 if (VG_(client__stack_cache_actsize__addr
)) {
1361 if (*VG_(client__stack_cache_actsize__addr
) == 0) {
1362 VG_(debugLog
)(1,"sched",
1363 "pthread stack cache size disable done"
1365 *VG_(client__stack_cache_actsize__addr
) = 1000 * 1000 * 1000;
1366 /* Set a value big enough to be above the hardcoded maximum stack
1367 cache size in glibc, small enough to allow a pthread stack size
1368 to be added without risk of overflow. */
1372 * glibc 2.34 no longer has stack_cache_actsize as a visible variable
1373 * so we switch to using the GLIBC_TUNABLES env var. Processing for that
1374 * is done in initimg-linux.c / setup_client_env for all glibc
1376 * If we don't detect stack_cache_actsize we want to be able to tell
1377 * whether it is an unexpected error or if it is no longer there.
1378 * In the latter case we don't print a warning.
1380 Bool print_warning
= True
;
1381 if (VG_(client__gnu_get_libc_version_addr
) != NULL
) {
1382 const HChar
* gnu_libc_version
= VG_(client__gnu_get_libc_version_addr
)();
1383 if (gnu_libc_version
!= NULL
) {
1384 HChar
* glibc_version_tok
= VG_(strdup
)("scheduler.1", gnu_libc_version
);
1385 const HChar
* str_major
= VG_(strtok
)(glibc_version_tok
, ".");
1386 Long major
= VG_(strtoll10
)(str_major
, NULL
);
1387 const HChar
* str_minor
= VG_(strtok
)(NULL
, ".");
1388 Long minor
= VG_(strtoll10
)(str_minor
, NULL
);
1389 if (major
>= 2 && minor
>= 34) {
1390 print_warning
= False
;
1392 VG_(free
)(glibc_version_tok
);
1397 if (print_warning
) {
1398 VG_(debugLog
)(0,"sched",
1399 "WARNING: pthread stack cache cannot be disabled!\n");
1401 VG_(clo_sim_hints
) &= ~SimHint2S(SimHint_no_nptl_pthread_stackcache
);
1402 /* Remove SimHint_no_nptl_pthread_stackcache from VG_(clo_sim_hints)
1403 to avoid having a msg for all following threads. */
1407 /* set the proper running signal mask */
1410 vg_assert(VG_(is_running_thread
)(tid
));
1412 dispatch_ctr
= VG_(clo_scheduling_quantum
);
1414 while (!VG_(is_exiting
)(tid
)) {
1416 vg_assert(dispatch_ctr
>= 0);
1417 if (dispatch_ctr
== 0) {
1419 /* Our slice is done, so yield the CPU to another thread. On
1420 Linux, this doesn't sleep between sleeping and running,
1421 since that would take too much time. */
1423 /* 4 July 06: it seems that a zero-length nsleep is needed to
1424 cause async thread cancellation (canceller.c) to terminate
1425 in finite time; else it is in some kind of race/starvation
1426 situation and completion is arbitrarily delayed (although
1427 this is not a deadlock).
1429 Unfortunately these sleeps cause MPI jobs not to terminate
1430 sometimes (some kind of livelock). So sleeping once
1431 every N opportunities appears to work. */
1433 /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
1434 sys_yield also helps the problem, whilst not crashing apps. */
1436 VG_(release_BigLock
)(tid
, VgTs_Yielding
,
1437 "VG_(scheduler):timeslice");
1438 /* ------------ now we don't have The Lock ------------ */
1440 VG_(acquire_BigLock
)(tid
, "VG_(scheduler):timeslice");
1441 /* ------------ now we do have The Lock ------------ */
1443 /* OK, do some relatively expensive housekeeping stuff */
1444 scheduler_sanity(tid
);
1445 VG_(sanity_check_general
)(False
);
1447 /* Possibly make a progress report */
1448 if (UNLIKELY(VG_(clo_progress_interval
) > 0)) {
1449 maybe_progress_report( VG_(clo_progress_interval
) );
1452 /* Look for any pending signals for this thread, and set them up
1454 VG_(poll_signals
)(tid
);
1456 if (VG_(is_exiting
)(tid
))
1457 break; /* poll_signals picked up a fatal signal */
1459 /* For stats purposes only. */
1460 n_scheduling_events_MAJOR
++;
1462 /* Figure out how many bbs to ask vg_run_innerloop to do. */
1463 dispatch_ctr
= VG_(clo_scheduling_quantum
);
1466 vg_assert(tst
->tid
== tid
);
1467 vg_assert(tst
->os_state
.lwpid
== VG_(gettid
)());
1470 /* For stats purposes only. */
1471 n_scheduling_events_MINOR
++;
1474 VG_(message
)(Vg_DebugMsg
, "thread %u: running for %d bbs\n",
1475 tid
, dispatch_ctr
- 1 );
1477 HWord trc
[2]; /* "two_words" */
1478 run_thread_for_a_while( &trc
[0],
1480 tid
, 0/*ignored*/, False
);
1482 if (VG_(clo_trace_sched
) && VG_(clo_verbosity
) > 2) {
1483 const HChar
*name
= name_of_sched_event(trc
[0]);
1484 HChar buf
[VG_(strlen
)(name
) + 10]; // large enough
1485 VG_(sprintf
)(buf
, "TRC: %s", name
);
1486 print_sched_event(tid
, buf
);
1489 if (trc
[0] == VEX_TRC_JMP_NOREDIR
) {
1490 /* If we got a request to run a no-redir version of
1491 something, do so now -- handle_noredir_jump just (creates
1492 and) runs that one translation. The flip side is that the
1493 noredir translation can't itself return another noredir
1494 request -- that would be nonsensical. It can, however,
1495 return VG_TRC_BORING, which just means keep going as
1497 /* Note that the fact that we need to continue with a
1498 no-redir jump is not recorded anywhere else in this
1499 thread's state. So we *must* execute the block right now
1500 -- we can't fail to execute it and later resume with it,
1501 because by then we'll have forgotten the fact that it
1502 should be run as no-redir, but will get run as a normal
1503 potentially-redir'd, hence screwing up. This really ought
1504 to be cleaned up, by noting in the guest state that the
1505 next block to be executed should be no-redir. Then we can
1506 suspend and resume at any point, which isn't the case at
1508 /* We can't enter a no-redir translation with the dispatch
1509 ctr set to zero, for the reasons commented just above --
1510 we need to force it to execute right now. So, if the
1511 dispatch ctr is zero, set it to one. Note that this would
1512 have the bad side effect of holding the Big Lock arbitrary
1513 long should there be an arbitrarily long sequence of
1514 back-to-back no-redir translations to run. But we assert
1515 just below that this translation cannot request another
1516 no-redir jump, so we should be safe against that. */
1517 if (dispatch_ctr
== 0) {
1520 handle_noredir_jump( &trc
[0],
1523 vg_assert(trc
[0] != VEX_TRC_JMP_NOREDIR
);
1525 /* This can't be allowed to happen, since it means the block
1526 didn't execute, and we have no way to resume-as-noredir
1527 after we get more timeslice. But I don't think it ever
1528 can, since handle_noredir_jump will assert if the counter
1529 is zero on entry. */
1530 vg_assert(trc
[0] != VG_TRC_INNER_COUNTERZERO
);
1531 /* This asserts the same thing. */
1532 vg_assert(dispatch_ctr
>= 0);
1534 /* A no-redir translation can't return with a chain-me
1535 request, since chaining in the no-redir cache is too
1537 vg_assert(trc
[0] != VG_TRC_CHAIN_ME_TO_SLOW_EP
1538 && trc
[0] != VG_TRC_CHAIN_ME_TO_FAST_EP
);
1542 case VEX_TRC_JMP_BORING
:
1543 /* assisted dispatch, no event. Used by no-redir
1544 translations to force return to the scheduler. */
1546 /* no special event, just keep going. */
1549 case VG_TRC_INNER_FASTMISS
:
1550 vg_assert(dispatch_ctr
>= 0);
1551 handle_tt_miss(tid
);
1554 case VG_TRC_CHAIN_ME_TO_SLOW_EP
: {
1555 if (0) VG_(printf
)("sched: CHAIN_TO_SLOW_EP: %p\n", (void*)trc
[1] );
1556 handle_chain_me(tid
, (void*)trc
[1], False
);
1560 case VG_TRC_CHAIN_ME_TO_FAST_EP
: {
1561 if (0) VG_(printf
)("sched: CHAIN_TO_FAST_EP: %p\n", (void*)trc
[1] );
1562 handle_chain_me(tid
, (void*)trc
[1], True
);
1566 case VEX_TRC_JMP_CLIENTREQ
:
1567 do_client_request(tid
);
1570 case VEX_TRC_JMP_EXTENSION
: {
1571 handle_extension(tid
);
1575 case VEX_TRC_JMP_SYS_INT128
: /* x86-linux */
1576 case VEX_TRC_JMP_SYS_INT129
: /* x86-darwin */
1577 case VEX_TRC_JMP_SYS_INT130
: /* x86-darwin */
1578 case VEX_TRC_JMP_SYS_INT145
: /* x86-solaris */
1579 case VEX_TRC_JMP_SYS_INT210
: /* x86-solaris */
1580 /* amd64-linux, ppc32-linux, amd64-darwin, amd64-solaris */
1581 case VEX_TRC_JMP_SYS_SYSCALL
:
1582 handle_syscall(tid
, trc
[0]);
1583 if (VG_(clo_sanity_level
) >= 3)
1584 VG_(sanity_check_general
)(True
); /* sanity-check every syscall */
1587 case VEX_TRC_JMP_YIELD
:
1588 /* Explicit yield, because this thread is in a spin-lock
1589 or something. Only let the thread run for a short while
1590 longer. Because swapping to another thread is expensive,
1591 we're prepared to let this thread eat a little more CPU
1592 before swapping to another. That means that short term
1593 spins waiting for hardware to poke memory won't cause a
1595 if (dispatch_ctr
> 300)
1599 case VG_TRC_INNER_COUNTERZERO
:
1600 /* Timeslice is out. Let a new thread be scheduled. */
1601 vg_assert(dispatch_ctr
== 0);
1604 case VG_TRC_FAULT_SIGNAL
:
1605 /* Everything should be set up (either we're exiting, or
1606 about to start in a signal handler). */
1609 case VEX_TRC_JMP_MAPFAIL
:
1610 /* Failure of arch-specific address translation (x86/amd64
1611 segment override use) */
1612 /* jrs 2005 03 11: is this correct? */
1613 VG_(synth_fault
)(tid
);
1616 case VEX_TRC_JMP_EMWARN
: {
1617 static Int counts
[EmNote_NUMBER
];
1618 static Bool counts_initted
= False
;
1623 if (!counts_initted
) {
1624 counts_initted
= True
;
1625 for (q
= 0; q
< EmNote_NUMBER
; q
++)
1628 ew
= (VexEmNote
)VG_(threads
)[tid
].arch
.vex
.guest_EMNOTE
;
1629 what
= (ew
< 0 || ew
>= EmNote_NUMBER
)
1631 : LibVEX_EmNote_string(ew
);
1632 show
= (ew
< 0 || ew
>= EmNote_NUMBER
)
1635 if (show
&& VG_(clo_show_emwarns
) && !VG_(clo_xml
)) {
1636 VG_(message
)( Vg_UserMsg
,
1637 "Emulation warning: unsupported action:\n");
1638 VG_(message
)( Vg_UserMsg
, " %s\n", what
);
1639 VG_(get_and_pp_StackTrace
)( tid
, VG_(clo_backtrace_size
) );
1644 case VEX_TRC_JMP_EMFAIL
: {
1647 ew
= (VexEmNote
)VG_(threads
)[tid
].arch
.vex
.guest_EMNOTE
;
1648 what
= (ew
< 0 || ew
>= EmNote_NUMBER
)
1650 : LibVEX_EmNote_string(ew
);
1651 VG_(message
)( Vg_UserMsg
,
1652 "Emulation fatal error -- Valgrind cannot continue:\n");
1653 VG_(message
)( Vg_UserMsg
, " %s\n", what
);
1654 VG_(get_and_pp_StackTrace
)( tid
, VG_(clo_backtrace_size
) );
1655 VG_(message
)(Vg_UserMsg
, "\n");
1656 VG_(message
)(Vg_UserMsg
, "Valgrind has to exit now. Sorry.\n");
1657 VG_(message
)(Vg_UserMsg
, "\n");
1662 case VEX_TRC_JMP_SIGILL
:
1663 VG_(synth_sigill
)(tid
, VG_(get_IP
)(tid
));
1666 case VEX_TRC_JMP_SIGTRAP
:
1667 VG_(synth_sigtrap
)(tid
);
1670 case VEX_TRC_JMP_SIGSEGV
:
1671 VG_(synth_fault
)(tid
);
1674 case VEX_TRC_JMP_SIGBUS
:
1675 VG_(synth_sigbus
)(tid
);
1678 case VEX_TRC_JMP_SIGFPE
:
1679 VG_(synth_sigfpe
)(tid
, 0);
1682 case VEX_TRC_JMP_SIGFPE_INTDIV
:
1683 VG_(synth_sigfpe
)(tid
, VKI_FPE_INTDIV
);
1686 case VEX_TRC_JMP_SIGFPE_INTOVF
:
1687 VG_(synth_sigfpe
)(tid
, VKI_FPE_INTOVF
);
      case VEX_TRC_JMP_NODECODE: {
         Addr addr = VG_(get_IP)(tid);

         if (VG_(clo_sigill_diag)) {
            VG_(umsg)(
               "valgrind: Unrecognised instruction at address %#lx.\n", addr);
            VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
#           define M(a) VG_(umsg)(a "\n");
            M("Your program just tried to execute an instruction that Valgrind" );
            M("did not recognise.  There are two possible reasons for this."    );
            M("1. Your program has a bug and erroneously jumped to a non-code"  );
            M("   location.  If you are running Memcheck and you just saw a"    );
            M("   warning about a bad jump, it's probably your program's fault.");
            M("2. The instruction is legitimate but Valgrind doesn't handle it,");
            M("   i.e. it's Valgrind's fault.  If you think this is the case or");
            M("   you are not sure, please let us know and we'll try to fix it.");
            M("Either way, Valgrind will now raise a SIGILL signal which will" );
            M("probably kill your program."                                    );
#           undef M
         }
#        if defined(VGA_s390x)
         /* Now that the complaint is out we need to adjust the guest_IA. The
            reason is that -- after raising the exception -- execution will
            continue with the insn that follows the invalid insn. As the first
            2 bits of the invalid insn determine its length in the usual way,
            we can compute the address of the next insn here and adjust the
            guest_IA accordingly. This adjustment is essential and tested by
            none/tests/s390x/op_exception.c (which would loop forever
            otherwise). */
         UChar byte = ((UChar *)addr)[0];
         UInt insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
         Addr next_insn_addr = addr + insn_length;
         VG_(set_IP)(tid, next_insn_addr);
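         /* Illustrative note (not from the original source): the two top
            bits of the first opcode byte give the insn length -- 00 -> 2
            bytes, 01 or 10 -> 4 bytes, 11 -> 6 bytes.  For example, for
            byte 0xC0 we have (byte >> 6) == 3, and the formula above gives
            ((((3 + 1) >> 1) + 1) << 1) == 6, i.e. a 6-byte insn. */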
#        endif
         VG_(synth_sigill)(tid, addr);
         break;
      }
      case VEX_TRC_JMP_INVALICACHE:
         VG_(discard_translations)(
            (Addr)VG_(threads)[tid].arch.vex.guest_CMSTART,
            VG_(threads)[tid].arch.vex.guest_CMLEN,
            "scheduler(VEX_TRC_JMP_INVALICACHE)"
         );
         if (0)
            VG_(printf)("dump translations done.\n");
         break;
      case VEX_TRC_JMP_FLUSHDCACHE: {
         void* start = (void*)(Addr)VG_(threads)[tid].arch.vex.guest_CMSTART;
         SizeT len   = VG_(threads)[tid].arch.vex.guest_CMLEN;
         VG_(debugLog)(2, "sched", "flush_dcache(%p, %lu)\n", start, len);
         VG_(flush_dcache)(start, len);
         break;
      }
      case VG_TRC_INVARIANT_FAILED:
         /* This typically happens if, after running generated code,
            it is detected that host CPU settings (eg, FPU/Vector
            control words) are not as they should be.  Vex's code
            generation specifies the state such control words should
            be in on entry to Vex-generated code, and they should be
            unchanged on exit from it.  Failure of this assertion
            usually means a bug in Vex's code generation. */
         // __asm__ __volatile__ (
         //    "\t.word 0xEEF12A10\n"  // fmrx r2,fpscr
         //    "\tmov   %0, r2" : "=r"(xx) : : "r2" );
         // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "run_innerloop detected host "
                       "state invariant failure", trc);
      case VEX_TRC_JMP_SYS_SYSENTER:
         /* Do whatever simulation is appropriate for an x86 sysenter
            instruction.  Note that it is critical to set this thread's
            guest_EIP to point at the code to execute after the
            sysenter, since Vex-generated code will not have set it --
            vex does not know what it should be.  Vex sets the next
            address to zero, so if you don't set guest_EIP, the thread
            will jump to zero afterwards and probably die as a result. */
#        if defined(VGP_x86_linux)
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on x86-linux is not supported");
#        elif defined(VGP_x86_darwin) || defined(VGP_x86_solaris)
         /* return address in client edx */
         VG_(threads)[tid].arch.vex.guest_EIP
            = VG_(threads)[tid].arch.vex.guest_EDX;
         handle_syscall(tid, trc[0]);
#        else
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on non-x86 platform?!?!");
#        endif
         break;

      default:
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "unexpected thread return code (%u)", trc[0]);
         /* NOTREACHED */
         break;
   } /* switch (trc) */

   if (UNLIKELY(VG_(clo_profyle_sbs)) && VG_(clo_profyle_interval) > 0)
      maybe_show_sb_profile();

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "exiting VG_(scheduler)");

   vg_assert(VG_(is_exiting)(tid));

   return tst->exitreason;
}
void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
{
   ThreadId tid;

   vg_assert(VG_(is_running_thread)(me));

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid == me
          || VG_(threads)[tid].status == VgTs_Empty)
         continue;
      if (0)
         VG_(printf)(
            "VG_(nuke_all_threads_except): nuking tid %u\n", tid);

      VG_(threads)[tid].exitreason = src;
      if (src == VgSrc_FatalSig)
         VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
      VG_(get_thread_out_of_syscall)(tid);
   }
}
/* ---------------------------------------------------------------------
   Specifying shadow register values
   ------------------------------------------------------------------ */
#if defined(VGA_x86)
#  define VG_CLREQ_ARGS       guest_EAX
#  define VG_CLREQ_RET        guest_EDX
#elif defined(VGA_amd64)
#  define VG_CLREQ_ARGS       guest_RAX
#  define VG_CLREQ_RET        guest_RDX
#elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
#  define VG_CLREQ_ARGS       guest_GPR4
#  define VG_CLREQ_RET        guest_GPR3
#elif defined(VGA_arm)
#  define VG_CLREQ_ARGS       guest_R4
#  define VG_CLREQ_RET        guest_R3
#elif defined(VGA_arm64)
#  define VG_CLREQ_ARGS       guest_X4
#  define VG_CLREQ_RET        guest_X3
#elif defined (VGA_s390x)
#  define VG_CLREQ_ARGS       guest_r2
#  define VG_CLREQ_RET        guest_r3
#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
#  define VG_CLREQ_ARGS       guest_r12
#  define VG_CLREQ_RET        guest_r11
#else
#  error Unknown arch
#endif

#define CLREQ_ARGS(regs)   ((regs).vex.VG_CLREQ_ARGS)
#define CLREQ_RET(regs)    ((regs).vex.VG_CLREQ_RET)
#define O_CLREQ_RET        (offsetof(VexGuestArchState, VG_CLREQ_RET))
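
/* Illustrative note (an assumption based on valgrind.h, not part of this
   file): on x86, for instance, the client-request trampoline leaves the
   address of the UWord argument block (arg[0] = request code, arg[1..] =
   operands) in guest_EAX, and the core hands the result back in
   guest_EDX.  The registers named above play the same roles on the other
   architectures. */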
// These macros write a value to a client's thread register, and tell the
// tool that it's happened (if necessary).

#define SET_CLREQ_RETVAL(zztid, zzval) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write, \
                  Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
   } while (0)

#define SET_CLCALL_RETVAL(zztid, zzval, f) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write_clientcall_return, \
                  zztid, O_CLREQ_RET, sizeof(UWord), f); \
   } while (0)
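
/* Typical use, as a sketch: after servicing a request the scheduler does
      SET_CLREQ_RETVAL(tid, 0);
   which stores 0 in the client's return register and, via VG_TRACK, tells
   the tool that the register was written (so e.g. Memcheck can mark it as
   defined). */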
/* ---------------------------------------------------------------------
   Handle client requests.
   ------------------------------------------------------------------ */
// OS-specific(?) client requests
static Bool os_client_request(ThreadId tid, UWord *args)
{
   Bool handled = True;

   vg_assert(VG_(is_running_thread)(tid));

   switch (args[0]) {
   case VG_USERREQ__FREERES_DONE:
      /* This is equivalent to an exit() syscall, but we don't set the
         exitcode (since it might already be set) */
      if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
         VG_(message)(Vg_DebugMsg,
                      "__gnu_cxx::__freeres() and __libc_freeres() wrapper "
                      "done; really quitting!\n");
      VG_(threads)[tid].exitreason = VgSrc_ExitThread;
      break;

   default:
      handled = False;
      break;
   }

   return handled;
}
/* Write out a client message, possibly including a back trace. Return
   the number of characters written. In case of XML output, the format
   string as well as any arguments it requires will be XML'ified.
   I.e. special characters such as the angle brackets will be translated
   into proper escape sequences. */
static
Int print_client_message( ThreadId tid, const HChar *format,
                          va_list *vargsp, Bool include_backtrace)
{
   Int count;

   if (VG_(clo_xml)) {
      /* Translate the format string as follows:
         <  -->  &lt;
         >  -->  &gt;
         &  -->  &amp;
         %s -->  %pS
         Yes, yes, it's simplified but in synch with
         myvprintf_str_XML_simplistic and VG_(debugLog_vprintf).
      */

      /* Allocate a buffer that is for sure large enough. */
      HChar xml_format[VG_(strlen)(format) * 5 + 1];

      const HChar *p;
      HChar *q = xml_format;

      for (p = format; *p; ++p) {
         switch (*p) {
         case '<': VG_(strcpy)(q, "&lt;");  q += 4; break;
         case '>': VG_(strcpy)(q, "&gt;");  q += 4; break;
         case '&': VG_(strcpy)(q, "&amp;"); q += 5; break;
         case '%':
            /* Careful: make sure %%s stays %%s */
            *q++ = *p++;
            if (*p == 's') {
               *q++ = 'p';
               *q++ = 'S';
            } else {
               *q++ = *p;
            }
            break;
         default:
            *q++ = *p;
            break;
         }
      }
      *q = '\0';

      VG_(printf_xml)( "<clientmsg>\n" );
      VG_(printf_xml)( "  <tid>%u</tid>\n", tid );
      const ThreadState *tst = VG_(get_ThreadState)(tid);
      if (tst->thread_name)
         VG_(printf_xml)("  <threadname>%s</threadname>\n", tst->thread_name);
      VG_(printf_xml)( "  <text>" );
      count = VG_(vprintf_xml)( xml_format, *vargsp );
      VG_(printf_xml)( "  </text>\n" );
   } else {
      count = VG_(vmessage)( Vg_ClientMsg, format, *vargsp );
      VG_(message_flush)();
   }

   if (include_backtrace)
      VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );

   if (VG_(clo_xml))
      VG_(printf_xml)( "</clientmsg>\n" );

   return count;
}
/* Do a client request for the thread tid.  After the request, tid may
   or may not still be runnable; if not, the scheduler will have to
   choose a new thread to run.
*/
static
void do_client_request ( ThreadId tid )
{
   UWord* arg = (UWord*)(Addr)(CLREQ_ARGS(VG_(threads)[tid].arch));
   UWord req_no = arg[0];

   if (0)
      VG_(printf)("req no = 0x%lx, arg = %p\n", req_no, arg);
   switch (req_no) {
      case VG_USERREQ__CLIENT_CALL0: {
         UWord (*f)(ThreadId) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
         break;
      }
      case VG_USERREQ__CLIENT_CALL1: {
         UWord (*f)(ThreadId, UWord) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL2: {
         UWord (*f)(ThreadId, UWord, UWord) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
         break;
      }
      case VG_USERREQ__CLIENT_CALL3: {
         UWord (*f)(ThreadId, UWord, UWord, UWord) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
         break;
      }
      // Nb: this looks like a circular definition, because it kind of is.
      // See comment in valgrind.h to understand what's going on.
      case VG_USERREQ__RUNNING_ON_VALGRIND:
         SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
         break;
      case VG_USERREQ__PRINTF: {
         const HChar* format = (HChar *)arg[1];
         /* JRS 2010-Jan-28: this is DEPRECATED; use the
            _VALIST_BY_REF version instead */
         if (sizeof(va_list) != sizeof(UWord))
            goto va_list_casting_error_NORETURN;
         union {
            va_list vargs;
            unsigned long uw;
         } u;
         u.uw = (unsigned long)arg[2];
         Int count =
            print_client_message( tid, format, &u.vargs,
                                  /* include_backtrace */ False );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }
      case VG_USERREQ__PRINTF_BACKTRACE: {
         const HChar* format = (HChar *)arg[1];
         /* JRS 2010-Jan-28: this is DEPRECATED; use the
            _VALIST_BY_REF version instead */
         if (sizeof(va_list) != sizeof(UWord))
            goto va_list_casting_error_NORETURN;
         union {
            va_list vargs;
            unsigned long uw;
         } u;
         u.uw = (unsigned long)arg[2];
         Int count =
            print_client_message( tid, format, &u.vargs,
                                  /* include_backtrace */ True );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }
      case VG_USERREQ__PRINTF_VALIST_BY_REF: {
         const HChar* format = (HChar *)arg[1];
         va_list* vargsp = (va_list*)arg[2];
         Int count =
            print_client_message( tid, format, vargsp,
                                  /* include_backtrace */ False );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }
      case VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF: {
         const HChar* format = (HChar *)arg[1];
         va_list* vargsp = (va_list*)arg[2];
         Int count =
            print_client_message( tid, format, vargsp,
                                  /* include_backtrace */ True );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }
      case VG_USERREQ__INTERNAL_PRINTF_VALIST_BY_REF: {
         va_list* vargsp = (va_list*)arg[2];
         Int count =
            VG_(vmessage)( Vg_DebugMsg, (HChar *)arg[1], *vargsp );
         VG_(message_flush)();
         SET_CLREQ_RETVAL( tid, count );
         break;
      }
      case VG_USERREQ__ADD_IFUNC_TARGET: {
         VG_(redir_add_ifunc_target)( arg[1], arg[2] );
         SET_CLREQ_RETVAL( tid, 0);
         break;
      }

      case VG_USERREQ__STACK_REGISTER: {
         UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
         SET_CLREQ_RETVAL( tid, sid );
         VG_TRACK(register_stack, (Addr)arg[1], (Addr)arg[2]);
         break;
      }

      case VG_USERREQ__STACK_DEREGISTER: {
         VG_(deregister_stack)(arg[1]);
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      }

      case VG_USERREQ__STACK_CHANGE: {
         VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      }
      case VG_USERREQ__GET_MALLOCFUNCS: {
         struct vg_mallocfunc_info *info = (struct vg_mallocfunc_info *)arg[1];

         info->tl_malloc                       = VG_(tdict).tool_malloc;
         info->tl_calloc                       = VG_(tdict).tool_calloc;
         info->tl_realloc                      = VG_(tdict).tool_realloc;
         info->tl_memalign                     = VG_(tdict).tool_memalign;
         info->tl___builtin_new                = VG_(tdict).tool___builtin_new;
         info->tl___builtin_new_aligned        = VG_(tdict).tool___builtin_new_aligned;
         info->tl___builtin_vec_new            = VG_(tdict).tool___builtin_vec_new;
         info->tl___builtin_vec_new_aligned    = VG_(tdict).tool___builtin_vec_new_aligned;
         info->tl_free                         = VG_(tdict).tool_free;
         info->tl___builtin_delete             = VG_(tdict).tool___builtin_delete;
         info->tl___builtin_delete_aligned     = VG_(tdict).tool___builtin_delete_aligned;
         info->tl___builtin_vec_delete         = VG_(tdict).tool___builtin_vec_delete;
         info->tl___builtin_vec_delete_aligned = VG_(tdict).tool___builtin_vec_delete_aligned;
         info->tl_malloc_usable_size           = VG_(tdict).tool_malloc_usable_size;

         info->mallinfo                        = VG_(mallinfo);
         info->clo_trace_malloc                = VG_(clo_trace_malloc);
         info->clo_realloc_zero_bytes_frees    = VG_(clo_realloc_zero_bytes_frees);

         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */

         break;
      }
      /* Requests from the client program */
      case VG_USERREQ__DISCARD_TRANSLATIONS:
         if (VG_(clo_verbosity) > 2)
            VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
                         " addr %p,  len %lu\n",
                         (void*)arg[1], arg[2] );

         VG_(discard_translations)(
            arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
         );

         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      case VG_USERREQ__INNER_THREADS:
         if (VG_(clo_verbosity) > 2)
            VG_(printf)( "client request: INNER_THREADS,"
                         " addr %p\n", (void*)arg[1] );
         VG_(inner_threads) = (ThreadState*)arg[1];
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      case VG_USERREQ__COUNT_ERRORS:
         SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
         break;

      case VG_USERREQ__CLO_CHANGE:
         VG_(process_dynamic_option) (cloD, (HChar *)arg[1]);
         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;

      case VG_USERREQ__LOAD_PDB_DEBUGINFO:
         VG_(di_notify_pdb_debuginfo)( arg[1], arg[2], arg[3], arg[4] );
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      case VG_USERREQ__MAP_IP_TO_SRCLOC: {
         Addr   ip    = arg[1];
         HChar* buf64 = (HChar*)arg[2];  // points to a HChar [64] array
         const HChar *buf;  // points to a string of unknown size

         VG_(memset)(buf64, 0, 64);
         UInt linenum = 0;

         // Unless the guest would become epoch aware (and would need to
         // describe IP addresses of dlclosed libs), using cur_ep is a
         // reasonable choice.
         const DiEpoch cur_ep = VG_(current_DiEpoch)();

         Bool ok = VG_(get_filename_linenum)(
                      cur_ep, ip, &buf, NULL, &linenum
                   );
         if (ok) {
            /* For backward compatibility truncate the filename to
               49 characters. */
            VG_(strncpy)(buf64, buf, 50);
            buf64[49] = '\0';
            UInt i;
            for (i = 0; i < 50; i++) {
               if (buf64[i] == 0)
                  break;
            }
            VG_(sprintf)(buf64+i, ":%u", linenum);  // safe
         } else {
            buf64[0] = 0;
         }

         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;
      }
      case VG_USERREQ__CHANGE_ERR_DISABLEMENT: {
         Word delta = arg[1];
         vg_assert(delta == 1 || delta == -1);
         ThreadState* tst = VG_(get_ThreadState)(tid);
         vg_assert(tst);
         if (delta == 1 && tst->err_disablement_level < 0xFFFFFFFF) {
            tst->err_disablement_level++;
         }
         else
         if (delta == -1 && tst->err_disablement_level > 0) {
            tst->err_disablement_level--;
         }
         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;
      }
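      /* Note (based on valgrind.h, not this file): the request above backs
         the client macros VALGRIND_DISABLE_ERROR_REPORTING and
         VALGRIND_ENABLE_ERROR_REPORTING, which pass delta = +1 and -1
         respectively; the level saturates at its bounds rather than
         wrapping. */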
      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         UWord ret;
         ret = (UWord) VG_(client_monitor_command) ((HChar*)arg[1]);
         SET_CLREQ_RETVAL(tid, ret);
         break;
      }
      case VG_USERREQ__MALLOCLIKE_BLOCK:
      case VG_USERREQ__RESIZEINPLACE_BLOCK:
      case VG_USERREQ__FREELIKE_BLOCK:
         // Ignore them if the addr is NULL; otherwise pass onto the tool.
         if (!arg[1]) {
            SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
            break;
         } else {
            goto my_default;
         }
      case VG_USERREQ__VEX_INIT_FOR_IRI:
         LibVEX_InitIRI ( (IRICB *)arg[1] );
         break;
      default:
       my_default:
         if (os_client_request(tid, arg)) {
            // do nothing, os_client_request() handled it
         } else if (VG_(needs).client_requests) {
            UWord ret;

            if (VG_(clo_verbosity) > 2)
               VG_(printf)("client request: code %lx,  addr %p,  len %lu\n",
                           arg[0], (void*)arg[1], arg[2] );

            if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
               SET_CLREQ_RETVAL(tid, ret);
         } else {
            static Bool whined = False;

            if (!whined && VG_(clo_verbosity) > 2) {
               // Allow for requests in core, but defined by tools, which
               // have 0 and 0 in their two high bytes.
               HChar c1 = (arg[0] >> 24) & 0xff;
               HChar c2 = (arg[0] >> 16) & 0xff;
               if (c1 == 0) c1 = '_';
               if (c2 == 0) c2 = '_';
               VG_(message)(Vg_UserMsg, "Warning:\n"
                   "  unhandled client request: 0x%lx (%c%c+0x%lx).  Perhaps\n"
                   "  VG_(needs).client_requests should be set?\n",
                   arg[0], c1, c2, arg[0] & 0xffff);
               whined = True;
            }
         }
         break;
   }
   return;
  va_list_casting_error_NORETURN:
   VG_(umsg)(
      "Valgrind: fatal error - cannot continue: use of the deprecated\n"
      "client requests VG_USERREQ__PRINTF or VG_USERREQ__PRINTF_BACKTRACE\n"
      "on a platform where they cannot be supported.  Please use the\n"
      "equivalent _VALIST_BY_REF versions instead.\n"
      "\n"
      "This is a binary-incompatible change in Valgrind's client request\n"
      "mechanism.  It is unfortunate, but difficult to avoid.  End-users\n"
      "are expected to almost never see this message.  The only case in\n"
      "which you might see this message is if your code uses the macros\n"
      "VALGRIND_PRINTF or VALGRIND_PRINTF_BACKTRACE.  If so, you will need\n"
      "to recompile such code, using the header files from this version of\n"
      "Valgrind, and not any previous version.\n"
      "\n"
      "If you see this message in any other circumstances, it is probably\n"
      "a bug in Valgrind.  In this case, please file a bug report at\n"
      "\n"
      "   http://www.valgrind.org/support/bug_reports.html\n"
   );
   VG_(exit)(1);
}
/* ---------------------------------------------------------------------
   Sanity checking (permanently engaged)
   ------------------------------------------------------------------ */
/* Internal consistency checks on the sched structures. */
static
void scheduler_sanity ( ThreadId tid )
{
   Bool bad = False;
   Int lwpid = VG_(gettid)();

   if (!VG_(is_running_thread)(tid)) {
      VG_(message)(Vg_DebugMsg,
                   "Thread %u is supposed to be running, "
                   "but doesn't own the_BigLock (owned by %u)\n",
                   tid, VG_(running_tid));
      bad = True;
   }

   if (lwpid != VG_(threads)[tid].os_state.lwpid) {
      VG_(message)(Vg_DebugMsg,
                   "Thread %u supposed to be in LWP %d, but we're actually %d\n",
                   tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
      bad = True;
   }

   if (lwpid != ML_(get_sched_lock_owner)(the_BigLock)) {
      VG_(message)(Vg_DebugMsg,
                   "Thread (LWPID) %u doesn't own the_BigLock\n",
                   tid);
      bad = True;
   }

   if (0) {
      /* Periodically show the state of all threads, for debugging
         purposes. */
      static UInt lasttime = 0;
      UInt now;
      now = VG_(read_millisecond_timer)();
      if ((!bad) && (lasttime + 4000/*ms*/ <= now)) {
         lasttime = now;
         VG_(printf)("\n------------ Sched State at %d ms ------------\n",
                     (Int)now);
         VG_(show_sched_status)(True,   // host_stacktrace
                                True,   // stack_usage
                                True);  // exited_threads
      }
   }

   /* core_panic also shows the sched status, which is why we don't
      show it above if bad==True. */
   if (bad)
      VG_(core_panic)("scheduler_sanity: failed");
}
void VG_(sanity_check_general) ( Bool force_expensive )
{
   ThreadId tid;

   static UInt next_slow_check_at = 1;
   static UInt slow_check_interval = 25;

   if (VG_(clo_sanity_level) < 1) return;

   /* --- First do all the tests that we can do quickly. ---*/

   sanity_fast_count++;

   /* Check stuff pertaining to the memory check system. */

   /* Check that nobody has spuriously claimed that the first or
      last 16 pages of memory have become accessible [...] */
   if (VG_(needs).sanity_checks) {
      vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
   }

   /* --- Now some more expensive checks. ---*/

   /* Once every now and again, check some more expensive stuff.
      Gradually increase the interval between such checks so as not to
      burden long-running programs too much. */
   if ( force_expensive
        || VG_(clo_sanity_level) >= 2
        || (VG_(clo_sanity_level) == 1
            && sanity_fast_count == next_slow_check_at)) {

      if (0) VG_(printf)("SLOW at %u\n", sanity_fast_count-1);

      next_slow_check_at = sanity_fast_count - 1 + slow_check_interval;
      slow_check_interval++;
      sanity_slow_count++;
).sanity_checks
) {
2414 vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check
));
2417 /* Look for stack overruns. Visit all threads. */
2418 for (tid
= 1; tid
< VG_N_THREADS
; tid
++) {
2422 if (VG_(threads
)[tid
].status
== VgTs_Empty
||
2423 VG_(threads
)[tid
].status
== VgTs_Zombie
)
2428 VG_(get_ThreadState
)(tid
)->os_state
.valgrind_stack_base
;
2430 = 4096; // Let's say. Checking more causes lots of L2 misses.
2432 = VG_(am_get_VgStack_unused_szB
)(stack
, limit
);
2433 if (remains
< limit
)
2434 VG_(message
)(Vg_DebugMsg
,
2435 "WARNING: Thread %u is within %lu bytes "
2436 "of running out of valgrind stack!\n"
2437 "Valgrind stack size can be increased "
2438 "using --valgrind-stacksize=....\n",
2443 if (VG_(clo_sanity_level
) >= 2) {
2444 /* Check sanity of the low-level memory manager. Note that bugs
2445 in the client's code can cause this to fail, so we don't do
2446 this check unless specially asked for. And because it's
2447 potentially very expensive. */
2448 VG_(sanity_check_malloc_all
)();
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/