/*--------------------------------------------------------------------*/
/*--- Thread scheduling.                               scheduler.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2017 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

/*
   Overview

   Valgrind tries to emulate the kernel's threading as closely as
   possible.  The client does all threading via the normal syscalls
   (on Linux: clone, etc).  Valgrind emulates this by creating exactly
   the same process structure as would be created without Valgrind.
   There are no extra threads.

   The main difference is that Valgrind only allows one client thread
   to run at once.  This is controlled with the CPU Big Lock,
   "the_BigLock".  Any time a thread wants to run client code or
   manipulate any shared state (which is anything other than its own
   ThreadState entry), it must hold the_BigLock.

   When a thread is about to block in a blocking syscall, it releases
   the_BigLock, and re-takes it when it becomes runnable again (either
   because the syscall finished, or we took a signal).

   VG_(scheduler) therefore runs in each thread.  It returns only when
   the thread is exiting, either because it exited itself, or it was
   told to exit by another thread.

   This file is almost entirely OS-independent.  The details of how
   the OS handles threading and signalling are abstracted away and
   implemented elsewhere.  [Some of the functions have worked their
   way back for the moment, until we do an OS port in earnest...]
*/
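/* Illustrative sketch (not part of the original sources): the lock
   discipline described above, seen from a thread about to block in a
   syscall, looks roughly like this:

      VG_(release_BigLock)(tid, VgTs_WaitSys, "example-syscall");
         // ... the thread blocks in the kernel; meanwhile other
         // ... threads may take the_BigLock and run client code ...
      VG_(acquire_BigLock)(tid, "example-syscall");

   "example-syscall" is only a hypothetical 'who' string used for
   tracing; the real call sites are in m_syswrap and below in this
   file. */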
#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"    // __NR_sched_yield
#include "pub_core_threadstate.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_clreq.h"        // for VG_USERREQ__*
#include "pub_core_dispatch.h"
#include "pub_core_errormgr.h"     // For VG_(get_n_errs_found)()
#include "pub_core_gdbserver.h"    // for VG_(gdbserver)/VG_(gdbserver_activity)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#if defined(VGO_darwin)
#include "pub_core_mach.h"
#endif
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_replacemalloc.h"
#include "pub_core_sbprofile.h"
#include "pub_core_signals.h"
#include "pub_core_stacks.h"
#include "pub_core_stacktrace.h"   // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_translate.h"    // For VG_(translate)()
#include "pub_core_transtab.h"
#include "pub_core_debuginfo.h"    // VG_(di_notify_pdb_debuginfo)
#include "priv_sched-lock.h"
#include "pub_core_scheduler.h"    // self
#include "pub_core_redir.h"
#include "libvex_emnote.h"         // VexEmNote
/* ---------------------------------------------------------------------
   Types and globals for the scheduler.
   ------------------------------------------------------------------ */

/* ThreadId and ThreadState are defined elsewhere */

/* Defines the thread-scheduling timeslice, in terms of the number of
   basic blocks we attempt to run each thread for.  Smaller values
   give finer interleaving but much increased scheduling overheads. */
#define SCHEDULING_QUANTUM   100000

/* If False, a fault is Valgrind-internal (ie, a bug) */
Bool VG_(in_generated_code) = False;

/* 64-bit counter for the number of basic blocks done. */
static ULong bbs_done = 0;

/* Counter to see if vgdb activity is to be verified.
   When nr of bbs done reaches vgdb_next_poll, scheduler will
   poll for gdbserver activity.  VG_(force_vgdb_poll) and
   VG_(disable_vgdb_poll) allow the valgrind core (e.g. m_gdbserver)
   to control when the next poll will be done. */
static ULong vgdb_next_poll;

/* Forwards */
static void do_client_request ( ThreadId tid );
static void scheduler_sanity ( ThreadId tid );
static void mostly_clear_thread_record ( ThreadId tid );

/* Stats. */
static ULong n_scheduling_events_MINOR = 0;
static ULong n_scheduling_events_MAJOR = 0;

/* Stats: number of XIndirs looked up in the fast cache, the number of hits in
   ways 1, 2 and 3, and the number of misses.  The number of hits in way 0 isn't
   recorded because it can be computed from these five numbers. */
static ULong stats__n_xIndirs       = 0;
static ULong stats__n_xIndir_hits1  = 0;
static ULong stats__n_xIndir_hits2  = 0;
static ULong stats__n_xIndir_hits3  = 0;
static ULong stats__n_xIndir_misses = 0;

/* And 32-bit temp bins for the above, so that 32-bit platforms don't
   have to do 64 bit incs on the hot path through
   VG_(disp_cp_xindir). */
/*global*/ UInt VG_(stats__n_xIndirs_32)       = 0;
/*global*/ UInt VG_(stats__n_xIndir_hits1_32)  = 0;
/*global*/ UInt VG_(stats__n_xIndir_hits2_32)  = 0;
/*global*/ UInt VG_(stats__n_xIndir_hits3_32)  = 0;
/*global*/ UInt VG_(stats__n_xIndir_misses_32) = 0;

/* Sanity checking counts. */
static UInt sanity_fast_count = 0;
static UInt sanity_slow_count = 0;
void VG_(print_scheduler_stats)(void)
{
   VG_(message)(Vg_DebugMsg,
                "scheduler: %'llu event checks.\n", bbs_done );

   const ULong hits0
      = stats__n_xIndirs - stats__n_xIndir_hits1 - stats__n_xIndir_hits2
        - stats__n_xIndir_hits3 - stats__n_xIndir_misses;
   VG_(message)(Vg_DebugMsg,
                "scheduler: %'llu indir transfers, "
                "%'llu misses (1 in %llu) ..\n",
                stats__n_xIndirs, stats__n_xIndir_misses,
                stats__n_xIndirs / (stats__n_xIndir_misses
                                    ? stats__n_xIndir_misses : 1));
   VG_(message)(Vg_DebugMsg,
                "scheduler: .. of which: %'llu hit0, %'llu hit1, "
                "%'llu hit2, %'llu hit3, %'llu missed\n",
                hits0,
                stats__n_xIndir_hits1,
                stats__n_xIndir_hits2,
                stats__n_xIndir_hits3,
                stats__n_xIndir_misses );

   VG_(message)(Vg_DebugMsg,
                "scheduler: %'llu/%'llu major/minor sched events.\n",
                n_scheduling_events_MAJOR, n_scheduling_events_MINOR);
   VG_(message)(Vg_DebugMsg,
                "   sanity: %u cheap, %u expensive checks.\n",
                sanity_fast_count, sanity_slow_count );
}
/*
 * Mutual exclusion object used to serialize threads.
 */
static struct sched_lock *the_BigLock;
/* ---------------------------------------------------------------------
   Helper functions for the scheduler.
   ------------------------------------------------------------------ */

static void maybe_progress_report ( UInt reporting_interval_seconds )
{
   /* This is when the next report is due, in user cpu milliseconds since
      process start.  This is a global variable so this won't be thread-safe
      if Valgrind is ever made multithreaded.  For now it's fine. */
   static UInt next_report_due_at = 0;

   /* First of all, figure out whether another report is due.  It
      probably isn't. */
   UInt user_ms = VG_(get_user_milliseconds)();
   if (LIKELY(user_ms < next_report_due_at))
      return;

   Bool first_ever_call = next_report_due_at == 0;

   /* A report is due.  First, though, set the time for the next report. */
   next_report_due_at += 1000 * reporting_interval_seconds;

   /* If it's been an excessively long time since the last check, we
      might have gone more than one reporting interval forward.  Guard
      against that. */
   while (next_report_due_at <= user_ms)
      next_report_due_at += 1000 * reporting_interval_seconds;

   /* Also we don't want to report anything on the first call, but we
      have to wait till this point to leave, so that we set up the
      next-call time correctly. */
   if (first_ever_call)
      return;

   /* Print the report. */
   UInt   user_cpu_seconds  = user_ms / 1000;
   UInt   wallclock_seconds = VG_(read_millisecond_timer)() / 1000;
   Double millionEvCs       = ((Double)bbs_done) / 1000000.0;
   Double thousandTIns      = ((Double)VG_(get_bbs_translated)()) / 1000.0;
   Double thousandTOuts     = ((Double)VG_(get_bbs_discarded_or_dumped)()) / 1000.0;
   UInt   nThreads          = VG_(count_living_threads)();

   if (VG_(clo_verbosity) > 0) {
      VG_(dmsg)("PROGRESS: U %'us, W %'us, %.1f%% CPU, EvC %.2fM, "
                "TIn %.1fk, TOut %.1fk, #thr %u\n",
                user_cpu_seconds, wallclock_seconds,
                100.0
                   * (Double)(user_cpu_seconds)
                   / (Double)(wallclock_seconds == 0 ? 1 : wallclock_seconds),
                millionEvCs,
                thousandTIns, thousandTOuts, nThreads);
   }
}
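/* Worked example (numbers are illustrative only): with
   reporting_interval_seconds == 10, reports fall due at user-CPU times
   10s, 20s, 30s, ...  If user_ms is 25000 when a report is due, the
   code above advances next_report_due_at to the next boundary beyond
   user_ms (here 30000).  The printed CPU figure is
   100.0 * user_cpu_seconds / wallclock_seconds, so 25s of user CPU
   over 50s of wallclock would be reported as 50.0% CPU. */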
static
void print_sched_event ( ThreadId tid, const HChar* what )
{
   VG_(message)(Vg_DebugMsg, "  SCHED[%u]: %s\n", tid, what );
}
/* For showing SB profiles, if the user asks to see them. */
static
void maybe_show_sb_profile ( void )
{
   /* DO NOT MAKE NON-STATIC */
   static ULong bbs_done_lastcheck = 0;

   vg_assert(VG_(clo_profyle_interval) > 0);
   Long delta = (Long)(bbs_done - bbs_done_lastcheck);
   vg_assert(delta >= 0);
   if ((ULong)delta >= VG_(clo_profyle_interval)) {
      bbs_done_lastcheck = bbs_done;
      VG_(get_and_show_SB_profile)(bbs_done);
   }
}
static
const HChar* name_of_sched_event ( UInt event )
{
   switch (event) {
      case VEX_TRC_JMP_INVALICACHE:    return "INVALICACHE";
      case VEX_TRC_JMP_FLUSHDCACHE:    return "FLUSHDCACHE";
      case VEX_TRC_JMP_NOREDIR:        return "NOREDIR";
      case VEX_TRC_JMP_SIGILL:         return "SIGILL";
      case VEX_TRC_JMP_SIGTRAP:        return "SIGTRAP";
      case VEX_TRC_JMP_SIGSEGV:        return "SIGSEGV";
      case VEX_TRC_JMP_SIGBUS:         return "SIGBUS";
      case VEX_TRC_JMP_SIGFPE_INTOVF:
      case VEX_TRC_JMP_SIGFPE_INTDIV:  return "SIGFPE";
      case VEX_TRC_JMP_EMWARN:         return "EMWARN";
      case VEX_TRC_JMP_EMFAIL:         return "EMFAIL";
      case VEX_TRC_JMP_CLIENTREQ:      return "CLIENTREQ";
      case VEX_TRC_JMP_YIELD:          return "YIELD";
      case VEX_TRC_JMP_NODECODE:       return "NODECODE";
      case VEX_TRC_JMP_MAPFAIL:        return "MAPFAIL";
      case VEX_TRC_JMP_SYS_SYSCALL:    return "SYSCALL";
      case VEX_TRC_JMP_SYS_INT32:      return "INT32";
      case VEX_TRC_JMP_SYS_INT128:     return "INT128";
      case VEX_TRC_JMP_SYS_INT129:     return "INT129";
      case VEX_TRC_JMP_SYS_INT130:     return "INT130";
      case VEX_TRC_JMP_SYS_INT145:     return "INT145";
      case VEX_TRC_JMP_SYS_INT210:     return "INT210";
      case VEX_TRC_JMP_SYS_SYSENTER:   return "SYSENTER";
      case VEX_TRC_JMP_BORING:         return "VEX_BORING";

      case VG_TRC_BORING:              return "VG_BORING";
      case VG_TRC_INNER_FASTMISS:      return "FASTMISS";
      case VG_TRC_INNER_COUNTERZERO:   return "COUNTERZERO";
      case VG_TRC_FAULT_SIGNAL:        return "FAULTSIGNAL";
      case VG_TRC_INVARIANT_FAILED:    return "INVFAILED";
      case VG_TRC_CHAIN_ME_TO_SLOW_EP: return "CHAIN_ME_SLOW";
      case VG_TRC_CHAIN_ME_TO_FAST_EP: return "CHAIN_ME_FAST";
      default:                         return "??UNKNOWN??";
   }
}
/* Allocate a completely empty ThreadState record. */
ThreadId VG_(alloc_ThreadState) ( void )
{
   Int i;
   for (i = 1; i < VG_N_THREADS; i++) {
      if (VG_(threads)[i].status == VgTs_Empty) {
         VG_(threads)[i].status = VgTs_Init;
         VG_(threads)[i].exitreason = VgSrc_None;
         if (VG_(threads)[i].thread_name)
            VG_(free)(VG_(threads)[i].thread_name);
         VG_(threads)[i].thread_name = NULL;
         return i;
      }
   }
   VG_(printf)("Use --max-threads=INT to specify a larger number of threads\n"
               "and rerun valgrind\n");
   VG_(core_panic)("Max number of threads is too low");
   /*NOTREACHED*/
}
/*
   Mark a thread as Runnable.  This will block until the_BigLock is
   available, so that we get exclusive access to all the shared
   structures and the CPU.  Up until we get the_BigLock, we must not
   touch any shared state.

   When this returns, we'll actually be running.
 */
void VG_(acquire_BigLock)(ThreadId tid, const HChar* who)
{
   ThreadState *tst;

#if 0
   if (VG_(clo_trace_sched)) {
      HChar buf[VG_(strlen)(who) + 30];
      VG_(sprintf)(buf, "waiting for lock (%s)", who);
      print_sched_event(tid, buf);
   }
#endif

   /* First, acquire the_BigLock.  We can't do anything else safely
      prior to this point.  Even doing debug printing prior to this
      point is, technically, wrong. */
   VG_(acquire_BigLock_LL)(NULL);

   tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status != VgTs_Runnable);

   tst->status = VgTs_Runnable;

   if (VG_(running_tid) != VG_INVALID_THREADID)
      VG_(printf)("tid %u found %u running\n", tid, VG_(running_tid));
   vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
   VG_(running_tid) = tid;

   { Addr gsp = VG_(get_SP)(tid);
     if (NULL != VG_(tdict).track_new_mem_stack_w_ECU)
        VG_(unknown_SP_update_w_ECU)(gsp, gsp, 0/*unknown origin*/);
     else
        VG_(unknown_SP_update)(gsp, gsp);
   }

   if (VG_(clo_trace_sched)) {
      HChar buf[VG_(strlen)(who) + 30];
      VG_(sprintf)(buf, " acquired lock (%s)", who);
      print_sched_event(tid, buf);
   }
}
/*
   Set a thread into a sleeping state, and give up exclusive access to
   the CPU.  On return, the thread must be prepared to block until it
   is ready to run again (generally this means blocking in a syscall,
   but it may mean that we remain in a Runnable state and we're just
   yielding the CPU to another thread).
 */
void VG_(release_BigLock)(ThreadId tid, ThreadStatus sleepstate,
                          const HChar* who)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->status == VgTs_Runnable);

   vg_assert(sleepstate == VgTs_WaitSys ||
             sleepstate == VgTs_Yielding);

   tst->status = sleepstate;

   vg_assert(VG_(running_tid) == tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   if (VG_(clo_trace_sched)) {
      const HChar *status = VG_(name_of_ThreadStatus)(sleepstate);
      HChar buf[VG_(strlen)(who) + VG_(strlen)(status) + 30];
      VG_(sprintf)(buf, "releasing lock (%s) -> %s", who, status);
      print_sched_event(tid, buf);
   }

   /* Release the_BigLock; this will reschedule any runnable
      thread. */
   VG_(release_BigLock_LL)(NULL);
}
static void init_BigLock(void)
{
   vg_assert(!the_BigLock);
   the_BigLock = ML_(create_sched_lock)();
}

static void deinit_BigLock(void)
{
   ML_(destroy_sched_lock)(the_BigLock);
   the_BigLock = NULL;
}
/* See pub_core_scheduler.h for description */
void VG_(acquire_BigLock_LL) ( const HChar* who )
{
   ML_(acquire_sched_lock)(the_BigLock);
}

/* See pub_core_scheduler.h for description */
void VG_(release_BigLock_LL) ( const HChar* who )
{
   ML_(release_sched_lock)(the_BigLock);
}

Bool VG_(owns_BigLock_LL) ( ThreadId tid )
{
   return (ML_(get_sched_lock_owner)(the_BigLock)
           == VG_(threads)[tid].os_state.lwpid);
}
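/* Usage sketch (not from the original sources): the _LL ("low level")
   variants above manipulate only the lock itself, whereas
   VG_(acquire_BigLock)/VG_(release_BigLock) additionally maintain the
   thread's ThreadStatus and VG_(running_tid).  A typical sanity check
   elsewhere in the core might therefore look like:

      vg_assert(VG_(owns_BigLock_LL)(tid));
*/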
/* Clear out the ThreadState and release the semaphore. Leaves the
   ThreadState in VgTs_Zombie state, so that it doesn't get
   reallocated until the caller is really ready. */
void VG_(exit_thread)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(VG_(is_exiting)(tid));

   mostly_clear_thread_record(tid);
   VG_(running_tid) = VG_INVALID_THREADID;

   /* There should still be a valid exitreason for this thread */
   vg_assert(VG_(threads)[tid].exitreason != VgSrc_None);

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "release lock in VG_(exit_thread)");

   VG_(release_BigLock_LL)(NULL);
}
/* If 'tid' is blocked in a syscall, send it SIGVGKILL so as to get it
   out of the syscall and onto doing the next thing, whatever that is.
   If it isn't blocked in a syscall, has no effect on the thread. */
void VG_(get_thread_out_of_syscall)(ThreadId tid)
{
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(!VG_(is_running_thread)(tid));

   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      if (VG_(clo_trace_signals)) {
         VG_(message)(Vg_DebugMsg,
                      "get_thread_out_of_syscall zaps tid %u lwp %d\n",
                      tid, VG_(threads)[tid].os_state.lwpid);
      }
#     if defined(VGO_darwin)
      {
         // GrP fixme use mach primitives on darwin?
         // GrP fixme thread_abort_safely?
         // GrP fixme race for thread with WaitSys set but not in syscall yet?
         extern kern_return_t thread_abort(mach_port_t);
         thread_abort(VG_(threads)[tid].os_state.lwpid);
      }
#     else
      {
         __attribute__((unused))
         Int r = VG_(tkill)(VG_(threads)[tid].os_state.lwpid, VG_SIGVGKILL);
         /* JRS 2009-Mar-20: should we assert for r==0 (tkill succeeded)?
            I'm really not sure.  Here's a race scenario which argues
            that we shouldn't; but equally I'm not sure the scenario is
            even possible, because of constraints caused by the question
            of who holds the BigLock when.

            Target thread tid does sys_read on a socket and blocks.  This
            function gets called, and we observe correctly that tid's
            status is WaitSys but then for whatever reason this function
            goes very slowly for a while.  Then data arrives from
            wherever, tid's sys_read returns, tid exits.  Then we do
            tkill on tid, but tid no longer exists; tkill returns an
            error code and the assert fails. */
         /* vg_assert(r == 0); */
      }
#     endif
   }
}
/*
   Yield the CPU for a short time to let some other thread run.
 */
void VG_(vg_yield)(void)
{
   ThreadId tid = VG_(running_tid);

   vg_assert(tid != VG_INVALID_THREADID);
   vg_assert(VG_(threads)[tid].os_state.lwpid == VG_(gettid)());

   VG_(release_BigLock)(tid, VgTs_Yielding, "VG_(vg_yield)");

   /*
      Tell the kernel we're yielding.
    */
#  if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
   VG_(do_syscall0)(__NR_sched_yield);
#  elif defined(VGO_solaris)
   VG_(do_syscall0)(__NR_yield);
#  else
#    error Unknown OS
#  endif

   VG_(acquire_BigLock)(tid, "VG_(vg_yield)");
}
/* Set the standard set of blocked signals, used whenever we're not
   running a client syscall. */
static void block_signals(void)
{
   vki_sigset_t mask;

   VG_(sigfillset)(&mask);

   /* Don't block these because they're synchronous */
   VG_(sigdelset)(&mask, VKI_SIGSEGV);
   VG_(sigdelset)(&mask, VKI_SIGBUS);
   VG_(sigdelset)(&mask, VKI_SIGFPE);
   VG_(sigdelset)(&mask, VKI_SIGILL);
   VG_(sigdelset)(&mask, VKI_SIGTRAP);
   VG_(sigdelset)(&mask, VKI_SIGSYS);

   /* Can't block these anyway */
   VG_(sigdelset)(&mask, VKI_SIGSTOP);
   VG_(sigdelset)(&mask, VKI_SIGKILL);

   VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
}
static void os_state_clear(ThreadState *tst)
{
   tst->os_state.lwpid       = 0;
   tst->os_state.threadgroup = 0;
   tst->os_state.stk_id      = NULL_STK_ID;
#  if defined(VGO_linux)
   /* no other fields to clear */
#  elif defined(VGO_freebsd)
   /* no other fields to clear */
#  elif defined(VGO_darwin)
   tst->os_state.post_mach_trap_fn = NULL;
   tst->os_state.pthread           = 0;
   tst->os_state.func_arg          = 0;
   VG_(memset)(&tst->os_state.child_go,   0, sizeof(tst->os_state.child_go));
   VG_(memset)(&tst->os_state.child_done, 0, sizeof(tst->os_state.child_done));
   tst->os_state.wq_jmpbuf_valid   = False;
   tst->os_state.remote_port       = 0;
   tst->os_state.msgh_id           = 0;
   VG_(memset)(&tst->os_state.mach_args,  0, sizeof(tst->os_state.mach_args));
#  elif defined(VGO_solaris)
#  if defined(VGP_x86_solaris)
   tst->os_state.thrptr = 0;
#  endif
   tst->os_state.ustack = NULL;
   tst->os_state.in_door_return = False;
   tst->os_state.door_return_procedure = 0;
   tst->os_state.oldcontext = NULL;
   tst->os_state.schedctl_data = 0;
   tst->os_state.daemon_thread = False;
#  else
#    error "Unknown OS"
#  endif
}

static void os_state_init(ThreadState *tst)
{
   tst->os_state.valgrind_stack_base    = 0;
   tst->os_state.valgrind_stack_init_SP = 0;
   os_state_clear(tst);
}
static
void mostly_clear_thread_record ( ThreadId tid )
{
   vki_sigset_t savedmask;

   vg_assert(tid >= 0 && tid < VG_N_THREADS);
   VG_(cleanup_thread)(&VG_(threads)[tid].arch);
   VG_(threads)[tid].tid = tid;

   /* Leave the thread in Zombie, so that it doesn't get reallocated
      until the caller is finally done with the thread stack. */
   VG_(threads)[tid].status = VgTs_Zombie;

   VG_(sigemptyset)(&VG_(threads)[tid].sig_mask);
   VG_(sigemptyset)(&VG_(threads)[tid].tmp_sig_mask);

   os_state_clear(&VG_(threads)[tid]);

   /* start with no altstack */
   VG_(threads)[tid].altstack.ss_sp    = (void *)0xdeadbeef;
   VG_(threads)[tid].altstack.ss_size  = 0;
   VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;

   VG_(clear_out_queued_signals)(tid, &savedmask);

   VG_(threads)[tid].sched_jmpbuf_valid = False;
}
/*
   Called in the child after fork.  If the parent has multiple
   threads, then we've inherited a VG_(threads) array describing them,
   but only the thread which called fork() is actually alive in the
   child.  This function needs to clean up all those other thread
   structures.

   Whichever tid in the parent which called fork() becomes the
   master_tid in the child.  That's because the only living slot in
   VG_(threads) in the child after fork is VG_(threads)[tid], and it
   would be too hard to try to re-number the thread and relocate the
   thread state down to VG_(threads)[1].

   This function also needs to reinitialize the_BigLock, since
   otherwise we may end up sharing its state with the parent, which
   would be deeply confusing.
*/
static void sched_fork_cleanup(ThreadId me)
{
   ThreadId tid;
   vg_assert(VG_(running_tid) == me);

#  if defined(VGO_darwin)
   // GrP fixme hack reset Mach ports
   VG_(mach_init)();
#  endif

   VG_(threads)[me].os_state.lwpid       = VG_(gettid)();
   VG_(threads)[me].os_state.threadgroup = VG_(getpid)();

   /* clear out all the unused thread slots */
   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid != me) {
         mostly_clear_thread_record(tid);
         VG_(threads)[tid].status = VgTs_Empty;
         VG_(clear_syscallInfo)(tid);
      }
   }

   /* re-init and take the sema */
   deinit_BigLock();
   init_BigLock();
   VG_(acquire_BigLock_LL)(NULL);
}
/* First phase of initialisation of the scheduler.  Initialise the
   bigLock, zeroise the VG_(threads) structure and decide on the
   ThreadId of the root thread.
*/
ThreadId VG_(scheduler_init_phase1) ( void )
{
   Int i;
   ThreadId tid_main;

   VG_(debugLog)(1,"sched","sched_init_phase1\n");

   if (VG_(clo_fair_sched) != disable_fair_sched
       && !ML_(set_sched_lock_impl)(sched_lock_ticket)
       && VG_(clo_fair_sched) == enable_fair_sched)
   {
      VG_(printf)("Error: fair scheduling is not supported on this system.\n");
      VG_(exit)(1);
   }

   if (VG_(clo_verbosity) > 1) {
      VG_(message)(Vg_DebugMsg,
                   "Scheduler: using %s scheduler lock implementation.\n",
                   ML_(get_sched_lock_name)());
   }

   init_BigLock();

   for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
      /* Paranoia .. completely zero it out. */
      VG_(memset)( & VG_(threads)[i], 0, sizeof( VG_(threads)[i] ) );

      VG_(threads)[i].sig_queue = NULL;

      os_state_init(&VG_(threads)[i]);
      mostly_clear_thread_record(i);

      VG_(threads)[i].status                    = VgTs_Empty;
      VG_(threads)[i].client_stack_szB          = 0;
      VG_(threads)[i].client_stack_highest_byte = (Addr)NULL;
      VG_(threads)[i].err_disablement_level     = 0;
      VG_(threads)[i].thread_name               = NULL;
   }

   tid_main = VG_(alloc_ThreadState)();

   /* Bleh.  Unfortunately there are various places in the system that
      assume that the main thread has a ThreadId of 1.
      - Helgrind (possibly)
      - stack overflow message in default_action() in m_signals.c
      - definitely a lot more places
   */
   vg_assert(tid_main == 1);

   return tid_main;
}
/* Second phase of initialisation of the scheduler.  Given the root
   ThreadId computed by first phase of initialisation, fill in stack
   details and acquire bigLock.  Initialise the scheduler.  This is
   called at startup.  The caller subsequently initialises the guest
   state components of this main thread.
*/
void VG_(scheduler_init_phase2) ( ThreadId tid_main,
                                  Addr     clstack_end,
                                  SizeT    clstack_size )
{
   VG_(debugLog)(1,"sched","sched_init_phase2: tid_main=%u, "
                   "cls_end=0x%lx, cls_sz=%lu\n",
                   tid_main, clstack_end, clstack_size);

   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end+1));
   vg_assert(VG_IS_PAGE_ALIGNED(clstack_size));

   VG_(threads)[tid_main].client_stack_highest_byte
      = clstack_end;
   VG_(threads)[tid_main].client_stack_szB
      = clstack_size;

   VG_(atfork)(NULL, NULL, sched_fork_cleanup);
}
/* ---------------------------------------------------------------------
   Helpers for running translations.
   ------------------------------------------------------------------ */

/* Use gcc's built-in setjmp/longjmp.  longjmp must not restore signal
   mask state, but does need to pass "val" through.  jumped must be a
   volatile UWord. */
#define SCHEDSETJMP(tid, jumped, stmt)                                  \
   do {                                                                 \
      ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid);       \
                                                                        \
      (jumped) = VG_MINIMAL_SETJMP(_qq_tst->sched_jmpbuf);              \
      if ((jumped) == ((UWord)0)) {                                     \
         vg_assert(!_qq_tst->sched_jmpbuf_valid);                       \
         _qq_tst->sched_jmpbuf_valid = True;                            \
         stmt;                                                          \
      } else if (VG_(clo_trace_sched))                                  \
         VG_(printf)("SCHEDSETJMP(line %d) tid %u, jumped=%lu\n",       \
                     __LINE__, tid, jumped);                            \
      vg_assert(_qq_tst->sched_jmpbuf_valid);                           \
      _qq_tst->sched_jmpbuf_valid = False;                              \
   } while(0)
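/* Usage sketch: the macro wraps the statement passed as its third
   argument, for example (as done in handle_syscall below):

      volatile UWord jumped;
      SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));
      if (jumped != (UWord)0) {
         // the wrapped statement longjmp'd back here
      }
*/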
/* Do various guest state alignment checks prior to running a thread.
   Specifically, check that what we have matches Vex's guest state
   layout requirements.  See libvex.h for details, but in short the
   requirements are: There must be no holes in between the primary
   guest state, its two copies, and the spill area.  In short, all 4
   areas must be aligned on the LibVEX_GUEST_STATE_ALIGN boundary and
   be placed back-to-back without holes in between. */
static void do_pre_run_checks ( volatile ThreadState* tst )
{
   Addr a_vex     = (Addr) & tst->arch.vex;
   Addr a_vexsh1  = (Addr) & tst->arch.vex_shadow1;
   Addr a_vexsh2  = (Addr) & tst->arch.vex_shadow2;
   Addr a_spill   = (Addr) & tst->arch.vex_spill;
   UInt sz_vex    = (UInt) sizeof tst->arch.vex;
   UInt sz_vexsh1 = (UInt) sizeof tst->arch.vex_shadow1;
   UInt sz_vexsh2 = (UInt) sizeof tst->arch.vex_shadow2;
   UInt sz_spill  = (UInt) sizeof tst->arch.vex_spill;

   if (0)
      VG_(printf)("gst %p %u, sh1 %p %u, "
                  "sh2 %p %u, spill %p %u\n",
                  (void*)a_vex, sz_vex,
                  (void*)a_vexsh1, sz_vexsh1,
                  (void*)a_vexsh2, sz_vexsh2,
                  (void*)a_spill, sz_spill );

   vg_assert(sz_vex    % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_vexsh1 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_vexsh2 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(sz_spill  % LibVEX_GUEST_STATE_ALIGN == 0);

   vg_assert(a_vex    % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_vexsh1 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_vexsh2 % LibVEX_GUEST_STATE_ALIGN == 0);
   vg_assert(a_spill  % LibVEX_GUEST_STATE_ALIGN == 0);

   /* Check that the guest state and its two shadows have the same
      size, and that there are no holes in between.  The latter is
      important because Memcheck assumes that it can reliably access
      the shadows by indexing off a pointer to the start of the
      primary guest state area. */
   vg_assert(sz_vex == sz_vexsh1);
   vg_assert(sz_vex == sz_vexsh2);
   vg_assert(a_vex + 1 * sz_vex == a_vexsh1);
   vg_assert(a_vex + 2 * sz_vex == a_vexsh2);
   /* Also check there's no hole between the second shadow area and
      the spill area. */
   vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
   vg_assert(a_vex + 3 * sz_vex == a_spill);
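   /* Worked example (the numbers are purely illustrative): if the
      primary guest state were 1024 bytes, the layout demanded by the
      asserts above would be

         a_vex          -- primary guest state,  1024 bytes
         a_vex + 1024   -- shadow 1 (a_vexsh1),  1024 bytes
         a_vex + 2048   -- shadow 2 (a_vexsh2),  1024 bytes
         a_vex + 3072   -- spill area (a_spill), LibVEX_N_SPILL_BYTES

      with every boundary LibVEX_GUEST_STATE_ALIGN-aligned and no holes
      in between. */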
#  if defined(VGA_x86)
   /* x86 XMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestX86State,guest_XMM7)
       - offsetof(VexGuestX86State,guest_XMM0))
      == (8/*#regs*/-1) * 16/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestX86State,guest_XMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestX86State,guest_FPREG)));
   vg_assert(8 == offsetof(VexGuestX86State,guest_EAX));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EAX)));
   vg_assert(VG_IS_4_ALIGNED(offsetof(VexGuestX86State,guest_EIP)));
#  endif

#  if defined(VGA_amd64)
   /* amd64 YMM regs must form an array, ie, have no holes in
      between. */
   vg_assert(
      (offsetof(VexGuestAMD64State,guest_YMM16)
       - offsetof(VexGuestAMD64State,guest_YMM0))
      == (17/*#regs*/-1) * 32/*bytes per reg*/
   );
   vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_FPREG)));
   vg_assert(16 == offsetof(VexGuestAMD64State,guest_RAX));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RAX)));
   vg_assert(VG_IS_8_ALIGNED(offsetof(VexGuestAMD64State,guest_RIP)));
#  endif

#  if defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
   /* ppc guest_state vector regs must be 16 byte aligned for
      loads/stores.  This is important! */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR1));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR1));
#  endif

#  if defined(VGA_arm)
   /* arm guest_state VFP regs must be 8 byte aligned for
      loads/stores.  Let's use 16 just to be on the safe side. */
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_D0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_D0));
   /* be extra paranoid .. */
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));
#  endif

#  if defined(VGA_arm64)
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_X0));
   vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_X0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_Q0));
   vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_Q0));
#  endif

#  if defined(VGA_s390x)
   /* no special requirements */
#  endif

#  if defined(VGA_mips32) || defined(VGA_mips64)
   /* no special requirements */
#  endif
}
// NO_VGDB_POLL value ensures vgdb is not polled, while
// VGDB_POLL_ASAP ensures that the next scheduler call
// will cause a poll.
#define NO_VGDB_POLL    0xffffffffffffffffULL
#define VGDB_POLL_ASAP  0x0ULL

void VG_(disable_vgdb_poll) (void )
{
   vgdb_next_poll = NO_VGDB_POLL;
}
void VG_(force_vgdb_poll) ( void )
{
   vgdb_next_poll = VGDB_POLL_ASAP;
}
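/* Illustrative note (not from the original sources): with --vgdb-poll
   in effect, run_thread_for_a_while below re-arms the poll after each
   check, roughly

      if (bbs_done >= vgdb_next_poll) {
         vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
         if (VG_(gdbserver_activity)(tid))
            VG_(gdbserver)(tid);
      }

   while VG_(force_vgdb_poll) and VG_(disable_vgdb_poll) simply move
   vgdb_next_poll to 0 or to NO_VGDB_POLL respectively. */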
/* Run the thread tid for a while, and return a VG_TRC_* value
   indicating why VG_(disp_run_translations) stopped, and possibly an
   auxiliary word.  Also, only allow the thread to run for at most
   *dispatchCtrP events.  If (as is the normal case) use_alt_host_addr
   is False, we are running ordinary redir'd translations, and we
   should therefore start by looking up the guest next IP in TT.  If
   it is True then we ignore the guest next IP and just run from
   alt_host_addr, which presumably points at host code for a no-redir
   translation.

   Return results are placed in two_words.  two_words[0] is set to the
   TRC.  In the case where that is VG_TRC_CHAIN_ME_TO_{SLOW,FAST}_EP,
   the address to patch is placed in two_words[1].
*/
static
void run_thread_for_a_while ( /*OUT*/HWord* two_words,
                              /*MOD*/Int*   dispatchCtrP,
                              ThreadId      tid,
                              HWord         alt_host_addr,
                              Bool          use_alt_host_addr )
{
   volatile HWord        jumped         = 0;
   volatile ThreadState* tst            = NULL; /* stop gcc complaining */
   volatile Int          done_this_time = 0;
   volatile HWord        host_code_addr = 0;

   /* Paranoia */
   vg_assert(VG_(is_valid_tid)(tid));
   vg_assert(VG_(is_running_thread)(tid));
   vg_assert(!VG_(is_exiting)(tid));
   vg_assert(*dispatchCtrP > 0);

   tst = VG_(get_ThreadState)(tid);
   do_pre_run_checks( tst );
   /* end Paranoia */

   /* Futz with the XIndir stats counters. */
   vg_assert(VG_(stats__n_xIndirs_32) == 0);
   vg_assert(VG_(stats__n_xIndir_hits1_32) == 0);
   vg_assert(VG_(stats__n_xIndir_hits2_32) == 0);
   vg_assert(VG_(stats__n_xIndir_hits3_32) == 0);
   vg_assert(VG_(stats__n_xIndir_misses_32) == 0);

   /* Clear return area. */
   two_words[0] = two_words[1] = 0;

   /* Figure out where we're starting from. */
   if (use_alt_host_addr) {
      /* unusual case -- no-redir translation */
      host_code_addr = alt_host_addr;
   } else {
      /* normal case -- redir translation */
      Addr host_from_fast_cache = 0;
      Bool found_in_fast_cache
         = VG_(lookupInFastCache)( &host_from_fast_cache,
                                   (Addr)tst->arch.vex.VG_INSTR_PTR );
      if (found_in_fast_cache) {
         host_code_addr = host_from_fast_cache;
      } else {
         Addr res = 0;
         /* not found in VG_(tt_fast).  Searching here the transtab
            improves the performance compared to returning directly
            to the scheduler. */
         Bool found = VG_(search_transtab)(&res, NULL, NULL,
                                           (Addr)tst->arch.vex.VG_INSTR_PTR,
                                           True/*upd cache*/);
         if (LIKELY(found)) {
            host_code_addr = res;
         } else {
            /* At this point, we know that we intended to start at a
               normal redir translation, but it was not found.  In
               which case we can return now claiming it's not
               findable. */
            two_words[0] = VG_TRC_INNER_FASTMISS; /* hmm, is that right? */
            return;
         }
      }
   }
   /* We have either a no-redir or a redir translation. */
   vg_assert(host_code_addr != 0); /* implausible */

   /* there should be no undealt-with signals */
   //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);

   /* Set up event counter stuff for the run. */
   tst->arch.vex.host_EvC_COUNTER = *dispatchCtrP;
   tst->arch.vex.host_EvC_FAILADDR
      = (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail) );

   /* Invalidate any in-flight LL/SC transactions, in the case that we're
      using the fallback LL/SC implementation.  See bugs 344524 and 369459. */
#  if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
      || defined(VGP_nanomips_linux)
   tst->arch.vex.guest_LLaddr = (RegWord)(-1);
#  elif defined(VGP_arm64_linux)
   tst->arch.vex.guest_LLSC_SIZE = 0;
#  endif

   if (0) {
      vki_sigset_t m;
      Int i, err = VG_(sigprocmask)(VKI_SIG_SETMASK, NULL, &m);
      vg_assert(err == 0);
      VG_(printf)("tid %u: entering code with unblocked signals: ", tid);
      for (i = 1; i <= _VKI_NSIG; i++)
         if (!VG_(sigismember)(&m, i))
            VG_(printf)("%d ", i);
      VG_(printf)("\n");
   }

   /* Set up return-value area. */

   // Tell the tool this thread is about to run client code
   VG_TRACK( start_client_code, tid, bbs_done );

   vg_assert(VG_(in_generated_code) == False);
   VG_(in_generated_code) = True;

   SCHEDSETJMP(
      tid,
      jumped,
      VG_(disp_run_translations)(
         two_words,
         (volatile void*)&tst->arch.vex,
         host_code_addr
      )
   );

   vg_assert(VG_(in_generated_code) == True);
   VG_(in_generated_code) = False;

   if (jumped != (HWord)0) {
      /* We get here if the client took a fault that caused our signal
         handler to longjmp. */
      vg_assert(two_words[0] == 0 && two_words[1] == 0); // correct?
      two_words[0] = VG_TRC_FAULT_SIGNAL;
      block_signals();
   }

   /* Merge the 32-bit XIndir/miss counters into the 64 bit versions,
      and zero out the 32-bit ones in preparation for the next run of
      generated code. */
   stats__n_xIndirs += (ULong)VG_(stats__n_xIndirs_32);
   VG_(stats__n_xIndirs_32) = 0;
   stats__n_xIndir_hits1 += (ULong)VG_(stats__n_xIndir_hits1_32);
   VG_(stats__n_xIndir_hits1_32) = 0;
   stats__n_xIndir_hits2 += (ULong)VG_(stats__n_xIndir_hits2_32);
   VG_(stats__n_xIndir_hits2_32) = 0;
   stats__n_xIndir_hits3 += (ULong)VG_(stats__n_xIndir_hits3_32);
   VG_(stats__n_xIndir_hits3_32) = 0;
   stats__n_xIndir_misses += (ULong)VG_(stats__n_xIndir_misses_32);
   VG_(stats__n_xIndir_misses_32) = 0;

   /* Inspect the event counter. */
   vg_assert((Int)tst->arch.vex.host_EvC_COUNTER >= -1);
   vg_assert(tst->arch.vex.host_EvC_FAILADDR
             == (HWord)VG_(fnptr_to_fnentry)( &VG_(disp_cp_evcheck_fail)) );

   /* The number of events done this time is the difference between
      the event counter originally and what it is now.  Except -- if
      it has gone negative (to -1) then the transition 0 to -1 doesn't
      correspond to a real executed block, so back it out.  It's like
      this because the event checks decrement the counter first and
      check it for negativeness second, hence the 0 to -1 transition
      causes a bailout and the block it happens in isn't executed. */
   {
      Int dispatchCtrAfterwards = (Int)tst->arch.vex.host_EvC_COUNTER;
      done_this_time = *dispatchCtrP - dispatchCtrAfterwards;
      if (dispatchCtrAfterwards == -1) {
         done_this_time--;
      } else {
         /* If the generated code drives the counter below -1, something
            is seriously wrong. */
         vg_assert(dispatchCtrAfterwards >= 0);
      }
   }

   vg_assert(done_this_time >= 0);
   bbs_done += (ULong)done_this_time;

   *dispatchCtrP -= done_this_time;
   vg_assert(*dispatchCtrP >= 0);

   // Tell the tool this thread has stopped running client code
   VG_TRACK( stop_client_code, tid, bbs_done );

   if (bbs_done >= vgdb_next_poll) {
      if (VG_(clo_vgdb_poll))
         vgdb_next_poll = bbs_done + (ULong)VG_(clo_vgdb_poll);
      else
         /* value was changed due to gdbserver invocation via ptrace */
         vgdb_next_poll = NO_VGDB_POLL;
      if (VG_(gdbserver_activity) (tid))
         VG_(gdbserver) (tid);
   }

   /* TRC value and possible auxiliary patch-address word are already
      in two_words[0] and [1] respectively, as a result of the call to
      VG_(disp_run_translations). */
   /* Stay sane .. */
   if (two_words[0] == VG_TRC_CHAIN_ME_TO_SLOW_EP
       || two_words[0] == VG_TRC_CHAIN_ME_TO_FAST_EP) {
      vg_assert(two_words[1] != 0); /* we have a legit patch addr */
   } else {
      vg_assert(two_words[1] == 0); /* nobody messed with it */
   }
}
/* ---------------------------------------------------------------------
   The scheduler proper.
   ------------------------------------------------------------------ */

static void handle_tt_miss ( ThreadId tid )
{
   Bool found;
   Addr ip = VG_(get_IP)(tid);

   /* Trivial event.  Miss in the fast-cache.  Do a full
      lookup for it. */
   found = VG_(search_transtab)( NULL, NULL, NULL,
                                 ip, True/*upd_fast_cache*/ );
   if (UNLIKELY(!found)) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, NULL, NULL,
                                       ip, True );
         vg_assert2(found, "handle_tt_miss: missing tt_fast entry");
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
      }
   }
}
static
void handle_chain_me ( ThreadId tid, void* place_to_chain, Bool toFastEP )
{
   Bool  found    = False;
   Addr  ip       = VG_(get_IP)(tid);
   SECno to_sNo   = INV_SNO;
   TTEno to_tteNo = INV_TTE;

   found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                 ip, False/*dont_upd_fast_cache*/ );
   if (!found) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
                          bbs_done, True/*allow redirection*/ )) {
         found = VG_(search_transtab)( NULL, &to_sNo, &to_tteNo,
                                       ip, False );
         vg_assert2(found, "handle_chain_me: missing tt_fast entry");
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
         return;
      }
   }
   vg_assert(found);
   vg_assert(to_sNo != INV_SNO);
   vg_assert(to_tteNo != INV_TTE);

   /* So, finally we know where to patch through to.  Do the patching
      and update the various admin tables that allow it to be undone
      in the case that the destination block gets deleted. */
   VG_(tt_tc_do_chaining)( place_to_chain,
                           to_sNo, to_tteNo, toFastEP );
}
static void handle_syscall(ThreadId tid, UInt trc)
{
   ThreadState * volatile tst = VG_(get_ThreadState)(tid);
   volatile UWord jumped;

   /* Syscall may or may not block; either way, it will be
      complete by the time this call returns, and we'll be
      runnable again.  We could take a signal while the
      syscall runs. */

   if (VG_(clo_sanity_level) >= 3) {
      HChar buf[50];    // large enough
      VG_(sprintf)(buf, "(BEFORE SYSCALL, tid %u)", tid);
      Bool ok = VG_(am_do_sync_check)(buf, __FILE__, __LINE__);
      vg_assert(ok);
   }

   SCHEDSETJMP(tid, jumped, VG_(client_syscall)(tid, trc));

   if (VG_(clo_sanity_level) >= 3) {
      HChar buf[50];    // large enough
      VG_(sprintf)(buf, "(AFTER SYSCALL, tid %u)", tid);
      Bool ok = VG_(am_do_sync_check)(buf, __FILE__, __LINE__);
      vg_assert(ok);
   }

   if (!VG_(is_running_thread)(tid))
      VG_(printf)("tid %u not running; VG_(running_tid)=%u, tid %u status %u\n",
                  tid, VG_(running_tid), tid, tst->status);
   vg_assert(VG_(is_running_thread)(tid));

   if (jumped != (UWord)0) {
      block_signals();
      VG_(poll_signals)(tid);
   }
}
/* tid just requested a jump to the noredir version of its current
   program counter.  So make up that translation if needed, run it,
   and return the resulting thread return code in two_words[]. */
static
void handle_noredir_jump ( /*OUT*/HWord* two_words,
                           /*MOD*/Int*   dispatchCtrP,
                           ThreadId tid )
{
   /* Clear return area. */
   two_words[0] = two_words[1] = 0;

   Addr hcode = 0;
   Addr ip    = VG_(get_IP)(tid);

   Bool found = VG_(search_unredir_transtab)( &hcode, ip );
   if (!found) {
      /* Not found; we need to request a translation. */
      if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
                          False/*NO REDIRECTION*/ )) {
         found = VG_(search_unredir_transtab)( &hcode, ip );
         vg_assert2(found, "unredir translation missing after creation?!");
      } else {
         // If VG_(translate)() fails, it's because it had to throw a
         // signal because the client jumped to a bad address.  That
         // means that either a signal has been set up for delivery,
         // or the thread has been marked for termination.  Either
         // way, we just need to go back into the scheduler loop.
         two_words[0] = VG_TRC_BORING;
         return;
      }
   }

   vg_assert(found);
   vg_assert(hcode != 0);

   /* Otherwise run it and return the resulting VG_TRC_* value. */
   vg_assert(*dispatchCtrP > 0); /* so as to guarantee progress */
   run_thread_for_a_while( two_words, dispatchCtrP, tid,
                           hcode, True/*use hcode*/ );
}
/*
   Run a thread until it wants to exit.

   We assume that the caller has already called VG_(acquire_BigLock) for
   us, so we own the VCPU.  Also, all signals are blocked.
 */
VgSchedReturnCode VG_(scheduler) ( ThreadId tid )
{
   /* Holds the remaining size of this thread's "timeslice". */
   Int dispatch_ctr = 0;

   ThreadState *tst = VG_(get_ThreadState)(tid);
   static Bool vgdb_startup_action_done = False;

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "entering VG_(scheduler)");

   /* Do vgdb initialization (but once). Only the first (main) task
      starting up will do the below.
      Initializing gdbserver earlier than at the first call to
      VG_(scheduler) causes problems:
      * at the end of VG_(scheduler_init_phase2) :
        The main thread is in VgTs_Init state, but in a not yet
        consistent state => the thread cannot be reported to gdb
        (e.g. causes an assert in LibVEX_GuestX86_get_eflags when giving
        back the guest registers to gdb).
      * at end of valgrind_main, just
        before VG_(main_thread_wrapper_NORETURN)(1) :
        The main thread is still in VgTs_Init state but in a
        more advanced state. However, the thread state is not yet
        completely initialized : a.o., the os_state is not yet fully
        set => the thread is then not properly reported to gdb,
        which is then confused (causing e.g. a duplicate thread to be
        shown, without thread id).
      * it would be possible to initialize gdbserver "lower" in the
        call stack (e.g. in VG_(main_thread_wrapper_NORETURN)) but
        these are platform dependent and the place at which
        the thread state is completely initialized is not
        specific anymore to the main thread (so a similar "do it only
        once" would be needed).

      => a "once only" initialization here is the best compromise. */
   if (!vgdb_startup_action_done) {
      vg_assert(tid == 1); // it must be the main thread.
      vgdb_startup_action_done = True;
      if (VG_(clo_vgdb) != Vg_VgdbNo) {
         /* If we have to poll, ensure we do an initial poll at first
            scheduler call. Otherwise, ensure no poll (unless interrupted
            by ptrace). */
         if (VG_(clo_vgdb_poll))
            VG_(force_vgdb_poll) ();
         else
            VG_(disable_vgdb_poll) ();

         VG_(gdbserver_prerun_action) (1);
      } else {
         VG_(disable_vgdb_poll) ();
      }
   }
   if (SimHintiS(SimHint_no_nptl_pthread_stackcache, VG_(clo_sim_hints))
       && tid != 1) {
      /* We disable the stack cache the first time we see a thread other
         than the main thread appearing. At this moment, we are sure the pthread
         lib loading is done/variable was initialised by pthread lib/... */
      if (VG_(client__stack_cache_actsize__addr)) {
         if (*VG_(client__stack_cache_actsize__addr) == 0) {
            VG_(debugLog)(1,"sched",
                          "pthread stack cache size disable done"
                          " via kludge\n");
            *VG_(client__stack_cache_actsize__addr) = 1000 * 1000 * 1000;
            /* Set a value big enough to be above the hardcoded maximum stack
               cache size in glibc, small enough to allow a pthread stack size
               to be added without risk of overflow. */
         }
      } else {
         /*
          * glibc 2.34 no longer has stack_cache_actsize as a visible variable
          * so we switch to using the GLIBC_TUNABLES env var. Processing for that
          * is done in initimg-linux.c / setup_client_env for all glibc
          * versions.
          * If we don't detect stack_cache_actsize we want to be able to tell
          * whether it is an unexpected error or if it is no longer there.
          * In the latter case we don't print a warning.
          */
         Bool print_warning = True;
         if (VG_(client__gnu_get_libc_version_addr) != NULL) {
            const HChar* gnu_libc_version = VG_(client__gnu_get_libc_version_addr)();
            if (gnu_libc_version != NULL) {
               HChar* glibc_version_tok = VG_(strdup)("scheduler.1", gnu_libc_version);
               const HChar* str_major = VG_(strtok)(glibc_version_tok, ".");
               Long major = VG_(strtoll10)(str_major, NULL);
               const HChar* str_minor = VG_(strtok)(NULL, ".");
               Long minor = VG_(strtoll10)(str_minor, NULL);
               if (major >= 2 && minor >= 34) {
                  print_warning = False;
               }
               VG_(free)(glibc_version_tok);
            }
         }
         if (print_warning) {
            VG_(debugLog)(0,"sched",
                          "WARNING: pthread stack cache cannot be disabled!\n");
         }
         VG_(clo_sim_hints) &= ~SimHint2S(SimHint_no_nptl_pthread_stackcache);
         /* Remove SimHint_no_nptl_pthread_stackcache from VG_(clo_sim_hints)
            to avoid having a msg for all following threads. */
      }
   }
   /* set the proper running signal mask */
   block_signals();

   vg_assert(VG_(is_running_thread)(tid));

   dispatch_ctr = SCHEDULING_QUANTUM;

   while (!VG_(is_exiting)(tid)) {

      vg_assert(dispatch_ctr >= 0);
      if (dispatch_ctr == 0) {

         /* Our slice is done, so yield the CPU to another thread.  On
            Linux, this doesn't sleep between sleeping and running,
            since that would take too much time. */

         /* 4 July 06: it seems that a zero-length nsleep is needed to
            cause async thread cancellation (canceller.c) to terminate
            in finite time; else it is in some kind of race/starvation
            situation and completion is arbitrarily delayed (although
            this is not a deadlock).

            Unfortunately these sleeps cause MPI jobs not to terminate
            sometimes (some kind of livelock).  So sleeping once
            every N opportunities appears to work. */

         /* 3 Aug 06: doing sys__nsleep works but crashes some apps.
            sys_yield also helps the problem, whilst not crashing apps. */

         VG_(release_BigLock)(tid, VgTs_Yielding,
                              "VG_(scheduler):timeslice");
         /* ------------ now we don't have The Lock ------------ */

         VG_(acquire_BigLock)(tid, "VG_(scheduler):timeslice");
         /* ------------ now we do have The Lock ------------ */

         /* OK, do some relatively expensive housekeeping stuff */
         scheduler_sanity(tid);
         VG_(sanity_check_general)(False);

         /* Possibly make a progress report */
         if (UNLIKELY(VG_(clo_progress_interval) > 0)) {
            maybe_progress_report( VG_(clo_progress_interval) );
         }

         /* Look for any pending signals for this thread, and set them up
            for delivery */
         VG_(poll_signals)(tid);

         if (VG_(is_exiting)(tid))
            break;  /* poll_signals picked up a fatal signal */

         /* For stats purposes only. */
         n_scheduling_events_MAJOR++;

         /* Figure out how many bbs to ask vg_run_innerloop to do. */
         dispatch_ctr = SCHEDULING_QUANTUM;

         /* paranoia ... */
         vg_assert(tst->tid == tid);
         vg_assert(tst->os_state.lwpid == VG_(gettid)());
      }

      /* For stats purposes only. */
      n_scheduling_events_MINOR++;

      if (0)
         VG_(message)(Vg_DebugMsg, "thread %u: running for %d bbs\n",
                                   tid, dispatch_ctr - 1 );

      HWord trc[2]; /* "two_words" */
      run_thread_for_a_while( &trc[0],
                              &dispatch_ctr,
                              tid, 0/*ignored*/, False );

      if (VG_(clo_trace_sched) && VG_(clo_verbosity) > 2) {
         const HChar *name = name_of_sched_event(trc[0]);
         HChar buf[VG_(strlen)(name) + 10]; // large enough
         VG_(sprintf)(buf, "TRC: %s", name);
         print_sched_event(tid, buf);
      }

      if (trc[0] == VEX_TRC_JMP_NOREDIR) {
         /* If we got a request to run a no-redir version of
            something, do so now -- handle_noredir_jump just (creates
            and) runs that one translation.  The flip side is that the
            noredir translation can't itself return another noredir
            request -- that would be nonsensical.  It can, however,
            return VG_TRC_BORING, which just means keep going as
            normal. */
         /* Note that the fact that we need to continue with a
            no-redir jump is not recorded anywhere else in this
            thread's state.  So we *must* execute the block right now
            -- we can't fail to execute it and later resume with it,
            because by then we'll have forgotten the fact that it
            should be run as no-redir, but will get run as a normal
            potentially-redir'd, hence screwing up.  This really ought
            to be cleaned up, by noting in the guest state that the
            next block to be executed should be no-redir.  Then we can
            suspend and resume at any point, which isn't the case at
            the moment. */
         /* We can't enter a no-redir translation with the dispatch
            ctr set to zero, for the reasons commented just above --
            we need to force it to execute right now.  So, if the
            dispatch ctr is zero, set it to one.  Note that this would
            have the bad side effect of holding the Big Lock arbitrarily
            long should there be an arbitrarily long sequence of
            back-to-back no-redir translations to run.  But we assert
            just below that this translation cannot request another
            no-redir jump, so we should be safe against that. */
         if (dispatch_ctr == 0) {
            dispatch_ctr = 1;
         }
         handle_noredir_jump( &trc[0],
                              &dispatch_ctr,
                              tid );
         vg_assert(trc[0] != VEX_TRC_JMP_NOREDIR);

         /* This can't be allowed to happen, since it means the block
            didn't execute, and we have no way to resume-as-noredir
            after we get more timeslice.  But I don't think it ever
            can, since handle_noredir_jump will assert if the counter
            is zero on entry. */
         vg_assert(trc[0] != VG_TRC_INNER_COUNTERZERO);
         /* This asserts the same thing. */
         vg_assert(dispatch_ctr >= 0);

         /* A no-redir translation can't return with a chain-me
            request, since chaining in the no-redir cache is too
            complex. */
         vg_assert(trc[0] != VG_TRC_CHAIN_ME_TO_SLOW_EP
                   && trc[0] != VG_TRC_CHAIN_ME_TO_FAST_EP);
      }
      switch (trc[0]) {
      case VEX_TRC_JMP_BORING:
         /* assisted dispatch, no event.  Used by no-redir
            translations to force return to the scheduler. */
      case VG_TRC_BORING:
         /* no special event, just keep going. */
         break;

      case VG_TRC_INNER_FASTMISS:
         vg_assert(dispatch_ctr >= 0);
         handle_tt_miss(tid);
         break;

      case VG_TRC_CHAIN_ME_TO_SLOW_EP: {
         if (0) VG_(printf)("sched: CHAIN_TO_SLOW_EP: %p\n", (void*)trc[1] );
         handle_chain_me(tid, (void*)trc[1], False);
         break;
      }

      case VG_TRC_CHAIN_ME_TO_FAST_EP: {
         if (0) VG_(printf)("sched: CHAIN_TO_FAST_EP: %p\n", (void*)trc[1] );
         handle_chain_me(tid, (void*)trc[1], True);
         break;
      }

      case VEX_TRC_JMP_CLIENTREQ:
         do_client_request(tid);
         break;

      case VEX_TRC_JMP_SYS_INT128:  /* x86-linux */
      case VEX_TRC_JMP_SYS_INT129:  /* x86-darwin */
      case VEX_TRC_JMP_SYS_INT130:  /* x86-darwin */
      case VEX_TRC_JMP_SYS_INT145:  /* x86-solaris */
      case VEX_TRC_JMP_SYS_INT210:  /* x86-solaris */
      /* amd64-linux, ppc32-linux, amd64-darwin, amd64-solaris */
      case VEX_TRC_JMP_SYS_SYSCALL:
         handle_syscall(tid, trc[0]);
         if (VG_(clo_sanity_level) > 2)
            VG_(sanity_check_general)(True); /* sanity-check every syscall */
         break;

      case VEX_TRC_JMP_YIELD:
         /* Explicit yield, because this thread is in a spin-lock
            or something.  Only let the thread run for a short while
            longer.  Because swapping to another thread is expensive,
            we're prepared to let this thread eat a little more CPU
            before swapping to another.  That means that short term
            spins waiting for hardware to poke memory won't cause a
            thread swap. */
         if (dispatch_ctr > 300)
            dispatch_ctr = 300;
         break;

      case VG_TRC_INNER_COUNTERZERO:
         /* Timeslice is out.  Let a new thread be scheduled. */
         vg_assert(dispatch_ctr == 0);
         break;

      case VG_TRC_FAULT_SIGNAL:
         /* Everything should be set up (either we're exiting, or
            about to start in a signal handler). */
         break;
      case VEX_TRC_JMP_MAPFAIL:
         /* Failure of arch-specific address translation (x86/amd64
            segment override use) */
         /* jrs 2005 03 11: is this correct? */
         VG_(synth_fault)(tid);
         break;

      case VEX_TRC_JMP_EMWARN: {
         static Int  counts[EmNote_NUMBER];
         static Bool counts_initted = False;
         VexEmNote ew;
         const HChar* what;
         Bool      show;
         Int       q;
         if (!counts_initted) {
            counts_initted = True;
            for (q = 0; q < EmNote_NUMBER; q++)
               counts[q] = 0;
         }
         ew   = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
         what = (ew < 0 || ew >= EmNote_NUMBER)
                   ? "unknown (?!)"
                   : LibVEX_EmNote_string(ew);
         show = (ew < 0 || ew >= EmNote_NUMBER)
                   ? True
                   : counts[ew]++ < 3;
         if (show && VG_(clo_show_emwarns) && !VG_(clo_xml)) {
            VG_(message)( Vg_UserMsg,
                          "Emulation warning: unsupported action:\n");
            VG_(message)( Vg_UserMsg, "  %s\n", what);
            VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
         }
         break;
      }

      case VEX_TRC_JMP_EMFAIL: {
         VexEmNote ew;
         const HChar* what;
         ew   = (VexEmNote)VG_(threads)[tid].arch.vex.guest_EMNOTE;
         what = (ew < 0 || ew >= EmNote_NUMBER)
                   ? "unknown (?!)"
                   : LibVEX_EmNote_string(ew);
         VG_(message)( Vg_UserMsg,
                       "Emulation fatal error -- Valgrind cannot continue:\n");
         VG_(message)( Vg_UserMsg, "  %s\n", what);
         VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );
         VG_(message)(Vg_UserMsg, "\n");
         VG_(message)(Vg_UserMsg, "Valgrind has to exit now.  Sorry.\n");
         VG_(message)(Vg_UserMsg, "\n");
         VG_(exit)(1);
         break;
      }
1637 case VEX_TRC_JMP_SIGILL
:
1638 VG_(synth_sigill
)(tid
, VG_(get_IP
)(tid
));
1641 case VEX_TRC_JMP_SIGTRAP
:
1642 VG_(synth_sigtrap
)(tid
);
1645 case VEX_TRC_JMP_SIGSEGV
:
1646 VG_(synth_fault
)(tid
);
1649 case VEX_TRC_JMP_SIGBUS
:
1650 VG_(synth_sigbus
)(tid
);
1653 case VEX_TRC_JMP_SIGFPE
:
1654 VG_(synth_sigfpe
)(tid
, 0);
1657 case VEX_TRC_JMP_SIGFPE_INTDIV
:
1658 VG_(synth_sigfpe
)(tid
, VKI_FPE_INTDIV
);
1661 case VEX_TRC_JMP_SIGFPE_INTOVF
:
1662 VG_(synth_sigfpe
)(tid
, VKI_FPE_INTOVF
);

      case VEX_TRC_JMP_NODECODE: {
         Addr addr = VG_(get_IP)(tid);

         if (VG_(clo_sigill_diag)) {
            VG_(umsg)(
               "valgrind: Unrecognised instruction at address %#lx.\n", addr);
            VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
#           define M(a) VG_(umsg)(a "\n");
            M("Your program just tried to execute an instruction that Valgrind" );
            M("did not recognise.  There are two possible reasons for this."    );
            M("1. Your program has a bug and erroneously jumped to a non-code"  );
            M("   location.  If you are running Memcheck and you just saw a"    );
            M("   warning about a bad jump, it's probably your program's fault.");
            M("2. The instruction is legitimate but Valgrind doesn't handle it,");
            M("   i.e. it's Valgrind's fault.  If you think this is the case or");
            M("   you are not sure, please let us know and we'll try to fix it.");
            M("Either way, Valgrind will now raise a SIGILL signal which will"  );
            M("probably kill your program."                                     );
#           undef M
         }
#        if defined(VGA_s390x)
         /* Now that the complaint is out we need to adjust the guest_IA. The
            reason is that -- after raising the exception -- execution will
            continue with the insn that follows the invalid insn. As the first
            2 bits of the invalid insn determine its length in the usual way,
            we can compute the address of the next insn here and adjust the
            guest_IA accordingly. This adjustment is essential and tested by
            none/tests/s390x/op_exception.c (which would loop forever
            otherwise). */
         UChar byte = ((UChar *)addr)[0];
         UInt  insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;
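         /* Worked example of the expression above: byte >> 6 is the 2-bit
            s390x instruction-length code, and the formula maps code 0 to a
            2-byte insn, codes 1 and 2 to 4 bytes, and code 3 to 6 bytes. */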
         Addr  next_insn_addr = addr + insn_length;
         VG_(set_IP)(tid, next_insn_addr);
#        endif
         VG_(synth_sigill)(tid, addr);
         break;
      }

      case VEX_TRC_JMP_INVALICACHE:
         VG_(discard_translations)(
            (Addr)VG_(threads)[tid].arch.vex.guest_CMSTART,
            VG_(threads)[tid].arch.vex.guest_CMLEN,
            "scheduler(VEX_TRC_JMP_INVALICACHE)"
         );
         if (0)
            VG_(printf)("dump translations done.\n");
         break;

      case VEX_TRC_JMP_FLUSHDCACHE: {
         void* start = (void*)(Addr)VG_(threads)[tid].arch.vex.guest_CMSTART;
         SizeT len   = VG_(threads)[tid].arch.vex.guest_CMLEN;
         VG_(debugLog)(2, "sched", "flush_dcache(%p, %lu)\n", start, len);
         VG_(flush_dcache)(start, len);
         break;
      }

      case VG_TRC_INVARIANT_FAILED:
         /* This typically happens if, after running generated code,
            it is detected that host CPU settings (eg, FPU/Vector
            control words) are not as they should be.  Vex's code
            generation specifies the state such control words should
            be in on entry to Vex-generated code, and they should be
            unchanged on exit from it.  Failure of this assertion
            usually means a bug in Vex's code generation. */
         // __asm__ __volatile__ (
         //    "\t.word 0xEEF12A10\n"  // fmrx r2,fpscr
         //    "\tmov %0, r2" : "=r"(xx) : : "r2" );
         // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "run_innerloop detected host "
                       "state invariant failure", trc);

      case VEX_TRC_JMP_SYS_SYSENTER:
         /* Do whatever simulation is appropriate for an x86 sysenter
            instruction.  Note that it is critical to set this thread's
            guest_EIP to point at the code to execute after the
            sysenter, since Vex-generated code will not have set it --
            vex does not know what it should be.  Vex sets the next
            address to zero, so if you don't set guest_EIP, the thread
            will jump to zero afterwards and probably die as a result. */
#        if defined(VGP_x86_linux)
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on x86-linux is not supported");
#        elif defined(VGP_x86_darwin) || defined(VGP_x86_solaris)
         /* return address in client edx */
         VG_(threads)[tid].arch.vex.guest_EIP
            = VG_(threads)[tid].arch.vex.guest_EDX;
         handle_syscall(tid, trc[0]);
#        else
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "sysenter_x86 on non-x86 platform?!?!");
#        endif
         break;

      default:
         vg_assert2(0, "VG_(scheduler), phase 3: "
                       "unexpected thread return code (%u)", trc[0]);
         /* NOTREACHED */
         break;

      } /* switch (trc) */

      if (UNLIKELY(VG_(clo_profyle_sbs)) && VG_(clo_profyle_interval) > 0)
         maybe_show_sb_profile();
   }

   if (VG_(clo_trace_sched))
      print_sched_event(tid, "exiting VG_(scheduler)");

   vg_assert(VG_(is_exiting)(tid));

   return tst->exitreason;
}
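
/* Ask every thread except 'me' to exit: set each thread's exitreason (and,
   for a fatal signal, arrange for SIGKILL), then kick it out of any syscall
   it may be blocked in so that its scheduler loop notices the request. */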
void VG_(nuke_all_threads_except) ( ThreadId me, VgSchedReturnCode src )
{
   ThreadId tid;

   vg_assert(VG_(is_running_thread)(me));

   for (tid = 1; tid < VG_N_THREADS; tid++) {
      if (tid == me
          || VG_(threads)[tid].status == VgTs_Empty)
         continue;
      if (0)
         VG_(printf)(
            "VG_(nuke_all_threads_except): nuking tid %u\n", tid);

      VG_(threads)[tid].exitreason = src;
      if (src == VgSrc_FatalSig)
         VG_(threads)[tid].os_state.fatalsig = VKI_SIGKILL;
      VG_(get_thread_out_of_syscall)(tid);
   }
}

/* ---------------------------------------------------------------------
   Specifying shadow register values
   ------------------------------------------------------------------ */

#if defined(VGA_x86)
#  define VG_CLREQ_ARGS       guest_EAX
#  define VG_CLREQ_RET        guest_EDX
#elif defined(VGA_amd64)
#  define VG_CLREQ_ARGS       guest_RAX
#  define VG_CLREQ_RET        guest_RDX
#elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
#  define VG_CLREQ_ARGS       guest_GPR4
#  define VG_CLREQ_RET        guest_GPR3
#elif defined(VGA_arm)
#  define VG_CLREQ_ARGS       guest_R4
#  define VG_CLREQ_RET        guest_R3
#elif defined(VGA_arm64)
#  define VG_CLREQ_ARGS       guest_X4
#  define VG_CLREQ_RET        guest_X3
#elif defined (VGA_s390x)
#  define VG_CLREQ_ARGS       guest_r2
#  define VG_CLREQ_RET        guest_r3
#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
#  define VG_CLREQ_ARGS       guest_r12
#  define VG_CLREQ_RET        guest_r11
#else
#  error Unknown arch
#endif

#define CLREQ_ARGS(regs)   ((regs).vex.VG_CLREQ_ARGS)
#define CLREQ_RET(regs)    ((regs).vex.VG_CLREQ_RET)
#define O_CLREQ_RET        (offsetof(VexGuestArchState, VG_CLREQ_RET))
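
/* Rough picture of the convention implied by the macros above: the client
   leaves a pointer to its request/argument block in the VG_CLREQ_ARGS guest
   register (eg, RAX on amd64) and gets the result back in the VG_CLREQ_RET
   guest register (eg, RDX on amd64).  CLREQ_ARGS and CLREQ_RET simply read
   and write those slots in the saved guest state. */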

// These macros write a value to a client's thread register, and tell the
// tool that it's happened (if necessary).

#define SET_CLREQ_RETVAL(zztid, zzval) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write, \
                  Vg_CoreClientReq, zztid, O_CLREQ_RET, sizeof(UWord)); \
   } while (0)

#define SET_CLCALL_RETVAL(zztid, zzval, f) \
   do { CLREQ_RET(VG_(threads)[zztid].arch) = (zzval); \
        VG_TRACK( post_reg_write_clientcall_return, \
                  zztid, O_CLREQ_RET, sizeof(UWord), f); \
   } while (0)
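
// For example, SET_CLREQ_RETVAL(tid, 0) stores 0 in tid's client-request
// return register and then notifies the tool (via the post_reg_write event)
// that the register changed, so any shadow state stays consistent.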

/* ---------------------------------------------------------------------
   Handle client requests.
   ------------------------------------------------------------------ */

// OS-specific(?) client requests
static Bool os_client_request(ThreadId tid, UWord *args)
{
   Bool handled = True;

   vg_assert(VG_(is_running_thread)(tid));

   switch(args[0]) {
   case VG_USERREQ__FREERES_DONE:
      /* This is equivalent to an exit() syscall, but we don't set the
         exitcode (since it might already be set) */
      if (0 || VG_(clo_trace_syscalls) || VG_(clo_trace_sched))
         VG_(message)(Vg_DebugMsg,
                      "__gnu_cxx::__freeres() and __libc_freeres() wrapper "
                      "done; really quitting!\n");
      VG_(threads)[tid].exitreason = VgSrc_ExitThread;
      break;

   default:
      handled = False;
      break;
   }

   return handled;
}

/* Write out a client message, possibly including a back trace. Return
   the number of characters written. In case of XML output, the format
   string as well as any arguments it requires will be XML'ified.
   I.e. special characters such as the angle brackets will be translated
   into proper escape sequences. */
static
Int print_client_message( ThreadId tid, const HChar *format,
                          va_list *vargsp, Bool include_backtrace )
{
   Int count;

   if (VG_(clo_xml)) {
      /* Translate the format string as follows:
         <  -->  &lt;
         >  -->  &gt;
         &  -->  &amp;
         %s -->  %pS
         Yes, yes, it's simplified but in synch with
         myvprintf_str_XML_simplistic and VG_(debugLog_vprintf).
      */

      /* Allocate a buffer that is for sure large enough. */
      HChar xml_format[VG_(strlen)(format) * 5 + 1];

      const HChar *p;
      HChar *q = xml_format;

      for (p = format; *p; ++p) {
         switch (*p) {
         case '<': VG_(strcpy)(q, "&lt;");  q += 4; break;
         case '>': VG_(strcpy)(q, "&gt;");  q += 4; break;
         case '&': VG_(strcpy)(q, "&amp;"); q += 5; break;
         case '%':
            /* Careful: make sure %%s stays %%s */
            *q++ = *p++;
            if (*p == 's') {
               *q++ = 'p';
               *q++ = 'S';
            } else {
               *q++ = *p;
            }
            break;
         default:
            *q++ = *p;
            break;
         }
      }
      *q = '\0';

      VG_(printf_xml)( "<clientmsg>\n" );
      VG_(printf_xml)( "  <tid>%u</tid>\n", tid );
      const ThreadState *tst = VG_(get_ThreadState)(tid);
      if (tst->thread_name)
         VG_(printf_xml)("  <threadname>%s</threadname>\n", tst->thread_name);
      VG_(printf_xml)( "  <text>" );
      count = VG_(vprintf_xml)( xml_format, *vargsp );
      VG_(printf_xml)( "  </text>\n" );
   } else {
      count = VG_(vmessage)( Vg_ClientMsg, format, *vargsp );
      VG_(message_flush)();
   }

   if (include_backtrace)
      VG_(get_and_pp_StackTrace)( tid, VG_(clo_backtrace_size) );

   if (VG_(clo_xml))
      VG_(printf_xml)( "</clientmsg>\n" );

   return count;
}

/* Do a client request for the thread tid.  After the request, tid may
   or may not still be runnable; if not, the scheduler will have to
   choose a new thread to run.
*/
static
void do_client_request ( ThreadId tid )
{
   UWord* arg = (UWord*)(Addr)(CLREQ_ARGS(VG_(threads)[tid].arch));
   UWord req_no = arg[0];

   if (0)
      VG_(printf)("req no = 0x%lx, arg = %p\n", req_no, arg);
   switch (req_no) {
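
      /* arg points at the client's request block: arg[0] holds the request
         code and arg[1..] the request-specific arguments.  The CLIENT_CALL*
         cases below are the requests behind the VALGRIND_NON_SIMD_CALL*
         macros: arg[1] is a function pointer which is simply called here,
         natively and uninstrumented, with tid as its first argument. */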

      case VG_USERREQ__CLIENT_CALL0: {
         UWord (*f)(ThreadId) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL0: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid ), (Addr)f);
         break;
      }

      case VG_USERREQ__CLIENT_CALL1: {
         UWord (*f)(ThreadId, UWord) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL1: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2] ), (Addr)f );
         break;
      }

      case VG_USERREQ__CLIENT_CALL2: {
         UWord (*f)(ThreadId, UWord, UWord) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL2: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3] ), (Addr)f );
         break;
      }

      case VG_USERREQ__CLIENT_CALL3: {
         UWord (*f)(ThreadId, UWord, UWord, UWord) = (__typeof__(f))arg[1];
         if (f == NULL)
            VG_(message)(Vg_DebugMsg, "VG_USERREQ__CLIENT_CALL3: func=%p\n", f);
         else
            SET_CLCALL_RETVAL(tid, f ( tid, arg[2], arg[3], arg[4] ), (Addr)f );
         break;
      }

      // Nb: this looks like a circular definition, because it kind of is.
      // See comment in valgrind.h to understand what's going on.
      case VG_USERREQ__RUNNING_ON_VALGRIND:
         SET_CLREQ_RETVAL(tid, RUNNING_ON_VALGRIND+1);
         break;

      case VG_USERREQ__PRINTF: {
         const HChar* format = (HChar *)arg[1];
         /* JRS 2010-Jan-28: this is DEPRECATED; use the
            _VALIST_BY_REF version instead */
         if (sizeof(va_list) != sizeof(UWord))
            goto va_list_casting_error_NORETURN;
         union {
            va_list vargs;
            unsigned long uw;
         } u;
         u.uw = (unsigned long)arg[2];
         Int count =
            print_client_message( tid, format, &u.vargs,
                                  /* include_backtrace */ False );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }

      case VG_USERREQ__PRINTF_BACKTRACE: {
         const HChar* format = (HChar *)arg[1];
         /* JRS 2010-Jan-28: this is DEPRECATED; use the
            _VALIST_BY_REF version instead */
         if (sizeof(va_list) != sizeof(UWord))
            goto va_list_casting_error_NORETURN;
         union {
            va_list vargs;
            unsigned long uw;
         } u;
         u.uw = (unsigned long)arg[2];
         Int count =
            print_client_message( tid, format, &u.vargs,
                                  /* include_backtrace */ True );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }

      case VG_USERREQ__PRINTF_VALIST_BY_REF: {
         const HChar* format = (HChar *)arg[1];
         va_list* vargsp = (va_list*)arg[2];
         Int count =
            print_client_message( tid, format, vargsp,
                                  /* include_backtrace */ False );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }

      case VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF: {
         const HChar* format = (HChar *)arg[1];
         va_list* vargsp = (va_list*)arg[2];
         Int count =
            print_client_message( tid, format, vargsp,
                                  /* include_backtrace */ True );
         SET_CLREQ_RETVAL( tid, count );
         break;
      }

      case VG_USERREQ__INTERNAL_PRINTF_VALIST_BY_REF: {
         va_list* vargsp = (va_list*)arg[2];
         Int count =
            VG_(vmessage)( Vg_DebugMsg, (HChar *)arg[1], *vargsp );
         VG_(message_flush)();
         SET_CLREQ_RETVAL( tid, count );
         break;
      }

      case VG_USERREQ__ADD_IFUNC_TARGET: {
         VG_(redir_add_ifunc_target)( arg[1], arg[2] );
         SET_CLREQ_RETVAL( tid, 0);
         break;
      }

      case VG_USERREQ__STACK_REGISTER: {
         UWord sid = VG_(register_stack)((Addr)arg[1], (Addr)arg[2]);
         SET_CLREQ_RETVAL( tid, sid );
         VG_TRACK(register_stack, (Addr)arg[1], (Addr)arg[2]);
         break;
      }

      case VG_USERREQ__STACK_DEREGISTER: {
         VG_(deregister_stack)(arg[1]);
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      }

      case VG_USERREQ__STACK_CHANGE: {
         VG_(change_stack)(arg[1], (Addr)arg[2], (Addr)arg[3]);
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      }

      case VG_USERREQ__GET_MALLOCFUNCS: {
         struct vg_mallocfunc_info *info =
            (struct vg_mallocfunc_info *)arg[1];

         info->tl_malloc               = VG_(tdict).tool_malloc;
         info->tl_calloc               = VG_(tdict).tool_calloc;
         info->tl_realloc              = VG_(tdict).tool_realloc;
         info->tl_memalign             = VG_(tdict).tool_memalign;
         info->tl___builtin_new        = VG_(tdict).tool___builtin_new;
         info->tl___builtin_new_aligned = VG_(tdict).tool___builtin_new_aligned;
         info->tl___builtin_vec_new    = VG_(tdict).tool___builtin_vec_new;
         info->tl___builtin_vec_new_aligned = VG_(tdict).tool___builtin_vec_new_aligned;
         info->tl_free                 = VG_(tdict).tool_free;
         info->tl___builtin_delete     = VG_(tdict).tool___builtin_delete;
         info->tl___builtin_delete_aligned = VG_(tdict).tool___builtin_delete_aligned;
         info->tl___builtin_vec_delete = VG_(tdict).tool___builtin_vec_delete;
         info->tl___builtin_vec_delete_aligned = VG_(tdict).tool___builtin_vec_delete_aligned;
         info->tl_malloc_usable_size   = VG_(tdict).tool_malloc_usable_size;

         info->mallinfo                = VG_(mallinfo);
         info->clo_trace_malloc        = VG_(clo_trace_malloc);

         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;
      }

      /* Requests from the client program */

      case VG_USERREQ__DISCARD_TRANSLATIONS:
         if (VG_(clo_verbosity) > 2)
            VG_(printf)( "client request: DISCARD_TRANSLATIONS,"
                         " addr %p, len %lu\n",
                         (void*)arg[1], arg[2] );

         VG_(discard_translations)(
            arg[1], arg[2], "scheduler(VG_USERREQ__DISCARD_TRANSLATIONS)"
         );

         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;

      case VG_USERREQ__INNER_THREADS:
         if (VG_(clo_verbosity) > 2)
            VG_(printf)( "client request: INNER_THREADS,"
                         " addr %p\n", (void*)arg[1] );
         VG_(inner_threads) = (ThreadState*)arg[1];
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;

      case VG_USERREQ__COUNT_ERRORS:
         SET_CLREQ_RETVAL( tid, VG_(get_n_errs_found)() );
         break;

      case VG_USERREQ__CLO_CHANGE:
         VG_(process_dynamic_option) (cloD, (HChar *)arg[1]);
         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;

      case VG_USERREQ__LOAD_PDB_DEBUGINFO:
         VG_(di_notify_pdb_debuginfo)( arg[1], arg[2], arg[3], arg[4] );
         SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
         break;

      case VG_USERREQ__MAP_IP_TO_SRCLOC: {
         Addr   ip    = arg[1];
         HChar* buf64 = (HChar*)arg[2];  // points to a HChar [64] array
         const HChar *buf;  // points to a string of unknown size

         VG_(memset)(buf64, 0, 64);
         UInt linenum = 0;

         // Unless the guest would become epoch aware (and would need to
         // describe IP addresses of dlclosed libs), using cur_ep is a
         // reasonable choice.
         const DiEpoch cur_ep = VG_(current_DiEpoch)();

         Bool ok = VG_(get_filename_linenum)(
                      cur_ep, ip, &buf, NULL, &linenum
                   );
         if (ok) {
            /* For backward compatibility truncate the filename to
               49 characters. */
            VG_(strncpy)(buf64, buf, 50);
            buf64[49] = '\0';
            UInt i;
            for (i = 0; i < 50; i++) {
               if (buf64[i] == 0)
                  break;
            }
            VG_(sprintf)(buf64+i, ":%u", linenum);  // safe
         } else {
            buf64[0] = 0;
         }

         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;
      }

      case VG_USERREQ__CHANGE_ERR_DISABLEMENT: {
         Word delta = arg[1];
         vg_assert(delta == 1 || delta == -1);
         ThreadState* tst = VG_(get_ThreadState)(tid);
         vg_assert(tst);
         if (delta == 1 && tst->err_disablement_level < 0xFFFFFFFF) {
            tst->err_disablement_level++;
         }
         else
         if (delta == -1 && tst->err_disablement_level > 0) {
            tst->err_disablement_level--;
         }
         SET_CLREQ_RETVAL( tid, 0 ); /* return value is meaningless */
         break;
      }

      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         UWord ret;
         ret = (UWord) VG_(client_monitor_command) ((HChar*)arg[1]);
         SET_CLREQ_RETVAL(tid, ret);
         break;
      }

      case VG_USERREQ__MALLOCLIKE_BLOCK:
      case VG_USERREQ__RESIZEINPLACE_BLOCK:
      case VG_USERREQ__FREELIKE_BLOCK:
         // Ignore them if the addr is NULL; otherwise pass onto the tool.
         if (!arg[1]) {
            SET_CLREQ_RETVAL( tid, 0 );     /* return value is meaningless */
            break;
         } else {
            goto my_default;
         }

      case VG_USERREQ__VEX_INIT_FOR_IRI:
         LibVEX_InitIRI ( (IRICB *)arg[1] );
         break;

      my_default:
      default:
         if (os_client_request(tid, arg)) {
            // do nothing, os_client_request() handled it
         } else if (VG_(needs).client_requests) {
            UWord ret;

            if (VG_(clo_verbosity) > 2)
               VG_(printf)("client request: code %lx, addr %p, len %lu\n",
                           arg[0], (void*)arg[1], arg[2] );

            if ( VG_TDICT_CALL(tool_handle_client_request, tid, arg, &ret) )
               SET_CLREQ_RETVAL(tid, ret);
         } else {
            static Bool whined = False;

            if (!whined && VG_(clo_verbosity) > 2) {
               // Allow for requests in core, but defined by tools, which
               // have 0 and 0 in their two high bytes.
               HChar c1 = (arg[0] >> 24) & 0xff;
               HChar c2 = (arg[0] >> 16) & 0xff;
               if (c1 == 0) c1 = '_';
               if (c2 == 0) c2 = '_';
               VG_(message)(Vg_UserMsg, "Warning:\n"
                   "  unhandled client request: 0x%lx (%c%c+0x%lx).  Perhaps\n"
                   "  VG_(needs).client_requests should be set?\n",
                   arg[0], c1, c2, arg[0] & 0xffff);
               whined = True;
            }
         }
         break;
   }
   return;

   /*NOTREACHED*/
va_list_casting_error_NORETURN:
   VG_(umsg)(
      "Valgrind: fatal error - cannot continue: use of the deprecated\n"
      "client requests VG_USERREQ__PRINTF or VG_USERREQ__PRINTF_BACKTRACE\n"
      "on a platform where they cannot be supported.  Please use the\n"
      "equivalent _VALIST_BY_REF versions instead.\n"
      "\n"
      "This is a binary-incompatible change in Valgrind's client request\n"
      "mechanism.  It is unfortunate, but difficult to avoid.  End-users\n"
      "are expected to almost never see this message.  The only case in\n"
      "which you might see this message is if your code uses the macros\n"
      "VALGRIND_PRINTF or VALGRIND_PRINTF_BACKTRACE.  If so, you will need\n"
      "to recompile such code, using the header files from this version of\n"
      "Valgrind, and not any previous version.\n"
      "\n"
      "If you see this message in any other circumstances, it is probably\n"
      "a bug in Valgrind.  In this case, please file a bug report at\n"
      "\n"
      "   http://www.valgrind.org/support/bug_reports.html\n"
      "\n"
   );
   vg_assert(0);
}

/* ---------------------------------------------------------------------
   Sanity checking (permanently engaged)
   ------------------------------------------------------------------ */

/* Internal consistency checks on the sched structures. */
static
void scheduler_sanity ( ThreadId tid )
{
   Bool bad = False;
   Int lwpid = VG_(gettid)();

   if (!VG_(is_running_thread)(tid)) {
      VG_(message)(Vg_DebugMsg,
                   "Thread %u is supposed to be running, "
                   "but doesn't own the_BigLock (owned by %u)\n",
                   tid, VG_(running_tid));
      bad = True;
   }

   if (lwpid != VG_(threads)[tid].os_state.lwpid) {
      VG_(message)(Vg_DebugMsg,
                   "Thread %u supposed to be in LWP %d, but we're actually %d\n",
                   tid, VG_(threads)[tid].os_state.lwpid, VG_(gettid)());
      bad = True;
   }

   if (lwpid != ML_(get_sched_lock_owner)(the_BigLock)) {
      VG_(message)(Vg_DebugMsg,
                   "Thread (LWPID) %u doesn't own the_BigLock\n",
                   tid);
      bad = True;
   }

   if (0) {
      /* Periodically show the state of all threads, for debugging
         purposes. */
      static UInt lasttime = 0;
      UInt now;
      now = VG_(read_millisecond_timer)();
      if ((!bad) && (lasttime + 4000/*ms*/ <= now)) {
         lasttime = now;
         VG_(printf)("\n------------ Sched State at %d ms ------------\n",
                     (Int)now);
         VG_(show_sched_status)(True,  // host_stacktrace
                                True,  // stack_usage
                                True); // exited_threads
      }
   }

   /* core_panic also shows the sched status, which is why we don't
      show it above if bad==True. */
   if (bad)
      VG_(core_panic)("scheduler_sanity: failed");
}

void VG_(sanity_check_general) ( Bool force_expensive )
{
   ThreadId tid;

   static UInt next_slow_check_at = 1;
   static UInt slow_check_interval = 25;

   if (VG_(clo_sanity_level) < 1) return;

   /* --- First do all the tests that we can do quickly. ---*/

   sanity_fast_count++;

   /* Check stuff pertaining to the memory check system. */

   /* Check that nobody has spuriously claimed that the first or
      last 16 pages of memory have become accessible [...] */
   if (VG_(needs).sanity_checks) {
      vg_assert(VG_TDICT_CALL(tool_cheap_sanity_check));
   }

   /* --- Now some more expensive checks. ---*/

   /* Once every now and again, check some more expensive stuff.
      Gradually increase the interval between such checks so as not to
      burden long-running programs too much. */
   if ( force_expensive
        || VG_(clo_sanity_level) > 1
        || (VG_(clo_sanity_level) == 1
            && sanity_fast_count == next_slow_check_at)) {

      if (0) VG_(printf)("SLOW at %u\n", sanity_fast_count-1);

      next_slow_check_at = sanity_fast_count - 1 + slow_check_interval;
      slow_check_interval++;
      sanity_slow_count++;

      if (VG_(needs).sanity_checks) {
         vg_assert(VG_TDICT_CALL(tool_expensive_sanity_check));
      }

      /* Look for stack overruns.  Visit all threads. */
      for (tid = 1; tid < VG_N_THREADS; tid++) {
         SizeT    remains;
         VgStack* stack;

         if (VG_(threads)[tid].status == VgTs_Empty ||
             VG_(threads)[tid].status == VgTs_Zombie)
            continue;

         stack
            = (VgStack*)
              VG_(get_ThreadState)(tid)->os_state.valgrind_stack_base;
         SizeT limit
            = 4096; // Let's say.  Checking more causes lots of L2 misses.
         remains
            = VG_(am_get_VgStack_unused_szB)(stack, limit);
         if (remains < limit)
            VG_(message)(Vg_DebugMsg,
                         "WARNING: Thread %u is within %lu bytes "
                         "of running out of valgrind stack!\n"
                         "Valgrind stack size can be increased "
                         "using --valgrind-stacksize=....\n",
                         tid, remains);
      }
   }

   if (VG_(clo_sanity_level) > 1) {
      /* Check sanity of the low-level memory manager.  Note that bugs
         in the client's code can cause this to fail, so we don't do
         this check unless specially asked for.  And because it's
         potentially very expensive. */
      VG_(sanity_check_malloc_all)();
   }
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/