/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2020 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, see <http://www.gnu.org/licenses/>.

  The GNU General Public License is contained in the file COPYING.
*/

#include "drd_barrier.h"
#include "drd_clientobj.h"
#include "drd_clientreq.h"
#include "drd_cond.h"            // DRD_(cond_set_trace)()
#include "drd_error.h"
#include "drd_hb.h"              // DRD_(hb_set_trace)()
#include "drd_load_store.h"
#include "drd_malloc_wrappers.h"
#include "drd_mutex.h"
#include "drd_rwlock.h"
#include "drd_segment.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "libvex_guest_offsets.h"
#include "pub_drd_bitmap.h"
#include "pub_tool_vki.h"         // Must be included before pub_tool_libcproc
#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strcmp)
#include "pub_tool_libcprint.h"   // VG_(printf)
#include "pub_tool_libcproc.h"    // VG_(getenv)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // command line options
#include "pub_tool_replacemalloc.h"
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
#include "pub_tool_tooliface.h"
#include "pub_tool_aspacemgr.h"   // VG_(am_is_valid_for_client)

/* Local variables. */

static Bool s_print_stats;
static Bool s_var_info;
static Bool s_show_stack_usage;
static Bool s_trace_alloc;
static Bool trace_sectsuppr;

/**
 * Implement the needs_command_line_options callback for drd.
 */
static Bool DRD_(process_cmd_line_option)(const HChar* arg)
{
   int check_stack_accesses   = -1;
   int join_list_vol          = -1;
   int exclusive_threshold_ms = -1;
   int first_race_only        = -1;
   int report_signal_unlocked = -1;
   int segment_merging        = -1;
   int segment_merge_interval = -1;
   int shared_threshold_ms    = -1;
   int show_confl_seg         = -1;
   int trace_barrier          = -1;
   int trace_clientobj        = -1;
   int trace_cond             = -1;
   int trace_csw              = -1;
   int trace_fork_join        = -1;
   int trace_hb               = -1;
   int trace_conflict_set     = -1;
   int trace_conflict_set_bm  = -1;
   int trace_mutex            = -1;
   int trace_rwlock           = -1;
   int trace_segment          = -1;
   int trace_semaphore        = -1;
   int trace_suppression      = -1;
   const HChar* trace_address = 0;
   const HChar* ptrace_address = 0;

   if      VG_BOOL_CLO(arg, "--check-stack-var",     check_stack_accesses) {}
   else if VG_INT_CLO (arg, "--join-list-vol",       join_list_vol) {}
   else if VG_BOOL_CLO(arg, "--drd-stats",           s_print_stats) {}
   else if VG_BOOL_CLO(arg, "--first-race-only",     first_race_only) {}
   else if VG_BOOL_CLO(arg, "--free-is-write",       DRD_(g_free_is_write)) {}
   else if VG_BOOL_CLO(arg, "--report-signal-unlocked", report_signal_unlocked)
   {}
   else if VG_BOOL_CLO(arg, "--segment-merging",     segment_merging) {}
   else if VG_INT_CLO (arg, "--segment-merging-interval", segment_merge_interval)
   {}
   else if VG_BOOL_CLO(arg, "--show-confl-seg",      show_confl_seg) {}
   else if VG_BOOL_CLO(arg, "--show-stack-usage",    s_show_stack_usage) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                       DRD_(ignore_thread_creation)) {}
   else if VG_BOOL_CLO(arg, "--trace-alloc",         s_trace_alloc) {}
   else if VG_BOOL_CLO(arg, "--trace-barrier",       trace_barrier) {}
   else if VG_BOOL_CLO(arg, "--trace-clientobj",     trace_clientobj) {}
   else if VG_BOOL_CLO(arg, "--trace-cond",          trace_cond) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set",  trace_conflict_set) {}
   else if VG_BOOL_CLO(arg, "--trace-conflict-set-bm", trace_conflict_set_bm) {}
   else if VG_BOOL_CLO(arg, "--trace-csw",           trace_csw) {}
   else if VG_BOOL_CLO(arg, "--trace-fork-join",     trace_fork_join) {}
   else if VG_BOOL_CLO(arg, "--trace-hb",            trace_hb) {}
   else if VG_BOOL_CLO(arg, "--trace-mutex",         trace_mutex) {}
   else if VG_BOOL_CLO(arg, "--trace-rwlock",        trace_rwlock) {}
   else if VG_BOOL_CLO(arg, "--trace-sectsuppr",     trace_sectsuppr) {}
   else if VG_BOOL_CLO(arg, "--trace-segment",       trace_segment) {}
   else if VG_BOOL_CLO(arg, "--trace-semaphore",     trace_semaphore) {}
   else if VG_BOOL_CLO(arg, "--trace-suppr",         trace_suppression) {}
   else if VG_BOOL_CLO(arg, "--var-info",            s_var_info) {}
   else if VG_BOOL_CLO(arg, "--verify-conflict-set", DRD_(verify_conflict_set))
   {}
   else if VG_INT_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms) {}
   else if VG_STR_CLO (arg, "--ptrace-addr",         ptrace_address) {}
   else if VG_INT_CLO (arg, "--shared-threshold",    shared_threshold_ms) {}
   else if VG_STR_CLO (arg, "--trace-addr",          trace_address) {}
   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   if (check_stack_accesses != -1)
      DRD_(set_check_stack_accesses)(check_stack_accesses);
   if (exclusive_threshold_ms != -1)
   {
      DRD_(mutex_set_lock_threshold)(exclusive_threshold_ms);
      DRD_(rwlock_set_exclusive_threshold)(exclusive_threshold_ms);
   }
   if (first_race_only != -1)
      DRD_(set_first_race_only)(first_race_only);
   if (join_list_vol != -1)
      DRD_(thread_set_join_list_vol)(join_list_vol);
   if (report_signal_unlocked != -1)
      DRD_(cond_set_report_signal_unlocked)(report_signal_unlocked);
   if (shared_threshold_ms != -1)
      DRD_(rwlock_set_shared_threshold)(shared_threshold_ms);
   if (segment_merging != -1)
      DRD_(thread_set_segment_merging)(segment_merging);
   if (segment_merge_interval != -1)
      DRD_(thread_set_segment_merge_interval)(segment_merge_interval);
   if (show_confl_seg != -1)
      DRD_(set_show_conflicting_segments)(show_confl_seg);
   if (trace_address) {
      const Addr addr = VG_(strtoll16)(trace_address, 0);
      DRD_(start_tracing_address_range)(addr, addr + 1, False);
   }
   if (ptrace_address) {
      HChar *plus = VG_(strchr)(ptrace_address, '+');
      Addr addr, length;

      /* Parse "<address>" or "<address>+<length>"; the length defaults to 1. */
      addr = VG_(strtoll16)(ptrace_address, 0);
      length = plus ? VG_(strtoll16)(plus + 1, 0) : 1;
      DRD_(start_tracing_address_range)(addr, addr + length, True);
   }

   if (trace_barrier != -1)
      DRD_(barrier_set_trace)(trace_barrier);
   if (trace_clientobj != -1)
      DRD_(clientobj_set_trace)(trace_clientobj);
   if (trace_cond != -1)
      DRD_(cond_set_trace)(trace_cond);
   if (trace_csw != -1)
      DRD_(thread_trace_context_switches)(trace_csw);
   if (trace_fork_join != -1)
      DRD_(thread_set_trace_fork_join)(trace_fork_join);
   if (trace_hb != -1)
      DRD_(hb_set_trace)(trace_hb);
   if (trace_conflict_set != -1)
      DRD_(thread_trace_conflict_set)(trace_conflict_set);
   if (trace_conflict_set_bm != -1)
      DRD_(thread_trace_conflict_set_bm)(trace_conflict_set_bm);
   if (trace_mutex != -1)
      DRD_(mutex_set_trace)(trace_mutex);
   if (trace_rwlock != -1)
      DRD_(rwlock_set_trace)(trace_rwlock);
   if (trace_segment != -1)
      DRD_(sg_set_trace)(trace_segment);
   if (trace_semaphore != -1)
      DRD_(semaphore_set_trace)(trace_semaphore);
   if (trace_suppression != -1)
      DRD_(suppression_set_trace)(trace_suppression);

   return True;
}
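
/*
 * DRD_(print_usage)() and DRD_(print_debug_usage)() provide the help text for
 * the user-visible and the debugging command line options respectively; both
 * are registered below through VG_(needs_command_line_options)().
 */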

static void DRD_(print_usage)(void)
{
   VG_(printf)(
"    --check-stack-var=yes|no  Whether or not to report data races on\n"
"                              stack variables [no].\n"
"    --exclusive-threshold=<n> Print an error message if any mutex or\n"
"                              writer lock is held longer than the specified\n"
"                              time (in milliseconds) [off].\n"
"    --first-race-only=yes|no  Only report the first data race that occurs on\n"
"                              a memory location instead of all races [no].\n"
"    --free-is-write=yes|no    Whether to report races between freeing memory\n"
"                              and subsequent accesses of that memory [no].\n"
"    --join-list-vol=<n>       Number of threads to delay cleanup for [10].\n"
"    --report-signal-unlocked=yes|no Whether to report calls to\n"
"                              pthread_cond_signal() where the mutex associated\n"
"                              with the signal via pthread_cond_wait() is not\n"
"                              locked at the time the signal is sent [yes].\n"
"    --segment-merging=yes|no  Controls segment merging [yes].\n"
"        Segment merging is an algorithm to limit memory usage of the\n"
"        data race detection algorithm. Disabling segment merging may\n"
"        improve the accuracy of the so-called 'other segments' displayed\n"
"        in race reports but can also trigger an out of memory error.\n"
"    --segment-merging-interval=<n> Perform segment merging every time n new\n"
"        segments have been created. Default: %d.\n"
"    --shared-threshold=<n>    Print an error message if a reader lock\n"
"                              is held longer than the specified time (in\n"
"                              milliseconds) [off].\n"
"    --show-confl-seg=yes|no   Show conflicting segments in race reports [yes].\n"
"    --show-stack-usage=yes|no Print stack usage at thread exit time [no].\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread\n"
"                              creation [%s].\n"
"\n"
"  drd options for monitoring process behavior:\n"
"    --ptrace-addr=<address>[+<length>] Trace all load and store activity for\n"
"                              the specified address range and keep doing that\n"
"                              even after the memory at that address has been\n"
"                              freed and reallocated [off].\n"
"    --trace-addr=<address>    Trace all load and store activity for the\n"
"                              specified address [off].\n"
"    --trace-alloc=yes|no      Trace all memory allocations and deallocations\n"
"                              [no].\n"
"    --trace-barrier=yes|no    Trace all barrier activity [no].\n"
"    --trace-cond=yes|no       Trace all condition variable activity [no].\n"
"    --trace-fork-join=yes|no  Trace all thread fork/join activity [no].\n"
"    --trace-hb=yes|no         Trace ANNOTATE_HAPPENS_BEFORE() etc. [no].\n"
"    --trace-mutex=yes|no      Trace all mutex activity [no].\n"
"    --trace-rwlock=yes|no     Trace all reader-writer lock activity [no].\n"
"    --trace-semaphore=yes|no  Trace all semaphore activity [no].\n",
               DRD_(thread_get_segment_merge_interval)(),
               DRD_(ignore_thread_creation) ? "yes" : "no"
               );
}

static void DRD_(print_debug_usage)(void)
{
   VG_(printf)(
"    --drd-stats=yes|no        Print statistics about DRD activity [no].\n"
"    --trace-clientobj=yes|no  Trace all client object activity [no].\n"
"    --trace-csw=yes|no        Trace all scheduler context switches [no].\n"
"    --trace-conflict-set=yes|no Trace all conflict set updates [no].\n"
"    --trace-conflict-set-bm=yes|no Trace all conflict set bitmap\n"
"                              updates [no]. Note: enabling this option\n"
"                              will generate a lot of output!\n"
"    --trace-sectsuppr=yes|no  Trace the dynamic library sections on\n"
"                              which data race detection is suppressed [no].\n"
"    --trace-segment=yes|no    Trace segment actions [no].\n"
"    --trace-suppr=yes|no      Trace all address suppression actions [no].\n"
"    --verify-conflict-set=yes|no Verify conflict set consistency [no].\n"
               );
}

// Implements the thread-related core callbacks.

static void drd_pre_mem_read(const CorePart part,
                             const ThreadId tid,
                             const HChar* const s,
                             const Addr a,
                             const SizeT size)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   if (size > 0)
      DRD_(trace_load)(a, size);
}

static void drd_pre_mem_read_asciiz(const CorePart part,
                                    const ThreadId tid,
                                    const HChar* const s,
                                    const Addr a)
{
   const HChar* p = (void*)a;
   SizeT size = 0;

   // Don't segfault if the string starts in an obviously stupid
   // place. Actually we should check the whole string, not just
   // the start address, but that's too much trouble. At least
   // checking the first byte is better than nothing. See #255009.
   if (!VG_(am_is_valid_for_client)(a, 1, VKI_PROT_READ))
      return;

   /* Note: the expression '*p' reads client memory and may crash if the
      client provided an invalid pointer! */
   while (*p)
   {
      p++;
      size++;
   }

   if (size > 0)
      DRD_(trace_load)(a, size);
}

static void drd_post_mem_write(const CorePart part,
                               const ThreadId tid,
                               const Addr a,
                               const SizeT size)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   if (size > 0)
      DRD_(trace_store)(a, size);
}
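
/*
 * drd_start_using_mem(): tell DRD that the memory range [ a1, a1 + len [ has
 * become available to the client, either as stack or as non-stack memory.
 */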

static
void drd_start_using_mem(const Addr a1, const SizeT len,
                         const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Started using memory range 0x%lx + %lu%s",
                      a1, len, DRD_(running_thread_inside_pthread_create)()
                      ? " (inside pthread_create())" : "");

   if (!is_stack_mem && DRD_(g_free_is_write))
      DRD_(thread_stop_using_mem)(a1, a2);

   if (UNLIKELY(DRD_(any_address_is_traced)()))
      DRD_(trace_mem_access)(a1, len, eStart, 0, 0);

   if (UNLIKELY(DRD_(running_thread_inside_pthread_create)()))
      DRD_(start_suppression)(a1, a2, "pthread_create()");
}

static void drd_start_using_mem_w_ecu(const Addr a1,
                                      const SizeT len,
                                      UInt ec_uniq)
{
   drd_start_using_mem(a1, len, False);
}

static void drd_start_using_mem_w_tid(const Addr a1,
                                      const SizeT len,
                                      ThreadId tid)
{
   drd_start_using_mem(a1, len, False);
}
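
/*
 * drd_stop_using_mem(): tell DRD that the client has stopped using the memory
 * range [ a1, a1 + len [.
 */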

static
void drd_stop_using_mem(const Addr a1, const SizeT len,
                        const Bool is_stack_mem)
{
   const Addr a2 = a1 + len;

   if (UNLIKELY(DRD_(any_address_is_traced)()))
      DRD_(trace_mem_access)(a1, len, eEnd, 0, 0);

   if (!is_stack_mem && s_trace_alloc)
      DRD_(trace_msg)("Stopped using memory range 0x%lx + %lu",
                      a1, len);

   if (!is_stack_mem || DRD_(get_check_stack_accesses)())
   {
      if (is_stack_mem || !DRD_(g_free_is_write))
         DRD_(thread_stop_using_mem)(a1, a2);
      else if (DRD_(g_free_is_write))
         DRD_(trace_store)(a1, len);
      DRD_(clientobj_stop_using_mem)(a1, a2);
      DRD_(suppression_stop_using_mem)(a1, a2);
   }
}

static
void drd_stop_using_nonstack_mem(const Addr a1, const SizeT len)
{
   drd_stop_using_mem(a1, len, False);
}

/**
 * Discard all information DRD has about memory accesses and client objects
 * in the specified address range.
 */
void DRD_(clean_memory)(const Addr a1, const SizeT len)
{
   const Bool is_stack_memory = DRD_(thread_address_on_any_stack)(a1);
   drd_stop_using_mem(a1, len, is_stack_memory);
   drd_start_using_mem(a1, len, is_stack_memory);
}

/**
 * Suppress data race reports on all addresses contained in .plt, .got and
 * .got.plt sections inside the address range [ a, a + len [. The data in
 * these sections is modified by _dl_relocate_object() every time a function
 * in a shared library is called for the first time. Since the first call
 * to a function in a shared library can happen from a multithreaded context,
 * such calls can cause conflicting accesses. See also Ulrich Drepper's
 * paper "How to Write Shared Libraries" for more information about relocation
 * (http://people.redhat.com/drepper/dsohowto.pdf).
 * Note: the contents of the .got section are only modified by the MIPS resolver.
 */
static void DRD_(suppress_relocation_conflicts)(const Addr a, const SizeT len)
{
   const DebugInfo* di;

   if (trace_sectsuppr)
      VG_(dmsg)("Evaluating range @ 0x%lx size %lu\n", a, len);

   for (di = VG_(next_DebugInfo)(0); di; di = VG_(next_DebugInfo)(di)) {
      Addr  avma;
      SizeT size;

      if (trace_sectsuppr)
         VG_(dmsg)("Examining %s / %s\n", VG_(DebugInfo_get_filename)(di),
                   VG_(DebugInfo_get_soname)(di));

      /*
       * Suppress the race report on the libpthread global variable
       * __pthread_multiple_threads. See also
       * http://bugs.kde.org/show_bug.cgi?id=323905.
       */
      avma = VG_(DebugInfo_get_bss_avma)(di);
      size = VG_(DebugInfo_get_bss_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0 &&
          VG_(strcmp)(VG_(DebugInfo_get_soname)(di), "libpthread.so.0") == 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .bss @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectBSS);
         DRD_(start_suppression)(avma, avma + size, ".bss");
      }

      avma = VG_(DebugInfo_get_plt_avma)(di);
      size = VG_(DebugInfo_get_plt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .plt @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectPLT);
         DRD_(start_suppression)(avma, avma + size, ".plt");
      }

      avma = VG_(DebugInfo_get_gotplt_avma)(di);
      size = VG_(DebugInfo_get_gotplt_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .got.plt @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectGOTPLT);
         DRD_(start_suppression)(avma, avma + size, ".gotplt");
      }

      avma = VG_(DebugInfo_get_got_avma)(di);
      size = VG_(DebugInfo_get_got_size)(di);
      tl_assert((avma && size) || (avma == 0 && size == 0));
      if (size > 0) {
         if (trace_sectsuppr)
            VG_(dmsg)("Suppressing .got @ 0x%lx size %lu\n", avma, size);
         tl_assert(VG_(DebugInfo_sect_kind)(NULL, avma) == Vg_SectGOT);
         DRD_(start_suppression)(avma, avma + size, ".got");
      }
   }
}

static
void drd_start_using_mem_w_perms(const Addr a, const SizeT len,
                                 const Bool rr, const Bool ww, const Bool xx,
                                 ULong di_handle)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());

   drd_start_using_mem(a, len, False);

   DRD_(suppress_relocation_conflicts)(a, len);
}

/**
 * Called by the core when the stack of a thread grows, to indicate that
 * the addresses in range [ a, a + len [ may now be used by the client.
 * Assumption: stacks grow downward.
 */
static
void drd_start_using_mem_stack2(const DrdThreadId tid, const Addr a,
                                const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a - VG_STACK_REDZONE_SZB);
   drd_start_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                       True);
}

static
void drd_start_using_mem_stack(const Addr a, const SizeT len)
{
   drd_start_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}

/**
 * Called by the core when the stack of a thread shrinks, to indicate that
 * the addresses [ a, a + len [ are no longer accessible for the client.
 * Assumption: stacks grow downward.
 */
static
void drd_stop_using_mem_stack2(const DrdThreadId tid, const Addr a,
                               const SizeT len)
{
   DRD_(thread_set_stack_min)(tid, a + len - VG_STACK_REDZONE_SZB);
   drd_stop_using_mem(a - VG_STACK_REDZONE_SZB, len + VG_STACK_REDZONE_SZB,
                      True);
}

static
void drd_stop_using_mem_stack(const Addr a, const SizeT len)
{
   drd_stop_using_mem_stack2(DRD_(thread_get_running_tid)(), a, len);
}
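
/*
 * on_alt_stack(): returns True if and only if the address 'a' lies on the
 * alternate signal stack of the currently running thread.
 */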

static
Bool on_alt_stack(const Addr a)
{
   ThreadId vg_tid;
   Addr alt_min;
   SizeT alt_size;

   vg_tid = VG_(get_running_tid)();
   alt_min = VG_(thread_get_altstack_min)(vg_tid);
   alt_size = VG_(thread_get_altstack_size)(vg_tid);
   return (SizeT)(a - alt_min) < alt_size;
}
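
/*
 * Stack tracking handlers that are installed while at least one thread is
 * running a signal handler on its alternate stack; see
 * drd_pre_deliver_signal() and drd_post_deliver_signal() below.
 */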

static
void drd_start_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_start_using_mem_stack(a, len);
}

static
void drd_stop_using_mem_alt_stack(const Addr a, const SizeT len)
{
   if (!on_alt_stack(a))
      drd_stop_using_mem_stack(a, len);
}

/**
 * Callback function invoked by the Valgrind core before a signal is delivered.
 */
static
void drd_pre_deliver_signal(const ThreadId vg_tid, const Int sigNo,
                            const Bool alt_stack)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, alt_stack);
   if (alt_stack)
   {
      /*
       * As soon as a signal handler has been invoked on the alternate stack,
       * switch to stack memory handling functions that can handle the
       * alternate stack.
       */
      VG_(track_new_mem_stack)(drd_start_using_mem_alt_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_alt_stack);
   }
}

/**
 * Callback function invoked by the Valgrind core after a signal is delivered,
 * at least if the signal handler did not longjmp().
 */
static
void drd_post_deliver_signal(const ThreadId vg_tid, const Int sigNo)
{
   DrdThreadId drd_tid;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   DRD_(thread_set_on_alt_stack)(drd_tid, False);
   if (DRD_(thread_get_threads_on_alt_stack)() == 0)
   {
      VG_(track_new_mem_stack)(drd_start_using_mem_stack);
      VG_(track_die_mem_stack)(drd_stop_using_mem_stack);
   }
}

/**
 * Callback function called by the Valgrind core before a stack area is
 * being used by a signal handler.
 *
 * @param[in] a   Start of address range - VG_STACK_REDZONE_SZB.
 * @param[in] len Address range length + VG_STACK_REDZONE_SZB.
 * @param[in] tid Valgrind thread ID for whom the signal frame is being
 *                constructed.
 */
static void drd_start_using_mem_stack_signal(const Addr a, const SizeT len,
                                             ThreadId tid)
{
   DRD_(thread_set_vg_running_tid)(VG_(get_running_tid)());
   drd_start_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB,
                       True);
}

static void drd_stop_using_mem_stack_signal(Addr a, SizeT len)
{
   drd_stop_using_mem(a + VG_STACK_REDZONE_SZB, len - VG_STACK_REDZONE_SZB,
                      True);
}
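
/*
 * drd_register_stack(): invoked when the client registers a new stack area,
 * presumably through the VALGRIND_STACK_REGISTER client request (assumption).
 */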

static void drd_register_stack(Addr start, Addr end)
{
   DrdThreadId drd_tid = DRD_(thread_get_running_tid)();

   DRD_(thread_register_stack)(drd_tid, start, end);
}
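
/*
 * drd_pre_thread_create(): invoked by the Valgrind core just before a new
 * thread is created (see the VG_(track_pre_thread_ll_create)() registration
 * below).
 */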

static
void drd_pre_thread_create(const ThreadId creator, const ThreadId created)
{
   const DrdThreadId drd_creator = DRD_(VgThreadIdToDrdThreadId)(creator);
   tl_assert(created != VG_INVALID_THREADID);
   DRD_(thread_pre_create)(drd_creator, created);
   if (DRD_(IsValidDrdThreadId)(drd_creator))
      DRD_(thread_new_segment)(drd_creator);
   if (DRD_(thread_get_trace_fork_join)())
      DRD_(trace_msg)("drd_pre_thread_create creator = %u, created = %u",
                      drd_creator, created);
}

/**
 * Called by Valgrind's core before any loads or stores are performed on
 * the context of thread "created".
 */
static
void drd_post_thread_create(const ThreadId vg_created)
{
   DrdThreadId drd_created;
   Addr stack_max;

   tl_assert(vg_created != VG_INVALID_THREADID);

   drd_created = DRD_(thread_post_create)(vg_created);

   /* Set up red zone before the code in glibc's clone.S is run. */
   stack_max = DRD_(thread_get_stack_max)(drd_created);
   drd_start_using_mem_stack2(drd_created, stack_max, 0);

   if (DRD_(thread_get_trace_fork_join)())
      DRD_(trace_msg)("drd_post_thread_create created = %u", drd_created);

   if (! DRD_(get_check_stack_accesses)())
   {
      DRD_(start_suppression)(DRD_(thread_get_stack_max)(drd_created)
                              - DRD_(thread_get_stack_size)(drd_created),
                              DRD_(thread_get_stack_max)(drd_created),
                              "stack");
   }
}

/* Called after a thread has performed its last memory access. */
static void drd_thread_finished(ThreadId vg_tid)
{
   DrdThreadId drd_tid;

   /*
    * Ignore if invoked because thread creation failed. See e.g.
    * coregrind/m_syswrap/syswrap-amd64-linux.c
    */
   if (VG_(get_running_tid)() != vg_tid)
      return;

   drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
   tl_assert(drd_tid != DRD_INVALID_THREADID);
   if (DRD_(thread_get_trace_fork_join)())
      DRD_(trace_msg)("drd_thread_finished tid = %u%s", drd_tid,
                      DRD_(thread_get_joinable)(drd_tid)
                      ? "" : " (which is a detached thread)");

   if (s_show_stack_usage && !VG_(clo_xml)) {
      const SizeT stack_size = DRD_(thread_get_stack_size)(drd_tid);
      const SizeT used_stack
         = (DRD_(thread_get_stack_max)(drd_tid)
            - DRD_(thread_get_stack_min_min)(drd_tid));

      VG_(message)(Vg_UserMsg,
                   "thread %u%s finished and used %lu bytes out of %lu"
                   " on its stack. Margin: %ld bytes.\n",
                   drd_tid,
                   DRD_(thread_get_joinable)(drd_tid)
                   ? "" : " (which is a detached thread)",
                   used_stack, stack_size, (long)(stack_size - used_stack));
   }

   drd_stop_using_mem(DRD_(thread_get_stack_min)(drd_tid),
                      DRD_(thread_get_stack_max)(drd_tid)
                      - DRD_(thread_get_stack_min)(drd_tid),
                      True);
   DRD_(thread_set_record_loads)(drd_tid, False);
   DRD_(thread_set_record_stores)(drd_tid, False);
   DRD_(thread_finished)(drd_tid);
}

/*
 * Called immediately after fork for the child process only. 'tid' is the
 * only surviving thread in the child process. Cleans up thread state.
 * See also
 * http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_atfork.html
 * for a detailed discussion of using fork() in combination with mutexes.
 */
static
void drd__atfork_child(ThreadId tid)
{
   DRD_(drd_thread_atfork_child)(tid);
}

// Implementation of the tool interface.
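
/*
 * DRD_(post_clo_init)(): invoked by the Valgrind core after command line
 * option processing has finished.
 */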
static void DRD_(post_clo_init)(void)
{
#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
   /* fine */
#else
   VG_(printf)("\nWARNING: DRD has not yet been tested on this operating system.\n\n");
#endif

   if (s_var_info)
      VG_(needs_var_info)();
}
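
/*
 * drd_start_client_code(): invoked every time the Valgrind scheduler is about
 * to run client code on thread 'tid'.
 */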
static void drd_start_client_code(const ThreadId tid, const ULong bbs_done)
{
   tl_assert(tid == VG_(get_running_tid)());
   DRD_(thread_set_vg_running_tid)(tid);
}
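
/*
 * DRD_(fini)(): invoked at the end of the analysis; prints DRD statistics if
 * these were requested via --drd-stats=yes or the core --stats option.
 */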
static void DRD_(fini)(Int exitcode)
{
   // thread_print_all();

   if ((VG_(clo_stats) || s_print_stats) && !VG_(clo_xml))
   {
      ULong pu = DRD_(thread_get_update_conflict_set_count)();
      ULong pu_seg_cr = DRD_(thread_get_update_conflict_set_new_sg_count)();
      ULong pu_mtx_cv = DRD_(thread_get_update_conflict_set_sync_count)();
      ULong pu_join = DRD_(thread_get_update_conflict_set_join_count)();

      VG_(message)(Vg_UserMsg,
                   "   thread: %llu context switches.\n",
                   DRD_(thread_get_context_switch_count)());
      VG_(message)(Vg_UserMsg,
                   "confl set: %llu full updates and %llu partial updates;\n",
                   DRD_(thread_get_compute_conflict_set_count)(),
                   pu);
      VG_(message)(Vg_UserMsg,
                   "           %llu partial updates during segment creation,\n",
                   pu_seg_cr);
      VG_(message)(Vg_UserMsg,
                   "           %llu because of mutex/sema/cond.var. operations,\n",
                   pu_mtx_cv);
      VG_(message)(Vg_UserMsg,
                   "           %llu because of barrier/rwlock operations and\n",
                   pu - pu_seg_cr - pu_mtx_cv - pu_join);
      VG_(message)(Vg_UserMsg,
                   "           %llu partial updates because of thread join.\n",
                   pu_join);
      VG_(message)(Vg_UserMsg,
                   " segments: created %llu segments, max %llu alive,\n",
                   DRD_(sg_get_segments_created_count)(),
                   DRD_(sg_get_max_segments_alive_count)());
      VG_(message)(Vg_UserMsg,
                   "           %llu discard points and %llu merges.\n",
                   DRD_(thread_get_discard_ordered_segments_count)(),
                   DRD_(sg_get_segment_merge_count)());
      VG_(message)(Vg_UserMsg,
                   "segmnt cr: %llu mutex, %llu rwlock, %llu semaphore and"
                   " %llu barrier.\n",
                   DRD_(get_mutex_segment_creation_count)(),
                   DRD_(get_rwlock_segment_creation_count)(),
                   DRD_(get_semaphore_segment_creation_count)(),
                   DRD_(get_barrier_segment_creation_count)());
      VG_(message)(Vg_UserMsg,
                   "  bitmaps: %llu level one"
                   " and %llu level two bitmaps were allocated.\n",
                   DRD_(bm_get_bitmap_creation_count)(),
                   DRD_(bm_get_bitmap2_creation_count)());
      VG_(message)(Vg_UserMsg,
                   "    mutex: %llu non-recursive lock/unlock events.\n",
                   DRD_(get_mutex_lock_count)());
      DRD_(print_malloc_stats)();
   }

   DRD_(bm_module_cleanup)();
}
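
/*
 * drd_pre_clo_init(): tool registration. Tells the Valgrind core which
 * callbacks drd implements; runs before command line option processing.
 */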
static void drd_pre_clo_init(void)
{
   // Basic tool stuff.
   VG_(details_name)            ("drd");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)("Copyright (C) 2006-2020, and GNU GPL'd,"
                                 " by Bart Van Assche.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);

   VG_(basic_tool_funcs)        (DRD_(post_clo_init),
                                 DRD_(instrument),   /* defined in drd_load_store.c */
                                 DRD_(fini));

   // Command line stuff.
   VG_(needs_command_line_options)(DRD_(process_cmd_line_option),
                                   DRD_(print_usage),
                                   DRD_(print_debug_usage));
   VG_(needs_xml_output)();

   // Error handling.
   DRD_(register_error_handlers)();

   // Core event tracking.
   VG_(track_pre_mem_read)         (drd_pre_mem_read);
   VG_(track_pre_mem_read_asciiz)  (drd_pre_mem_read_asciiz);
   VG_(track_post_mem_write)       (drd_post_mem_write);
   VG_(track_new_mem_brk)          (drd_start_using_mem_w_tid);
   VG_(track_new_mem_mmap)         (drd_start_using_mem_w_perms);
   VG_(track_new_mem_stack)        (drd_start_using_mem_stack);
   VG_(track_new_mem_stack_signal) (drd_start_using_mem_stack_signal);
   VG_(track_new_mem_startup)      (drd_start_using_mem_w_perms);
   VG_(track_die_mem_brk)          (drd_stop_using_nonstack_mem);
   VG_(track_die_mem_munmap)       (drd_stop_using_nonstack_mem);
   VG_(track_die_mem_stack)        (drd_stop_using_mem_stack);
   VG_(track_die_mem_stack_signal) (drd_stop_using_mem_stack_signal);
   VG_(track_register_stack)       (drd_register_stack);
   VG_(track_pre_deliver_signal)   (drd_pre_deliver_signal);
   VG_(track_post_deliver_signal)  (drd_post_deliver_signal);
   VG_(track_start_client_code)    (drd_start_client_code);
   VG_(track_pre_thread_ll_create) (drd_pre_thread_create);
   VG_(track_pre_thread_first_insn)(drd_post_thread_create);
   VG_(track_pre_thread_ll_exit)   (drd_thread_finished);
   VG_(atfork)                     (NULL/*pre*/, NULL/*parent*/,
                                    drd__atfork_child/*child*/);

   // Malloc wrappers and module initialization.
   DRD_(register_malloc_wrappers)(drd_start_using_mem_w_ecu,
                                  drd_stop_using_nonstack_mem);

   DRD_(bm_module_init)();

   DRD_(clientreq_init)();

   DRD_(suppression_init)();

   DRD_(clientobj_init)();

   /* Optional environment variable overrides. */
   {
      HChar* const smi = VG_(getenv)("DRD_SEGMENT_MERGING_INTERVAL");

      if (smi)
         DRD_(thread_set_segment_merge_interval)(VG_(strtoll10)(smi, NULL));
   }

   if (VG_(getenv)("DRD_VERIFY_CONFLICT_SET"))
      DRD_(verify_conflict_set) = True;
}


VG_DETERMINE_INTERFACE_VERSION(drd_pre_clo_init)