2 This file is part of drd, a thread error detector.
4 Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA.
21 The GNU General Public License is contained in the file COPYING.
25 #include "drd_error.h"
26 #include "drd_barrier.h"
27 #include "drd_clientobj.h"
29 #include "drd_mutex.h"
30 #include "drd_segment.h"
31 #include "drd_semaphore.h"
32 #include "drd_suppression.h"
33 #include "drd_thread.h"
34 #include "pub_tool_vki.h"
35 #include "pub_tool_basics.h" // Addr, SizeT
36 #include "pub_tool_libcassert.h" // tl_assert()
37 #include "pub_tool_libcbase.h" // VG_(strlen)()
38 #include "pub_tool_libcprint.h" // VG_(printf)()
39 #include "pub_tool_machine.h"
40 #include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
41 #include "pub_tool_options.h" // VG_(clo_backtrace_size)
42 #include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
46 /* Local functions. */
48 static void thread_append_segment(const DrdThreadId tid
, Segment
* const sg
);
49 static void thread_discard_segment(const DrdThreadId tid
, Segment
* const sg
);
50 static void thread_compute_conflict_set(struct bitmap
** conflict_set
,
51 const DrdThreadId tid
);
52 static Bool
thread_conflict_set_up_to_date(const DrdThreadId tid
);
55 /* Local variables. */
57 static ULong s_context_switch_count
;
58 static ULong s_discard_ordered_segments_count
;
59 static ULong s_compute_conflict_set_count
;
60 static ULong s_update_conflict_set_count
;
61 static ULong s_update_conflict_set_new_sg_count
;
62 static ULong s_update_conflict_set_sync_count
;
63 static ULong s_update_conflict_set_join_count
;
64 static ULong s_conflict_set_bitmap_creation_count
;
65 static ULong s_conflict_set_bitmap2_creation_count
;
66 static ThreadId s_vg_running_tid
= VG_INVALID_THREADID
;
67 DrdThreadId
DRD_(g_drd_running_tid
) = DRD_INVALID_THREADID
;
68 ThreadInfo
DRD_(g_threadinfo
)[DRD_N_THREADS
];
69 struct bitmap
* DRD_(g_conflict_set
);
70 Bool
DRD_(verify_conflict_set
);
71 static Bool s_trace_context_switches
= False
;
72 static Bool s_trace_conflict_set
= False
;
73 static Bool s_trace_conflict_set_bm
= False
;
74 static Bool s_trace_fork_join
= False
;
75 static Bool s_segment_merging
= True
;
76 static Bool s_new_segments_since_last_merge
;
77 static int s_segment_merge_interval
= 10;
78 static unsigned s_join_list_vol
= 10;
79 static unsigned s_deletion_head
;
80 static unsigned s_deletion_tail
;
83 /* Function definitions. */
85 /** Enables/disables context switch tracing. */
86 void DRD_(thread_trace_context_switches
)(const Bool t
)
88 tl_assert(t
== False
|| t
== True
);
89 s_trace_context_switches
= t
;
92 /** Enables/disables conflict set tracing. */
93 void DRD_(thread_trace_conflict_set
)(const Bool t
)
95 tl_assert(t
== False
|| t
== True
);
96 s_trace_conflict_set
= t
;
99 /** Enables/disables conflict set bitmap tracing. */
100 void DRD_(thread_trace_conflict_set_bm
)(const Bool t
)
102 tl_assert(t
== False
|| t
== True
);
103 s_trace_conflict_set_bm
= t
;
106 /** Report whether fork/join tracing is enabled. */
107 Bool
DRD_(thread_get_trace_fork_join
)(void)
109 return s_trace_fork_join
;
112 /** Enables/disables fork/join tracing. */
113 void DRD_(thread_set_trace_fork_join
)(const Bool t
)
115 tl_assert(t
== False
|| t
== True
);
116 s_trace_fork_join
= t
;
119 /** Enables/disables segment merging. */
120 void DRD_(thread_set_segment_merging
)(const Bool m
)
122 tl_assert(m
== False
|| m
== True
);
123 s_segment_merging
= m
;
126 /** Get the segment merging interval. */
127 int DRD_(thread_get_segment_merge_interval
)(void)
129 return s_segment_merge_interval
;
132 /** Set the segment merging interval. */
133 void DRD_(thread_set_segment_merge_interval
)(const int i
)
135 s_segment_merge_interval
= i
;
138 void DRD_(thread_set_join_list_vol
)(const int jlv
)
140 s_join_list_vol
= jlv
;
143 void DRD_(thread_init
)(void)
148 * Convert Valgrind's ThreadId into a DrdThreadId.
150 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
151 * Valgrind ThreadId does not yet exist.
153 DrdThreadId
DRD_(VgThreadIdToDrdThreadId
)(const ThreadId tid
)
157 if (tid
== VG_INVALID_THREADID
)
158 return DRD_INVALID_THREADID
;
160 for (i
= 1; i
< DRD_N_THREADS
; i
++)
162 if (DRD_(g_threadinfo
)[i
].vg_thread_exists
== True
163 && DRD_(g_threadinfo
)[i
].vg_threadid
== tid
)
169 return DRD_INVALID_THREADID
;
172 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
173 static DrdThreadId
DRD_(VgThreadIdToNewDrdThreadId
)(const ThreadId tid
)
177 tl_assert(DRD_(VgThreadIdToDrdThreadId
)(tid
) == DRD_INVALID_THREADID
);
179 for (i
= 1; i
< DRD_N_THREADS
; i
++)
181 if (!DRD_(g_threadinfo
)[i
].valid
)
183 tl_assert(! DRD_(IsValidDrdThreadId
)(i
));
185 DRD_(g_threadinfo
)[i
].valid
= True
;
186 DRD_(g_threadinfo
)[i
].vg_thread_exists
= True
;
187 DRD_(g_threadinfo
)[i
].vg_threadid
= tid
;
188 DRD_(g_threadinfo
)[i
].pt_threadid
= INVALID_POSIX_THREADID
;
189 DRD_(g_threadinfo
)[i
].stack_min
= 0;
190 DRD_(g_threadinfo
)[i
].stack_min_min
= 0;
191 DRD_(g_threadinfo
)[i
].stack_startup
= 0;
192 DRD_(g_threadinfo
)[i
].stack_max
= 0;
193 DRD_(thread_set_name
)(i
, "");
194 DRD_(g_threadinfo
)[i
].on_alt_stack
= False
;
195 DRD_(g_threadinfo
)[i
].is_recording_loads
= True
;
196 DRD_(g_threadinfo
)[i
].is_recording_stores
= True
;
197 DRD_(g_threadinfo
)[i
].pthread_create_nesting_level
= 0;
198 DRD_(g_threadinfo
)[i
].synchr_nesting
= 0;
199 DRD_(g_threadinfo
)[i
].deletion_seq
= s_deletion_tail
- 1;
200 tl_assert(DRD_(g_threadinfo
)[i
].sg_first
== NULL
);
201 tl_assert(DRD_(g_threadinfo
)[i
].sg_last
== NULL
);
203 tl_assert(DRD_(IsValidDrdThreadId
)(i
));
210 "\nSorry, but the maximum number of threads supported by DRD has been exceeded."
215 return DRD_INVALID_THREADID
;
218 /** Convert a POSIX thread ID into a DRD thread ID. */
219 DrdThreadId
DRD_(PtThreadIdToDrdThreadId
)(const PThreadId tid
)
223 if (tid
!= INVALID_POSIX_THREADID
)
225 for (i
= 1; i
< DRD_N_THREADS
; i
++)
227 if (DRD_(g_threadinfo
)[i
].posix_thread_exists
228 && DRD_(g_threadinfo
)[i
].pt_threadid
== tid
)
234 return DRD_INVALID_THREADID
;
237 /** Convert a DRD thread ID into a Valgrind thread ID. */
238 ThreadId
DRD_(DrdThreadIdToVgThreadId
)(const DrdThreadId tid
)
240 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
241 && tid
!= DRD_INVALID_THREADID
);
243 return (DRD_(g_threadinfo
)[tid
].vg_thread_exists
244 ? DRD_(g_threadinfo
)[tid
].vg_threadid
245 : VG_INVALID_THREADID
);
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* p;

   /* Walk forward: every node's successor must point back at it, and the
      forward walk must end exactly at sg_last. */
   for (p = ti->sg_first; p; p = p->thr_next) {
      if (p->thr_next && p->thr_next->thr_prev != p)
         return False;
      if (p->thr_next == 0 && p != ti->sg_last)
         return False;
   }
   /* Walk backward: mirror-image checks against sg_first. */
   for (p = ti->sg_last; p; p = p->thr_prev) {
      if (p->thr_prev && p->thr_prev->thr_next != p)
         return False;
      if (p->thr_prev == 0 && p != ti->sg_first)
         return False;
   }
   return True;
}
#endif
275 * Create the first segment for a newly started thread.
277 * This function is called from the handler installed via
278 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
279 * from the context of the creator thread, before the new thread has been
282 * @param[in] creator DRD thread ID of the creator thread.
283 * @param[in] vg_created Valgrind thread ID of the created thread.
285 * @return DRD thread ID of the created thread.
287 DrdThreadId
DRD_(thread_pre_create
)(const DrdThreadId creator
,
288 const ThreadId vg_created
)
292 tl_assert(DRD_(VgThreadIdToDrdThreadId
)(vg_created
) == DRD_INVALID_THREADID
);
293 created
= DRD_(VgThreadIdToNewDrdThreadId
)(vg_created
);
294 tl_assert(0 <= (int)created
&& created
< DRD_N_THREADS
295 && created
!= DRD_INVALID_THREADID
);
297 tl_assert(DRD_(g_threadinfo
)[created
].sg_first
== NULL
);
298 tl_assert(DRD_(g_threadinfo
)[created
].sg_last
== NULL
);
299 /* Create an initial segment for the newly created thread. */
300 thread_append_segment(created
, DRD_(sg_new
)(creator
, created
));
306 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
307 * after the thread has been created and before any client instructions are run
308 * on the newly created thread, e.g. from the handler installed via
309 * VG_(track_pre_thread_first_insn)().
311 * @param[in] vg_created Valgrind thread ID of the newly created thread.
313 * @return DRD thread ID for the new thread.
315 DrdThreadId
DRD_(thread_post_create
)(const ThreadId vg_created
)
317 const DrdThreadId created
= DRD_(VgThreadIdToDrdThreadId
)(vg_created
);
319 tl_assert(0 <= (int)created
&& created
< DRD_N_THREADS
320 && created
!= DRD_INVALID_THREADID
);
322 DRD_(g_threadinfo
)[created
].stack_max
323 = VG_(thread_get_stack_max
)(vg_created
);
324 DRD_(g_threadinfo
)[created
].stack_startup
325 = DRD_(g_threadinfo
)[created
].stack_max
;
326 DRD_(g_threadinfo
)[created
].stack_min
327 = DRD_(g_threadinfo
)[created
].stack_max
;
328 DRD_(g_threadinfo
)[created
].stack_min_min
329 = DRD_(g_threadinfo
)[created
].stack_max
;
330 DRD_(g_threadinfo
)[created
].stack_size
331 = VG_(thread_get_stack_size
)(vg_created
);
332 tl_assert(DRD_(g_threadinfo
)[created
].stack_max
!= 0);
337 static void DRD_(thread_delayed_delete
)(const DrdThreadId tid
)
341 DRD_(g_threadinfo
)[tid
].vg_thread_exists
= False
;
342 DRD_(g_threadinfo
)[tid
].posix_thread_exists
= False
;
343 DRD_(g_threadinfo
)[tid
].deletion_seq
= s_deletion_head
++;
345 VG_(message
)(Vg_DebugMsg
, "Adding thread %d to the deletion list\n", tid
);
347 if (s_deletion_head
- s_deletion_tail
>= s_join_list_vol
) {
348 for (j
= 0; j
< DRD_N_THREADS
; ++j
) {
349 if (DRD_(IsValidDrdThreadId
)(j
)
350 && DRD_(g_threadinfo
)[j
].deletion_seq
== s_deletion_tail
)
354 VG_(message
)(Vg_DebugMsg
, "Delayed delete of thread %d\n", j
);
356 DRD_(thread_delete
)(j
, False
);
364 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
365 * after thread drd_joiner joined thread drd_joinee.
367 void DRD_(thread_post_join
)(DrdThreadId drd_joiner
, DrdThreadId drd_joinee
)
369 tl_assert(DRD_(IsValidDrdThreadId
)(drd_joiner
));
370 tl_assert(DRD_(IsValidDrdThreadId
)(drd_joinee
));
372 DRD_(thread_new_segment
)(drd_joiner
);
373 DRD_(thread_combine_vc_join
)(drd_joiner
, drd_joinee
);
374 DRD_(thread_new_segment
)(drd_joinee
);
376 if (s_trace_fork_join
)
378 const ThreadId joiner
= DRD_(DrdThreadIdToVgThreadId
)(drd_joiner
);
379 const unsigned msg_size
= 256;
382 msg
= VG_(malloc
)("drd.main.dptj.1", msg_size
);
384 VG_(snprintf
)(msg
, msg_size
,
385 "drd_post_thread_join joiner = %d, joinee = %d",
386 drd_joiner
, drd_joinee
);
391 vc
= DRD_(vc_aprint
)(DRD_(thread_get_vc
)(drd_joiner
));
392 VG_(snprintf
)(msg
+ VG_(strlen
)(msg
), msg_size
- VG_(strlen
)(msg
),
396 DRD_(trace_msg
)("%pS", msg
);
400 if (! DRD_(get_check_stack_accesses
)())
402 DRD_(finish_suppression
)(DRD_(thread_get_stack_max
)(drd_joinee
)
403 - DRD_(thread_get_stack_size
)(drd_joinee
),
404 DRD_(thread_get_stack_max
)(drd_joinee
));
406 DRD_(clientobj_delete_thread
)(drd_joinee
);
407 DRD_(thread_delayed_delete
)(drd_joinee
);
411 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
412 * and accesses this data structure from multiple threads without locking.
413 * Any conflicting accesses in the range stack_startup..stack_max will be
416 void DRD_(thread_set_stack_startup
)(const DrdThreadId tid
,
417 const Addr stack_startup
)
419 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
420 && tid
!= DRD_INVALID_THREADID
);
421 tl_assert(DRD_(g_threadinfo
)[tid
].stack_min
<= stack_startup
);
422 tl_assert(stack_startup
<= DRD_(g_threadinfo
)[tid
].stack_max
);
423 DRD_(g_threadinfo
)[tid
].stack_startup
= stack_startup
;
426 /** Return the stack pointer for the specified thread. */
427 Addr
DRD_(thread_get_stack_min
)(const DrdThreadId tid
)
429 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
430 && tid
!= DRD_INVALID_THREADID
);
431 return DRD_(g_threadinfo
)[tid
].stack_min
;
435 * Return the lowest value that was ever assigned to the stack pointer
436 * for the specified thread.
438 Addr
DRD_(thread_get_stack_min_min
)(const DrdThreadId tid
)
440 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
441 && tid
!= DRD_INVALID_THREADID
);
442 return DRD_(g_threadinfo
)[tid
].stack_min_min
;
445 /** Return the top address for the stack of the specified thread. */
446 Addr
DRD_(thread_get_stack_max
)(const DrdThreadId tid
)
448 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
449 && tid
!= DRD_INVALID_THREADID
);
450 return DRD_(g_threadinfo
)[tid
].stack_max
;
453 /** Return the maximum stack size for the specified thread. */
454 SizeT
DRD_(thread_get_stack_size
)(const DrdThreadId tid
)
456 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
457 && tid
!= DRD_INVALID_THREADID
);
458 return DRD_(g_threadinfo
)[tid
].stack_size
;
461 Bool
DRD_(thread_get_on_alt_stack
)(const DrdThreadId tid
)
463 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
464 && tid
!= DRD_INVALID_THREADID
);
465 return DRD_(g_threadinfo
)[tid
].on_alt_stack
;
468 void DRD_(thread_set_on_alt_stack
)(const DrdThreadId tid
,
469 const Bool on_alt_stack
)
471 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
472 && tid
!= DRD_INVALID_THREADID
);
473 tl_assert(on_alt_stack
== !!on_alt_stack
);
474 DRD_(g_threadinfo
)[tid
].on_alt_stack
= on_alt_stack
;
477 Int
DRD_(thread_get_threads_on_alt_stack
)(void)
481 for (i
= 1; i
< DRD_N_THREADS
; i
++)
482 n
+= DRD_(g_threadinfo
)[i
].on_alt_stack
;
487 * Clean up thread-specific data structures.
489 void DRD_(thread_delete
)(const DrdThreadId tid
, const Bool detached
)
494 tl_assert(DRD_(IsValidDrdThreadId
)(tid
));
496 tl_assert(DRD_(g_threadinfo
)[tid
].synchr_nesting
>= 0);
497 for (sg
= DRD_(g_threadinfo
)[tid
].sg_last
; sg
; sg
= sg_prev
) {
498 sg_prev
= sg
->thr_prev
;
503 DRD_(g_threadinfo
)[tid
].valid
= False
;
504 DRD_(g_threadinfo
)[tid
].vg_thread_exists
= False
;
505 DRD_(g_threadinfo
)[tid
].posix_thread_exists
= False
;
507 DRD_(g_threadinfo
)[tid
].detached_posix_thread
= False
;
509 tl_assert(!DRD_(g_threadinfo
)[tid
].detached_posix_thread
);
510 DRD_(g_threadinfo
)[tid
].sg_first
= NULL
;
511 DRD_(g_threadinfo
)[tid
].sg_last
= NULL
;
513 tl_assert(!DRD_(IsValidDrdThreadId
)(tid
));
517 * Called after a thread performed its last memory access and before
518 * thread_delete() is called. Note: thread_delete() is only called for
519 * joinable threads, not for detached threads.
521 void DRD_(thread_finished
)(const DrdThreadId tid
)
523 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
524 && tid
!= DRD_INVALID_THREADID
);
526 DRD_(g_threadinfo
)[tid
].vg_thread_exists
= False
;
528 if (DRD_(g_threadinfo
)[tid
].detached_posix_thread
)
531 * Once a detached thread has finished, its stack is deallocated and
532 * should no longer be taken into account when computing the conflict set.
534 DRD_(g_threadinfo
)[tid
].stack_min
= DRD_(g_threadinfo
)[tid
].stack_max
;
537 * For a detached thread, calling pthread_exit() invalidates the
538 * POSIX thread ID associated with the detached thread. For joinable
539 * POSIX threads however, the POSIX thread ID remains live after the
540 * pthread_exit() call until pthread_join() is called.
542 DRD_(g_threadinfo
)[tid
].posix_thread_exists
= False
;
546 /** Called just after fork() in the child process. */
547 void DRD_(drd_thread_atfork_child
)(const DrdThreadId tid
)
551 for (i
= 1; i
< DRD_N_THREADS
; i
++)
555 if (DRD_(IsValidDrdThreadId(i
)))
556 DRD_(thread_delete
)(i
, True
);
557 tl_assert(!DRD_(IsValidDrdThreadId(i
)));
560 DRD_(bm_cleanup
)(DRD_(g_conflict_set
));
561 DRD_(bm_init
)(DRD_(g_conflict_set
));
564 /** Called just before pthread_cancel(). */
565 void DRD_(thread_pre_cancel
)(const DrdThreadId tid
)
567 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
568 && tid
!= DRD_INVALID_THREADID
);
569 tl_assert(DRD_(g_threadinfo
)[tid
].pt_threadid
!= INVALID_POSIX_THREADID
);
571 if (DRD_(thread_get_trace_fork_join
)())
572 DRD_(trace_msg
)("[%d] drd_thread_pre_cancel %d",
573 DRD_(g_drd_running_tid
), tid
);
577 * Store the POSIX thread ID for the specified thread.
579 * @note This function can be called two times for the same thread -- see also
580 * the comment block preceding the pthread_create() wrapper in
581 * drd_pthread_intercepts.c.
583 void DRD_(thread_set_pthreadid
)(const DrdThreadId tid
, const PThreadId ptid
)
585 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
586 && tid
!= DRD_INVALID_THREADID
);
587 tl_assert(DRD_(g_threadinfo
)[tid
].pt_threadid
== INVALID_POSIX_THREADID
588 || DRD_(g_threadinfo
)[tid
].pt_threadid
== ptid
);
589 tl_assert(ptid
!= INVALID_POSIX_THREADID
);
590 DRD_(g_threadinfo
)[tid
].posix_thread_exists
= True
;
591 DRD_(g_threadinfo
)[tid
].pt_threadid
= ptid
;
594 /** Returns true for joinable threads and false for detached threads. */
595 Bool
DRD_(thread_get_joinable
)(const DrdThreadId tid
)
597 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
598 && tid
!= DRD_INVALID_THREADID
);
599 return ! DRD_(g_threadinfo
)[tid
].detached_posix_thread
;
602 /** Store the thread mode: joinable or detached. */
603 #if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
604 /* There is a cse related issue in gcc for MIPS. Optimization level
605 has to be lowered, so cse related optimizations are not
607 __attribute__((optimize("O1")))
609 void DRD_(thread_set_joinable
)(const DrdThreadId tid
, const Bool joinable
)
611 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
612 && tid
!= DRD_INVALID_THREADID
);
613 tl_assert(!! joinable
== joinable
);
614 tl_assert(DRD_(g_threadinfo
)[tid
].pt_threadid
!= INVALID_POSIX_THREADID
);
616 DRD_(g_threadinfo
)[tid
].detached_posix_thread
= ! joinable
;
619 /** Tells DRD that the calling thread is about to enter pthread_create(). */
620 void DRD_(thread_entering_pthread_create
)(const DrdThreadId tid
)
622 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
623 && tid
!= DRD_INVALID_THREADID
);
624 tl_assert(DRD_(g_threadinfo
)[tid
].pt_threadid
!= INVALID_POSIX_THREADID
);
625 tl_assert(DRD_(g_threadinfo
)[tid
].pthread_create_nesting_level
>= 0);
627 DRD_(g_threadinfo
)[tid
].pthread_create_nesting_level
++;
630 /** Tells DRD that the calling thread has left pthread_create(). */
631 void DRD_(thread_left_pthread_create
)(const DrdThreadId tid
)
633 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
634 && tid
!= DRD_INVALID_THREADID
);
635 tl_assert(DRD_(g_threadinfo
)[tid
].pt_threadid
!= INVALID_POSIX_THREADID
);
636 tl_assert(DRD_(g_threadinfo
)[tid
].pthread_create_nesting_level
> 0);
638 DRD_(g_threadinfo
)[tid
].pthread_create_nesting_level
--;
641 /** Obtain the thread number and the user-assigned thread name. */
642 const HChar
* DRD_(thread_get_name
)(const DrdThreadId tid
)
644 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
645 && tid
!= DRD_INVALID_THREADID
);
647 return DRD_(g_threadinfo
)[tid
].name
;
650 /** Set the name of the specified thread. */
651 void DRD_(thread_set_name
)(const DrdThreadId tid
, const HChar
* const name
)
653 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
654 && tid
!= DRD_INVALID_THREADID
);
656 if (name
== NULL
|| name
[0] == 0)
657 VG_(snprintf
)(DRD_(g_threadinfo
)[tid
].name
,
658 sizeof(DRD_(g_threadinfo
)[tid
].name
),
662 VG_(snprintf
)(DRD_(g_threadinfo
)[tid
].name
,
663 sizeof(DRD_(g_threadinfo
)[tid
].name
),
666 DRD_(g_threadinfo
)[tid
].name
[sizeof(DRD_(g_threadinfo
)[tid
].name
) - 1] = 0;
670 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
673 void DRD_(thread_set_vg_running_tid
)(const ThreadId vg_tid
)
675 tl_assert(vg_tid
!= VG_INVALID_THREADID
);
677 if (vg_tid
!= s_vg_running_tid
)
679 DRD_(thread_set_running_tid
)(vg_tid
,
680 DRD_(VgThreadIdToDrdThreadId
)(vg_tid
));
683 tl_assert(s_vg_running_tid
!= VG_INVALID_THREADID
);
684 tl_assert(DRD_(g_drd_running_tid
) != DRD_INVALID_THREADID
);
688 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
691 void DRD_(thread_set_running_tid
)(const ThreadId vg_tid
,
692 const DrdThreadId drd_tid
)
694 tl_assert(vg_tid
!= VG_INVALID_THREADID
);
695 tl_assert(drd_tid
!= DRD_INVALID_THREADID
);
697 if (vg_tid
!= s_vg_running_tid
)
699 if (s_trace_context_switches
700 && DRD_(g_drd_running_tid
) != DRD_INVALID_THREADID
)
702 VG_(message
)(Vg_DebugMsg
,
703 "Context switch from thread %d to thread %d;"
705 DRD_(g_drd_running_tid
), drd_tid
,
706 DRD_(sg_get_segments_alive_count
)());
708 s_vg_running_tid
= vg_tid
;
709 DRD_(g_drd_running_tid
) = drd_tid
;
710 thread_compute_conflict_set(&DRD_(g_conflict_set
), drd_tid
);
711 s_context_switch_count
++;
714 tl_assert(s_vg_running_tid
!= VG_INVALID_THREADID
);
715 tl_assert(DRD_(g_drd_running_tid
) != DRD_INVALID_THREADID
);
719 * Increase the synchronization nesting counter. Must be called before the
720 * client calls a synchronization function.
722 int DRD_(thread_enter_synchr
)(const DrdThreadId tid
)
724 tl_assert(DRD_(IsValidDrdThreadId
)(tid
));
725 return DRD_(g_threadinfo
)[tid
].synchr_nesting
++;
729 * Decrease the synchronization nesting counter. Must be called after the
730 * client left a synchronization function.
732 int DRD_(thread_leave_synchr
)(const DrdThreadId tid
)
734 tl_assert(DRD_(IsValidDrdThreadId
)(tid
));
735 tl_assert(DRD_(g_threadinfo
)[tid
].synchr_nesting
>= 1);
736 return --DRD_(g_threadinfo
)[tid
].synchr_nesting
;
739 /** Returns the synchronization nesting counter. */
740 int DRD_(thread_get_synchr_nesting_count
)(const DrdThreadId tid
)
742 tl_assert(DRD_(IsValidDrdThreadId
)(tid
));
743 return DRD_(g_threadinfo
)[tid
].synchr_nesting
;
746 /** Append a new segment at the end of the segment list. */
748 void thread_append_segment(const DrdThreadId tid
, Segment
* const sg
)
750 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
751 && tid
!= DRD_INVALID_THREADID
);
753 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
754 tl_assert(DRD_(sane_ThreadInfo
)(&DRD_(g_threadinfo
)[tid
]));
758 sg
->thr_prev
= DRD_(g_threadinfo
)[tid
].sg_last
;
760 if (DRD_(g_threadinfo
)[tid
].sg_last
)
761 DRD_(g_threadinfo
)[tid
].sg_last
->thr_next
= sg
;
762 DRD_(g_threadinfo
)[tid
].sg_last
= sg
;
763 if (DRD_(g_threadinfo
)[tid
].sg_first
== NULL
)
764 DRD_(g_threadinfo
)[tid
].sg_first
= sg
;
766 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
767 tl_assert(DRD_(sane_ThreadInfo
)(&DRD_(g_threadinfo
)[tid
]));
772 * Remove a segment from the segment list of thread threadid, and free the
776 void thread_discard_segment(const DrdThreadId tid
, Segment
* const sg
)
778 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
779 && tid
!= DRD_INVALID_THREADID
);
781 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
782 tl_assert(DRD_(sane_ThreadInfo
)(&DRD_(g_threadinfo
)[tid
]));
786 sg
->thr_prev
->thr_next
= sg
->thr_next
;
788 sg
->thr_next
->thr_prev
= sg
->thr_prev
;
789 if (sg
== DRD_(g_threadinfo
)[tid
].sg_first
)
790 DRD_(g_threadinfo
)[tid
].sg_first
= sg
->thr_next
;
791 if (sg
== DRD_(g_threadinfo
)[tid
].sg_last
)
792 DRD_(g_threadinfo
)[tid
].sg_last
= sg
->thr_prev
;
795 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
796 tl_assert(DRD_(sane_ThreadInfo
)(&DRD_(g_threadinfo
)[tid
]));
801 * Returns a pointer to the vector clock of the most recent segment associated
804 VectorClock
* DRD_(thread_get_vc
)(const DrdThreadId tid
)
808 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
809 && tid
!= DRD_INVALID_THREADID
);
810 latest_sg
= DRD_(g_threadinfo
)[tid
].sg_last
;
811 tl_assert(latest_sg
);
812 return &latest_sg
->vc
;
816 * Return the latest segment of thread 'tid' and increment its reference count.
818 void DRD_(thread_get_latest_segment
)(Segment
** sg
, const DrdThreadId tid
)
823 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
824 && tid
!= DRD_INVALID_THREADID
);
825 latest_sg
= DRD_(g_threadinfo
)[tid
].sg_last
;
826 tl_assert(latest_sg
);
829 *sg
= DRD_(sg_get
)(latest_sg
);
833 * Compute the minimum of all latest vector clocks of all threads
834 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
836 * @param vc pointer to a vectorclock, holds result upon return.
838 static void DRD_(thread_compute_minimum_vc
)(VectorClock
* vc
)
845 for (i
= 0; i
< DRD_N_THREADS
; i
++)
847 latest_sg
= DRD_(g_threadinfo
)[i
].sg_last
;
850 DRD_(vc_assign
)(vc
, &latest_sg
->vc
);
852 DRD_(vc_min
)(vc
, &latest_sg
->vc
);
859 * Compute the maximum of all latest vector clocks of all threads.
861 * @param vc pointer to a vectorclock, holds result upon return.
863 static void DRD_(thread_compute_maximum_vc
)(VectorClock
* vc
)
870 for (i
= 0; i
< DRD_N_THREADS
; i
++)
872 latest_sg
= DRD_(g_threadinfo
)[i
].sg_last
;
875 DRD_(vc_assign
)(vc
, &latest_sg
->vc
);
877 DRD_(vc_combine
)(vc
, &latest_sg
->vc
);
884 * Discard all segments that have a defined order against the latest vector
885 * clock of all threads -- these segments can no longer be involved in a
888 static void thread_discard_ordered_segments(void)
891 VectorClock thread_vc_min
;
893 s_discard_ordered_segments_count
++;
895 DRD_(vc_init
)(&thread_vc_min
, 0, 0);
896 DRD_(thread_compute_minimum_vc
)(&thread_vc_min
);
897 if (DRD_(sg_get_trace
)())
899 HChar
*vc_min
, *vc_max
;
900 VectorClock thread_vc_max
;
902 DRD_(vc_init
)(&thread_vc_max
, 0, 0);
903 DRD_(thread_compute_maximum_vc
)(&thread_vc_max
);
904 vc_min
= DRD_(vc_aprint
)(&thread_vc_min
);
905 vc_max
= DRD_(vc_aprint
)(&thread_vc_max
);
906 VG_(message
)(Vg_DebugMsg
,
907 "Discarding ordered segments -- min vc is %s, max vc is %s\n",
911 DRD_(vc_cleanup
)(&thread_vc_max
);
914 for (i
= 0; i
< DRD_N_THREADS
; i
++) {
918 for (sg
= DRD_(g_threadinfo
)[i
].sg_first
;
919 sg
&& (sg_next
= sg
->thr_next
)
920 && DRD_(vc_lte
)(&sg
->vc
, &thread_vc_min
);
923 thread_discard_segment(i
, sg
);
926 DRD_(vc_cleanup
)(&thread_vc_min
);
930 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
931 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
932 * all segments in the set CS are ordered consistently against both sg1 and
933 * sg2. The set CS is defined as the set of segments that can immediately
934 * precede future segments via inter-thread synchronization operations. In
935 * DRD the set CS consists of the latest segment of each thread combined with
936 * all segments for which the reference count is strictly greater than one.
937 * The code below is an optimized version of the following:
939 * for (i = 0; i < DRD_N_THREADS; i++)
943 * for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
945 * if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
947 * if ( DRD_(vc_lte)(&sg1->vc, &sg->vc)
948 * != DRD_(vc_lte)(&sg2->vc, &sg->vc)
949 * || DRD_(vc_lte)(&sg->vc, &sg1->vc)
950 * != DRD_(vc_lte)(&sg->vc, &sg2->vc))
958 static Bool
thread_consistent_segment_ordering(const DrdThreadId tid
,
964 tl_assert(sg1
->thr_next
);
965 tl_assert(sg2
->thr_next
);
966 tl_assert(sg1
->thr_next
== sg2
);
967 tl_assert(DRD_(vc_lte
)(&sg1
->vc
, &sg2
->vc
));
969 for (i
= 0; i
< DRD_N_THREADS
; i
++)
973 for (sg
= DRD_(g_threadinfo
)[i
].sg_first
; sg
; sg
= sg
->thr_next
) {
974 if (!sg
->thr_next
|| DRD_(sg_get_refcnt
)(sg
) > 1) {
975 if (DRD_(vc_lte
)(&sg2
->vc
, &sg
->vc
))
977 if (DRD_(vc_lte
)(&sg1
->vc
, &sg
->vc
))
981 for (sg
= DRD_(g_threadinfo
)[i
].sg_last
; sg
; sg
= sg
->thr_prev
) {
982 if (!sg
->thr_next
|| DRD_(sg_get_refcnt
)(sg
) > 1) {
983 if (DRD_(vc_lte
)(&sg
->vc
, &sg1
->vc
))
985 if (DRD_(vc_lte
)(&sg
->vc
, &sg2
->vc
))
994 * Merge all segments that may be merged without triggering false positives
995 * or discarding real data races. For the theoretical background of segment
996 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
997 * and Koen De Bosschere. Bounding the number of segment histories during
998 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
999 * pp 1221-1238, September 2002. This paper contains a proof that merging
1000 * consecutive segments for which the property equiv(s1,s2) holds can be
1001 * merged without reducing the accuracy of datarace detection. Furthermore
1002 * it is also proven that the total number of all segments will never grow
1003 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
1004 * every time a new segment is created. The property equiv(s1, s2) is defined
1005 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
1006 * clocks of segments s and s1 are ordered in the same way as those of segments
1007 * s and s2. The set CS is defined as the set of existing segments s that have
1008 * the potential to conflict with not yet created segments, either because the
1009 * segment s is the latest segment of a thread or because it can become the
1010 * immediate predecessor of a new segment due to a synchronization operation.
1012 static void thread_merge_segments(void)
1016 s_new_segments_since_last_merge
= 0;
1018 for (i
= 0; i
< DRD_N_THREADS
; i
++)
1022 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
1023 tl_assert(DRD_(sane_ThreadInfo
)(&DRD_(g_threadinfo
)[i
]));
1026 for (sg
= DRD_(g_threadinfo
)[i
].sg_first
; sg
; sg
= sg
->thr_next
) {
1027 if (DRD_(sg_get_refcnt
)(sg
) == 1 && sg
->thr_next
) {
1028 Segment
* const sg_next
= sg
->thr_next
;
1029 if (DRD_(sg_get_refcnt
)(sg_next
) == 1
1030 && sg_next
->thr_next
1031 && thread_consistent_segment_ordering(i
, sg
, sg_next
))
1033 /* Merge sg and sg_next into sg. */
1034 DRD_(sg_merge
)(sg
, sg_next
);
1035 thread_discard_segment(i
, sg_next
);
1040 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
1041 tl_assert(DRD_(sane_ThreadInfo
)(&DRD_(g_threadinfo
)[i
]));
1047 * Create a new segment for the specified thread, and discard any segments
1048 * that cannot cause races anymore.
1050 void DRD_(thread_new_segment
)(const DrdThreadId tid
)
1055 tl_assert(0 <= (int)tid
&& tid
< DRD_N_THREADS
1056 && tid
!= DRD_INVALID_THREADID
);
1057 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid
)));
1059 last_sg
= DRD_(g_threadinfo
)[tid
].sg_last
;
1060 new_sg
= DRD_(sg_new
)(tid
, tid
);
1061 thread_append_segment(tid
, new_sg
);
1062 if (tid
== DRD_(g_drd_running_tid
) && last_sg
)
1064 DRD_(thread_update_conflict_set
)(tid
, &last_sg
->vc
);
1065 s_update_conflict_set_new_sg_count
++;
1068 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid
)));
1070 if (s_segment_merging
1071 && ++s_new_segments_since_last_merge
>= s_segment_merge_interval
)
1073 thread_discard_ordered_segments();
1074 thread_merge_segments();
/**
 * Call this function after thread 'joiner' joined thread 'joinee'.
 *
 * Merges the joinee's vector clock into the joiner's, so that all memory
 * accesses of the joinee are ordered before subsequent accesses of the
 * joiner.
 *
 * @param[in] joiner  Thread that performed the join operation.
 * @param[in] joinee  Thread that was joined.
 */
void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_first);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_last);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_first);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_last);

   if (DRD_(sg_get_trace)())
   {
      char *str1, *str2;

      str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
                   str1, str2);
      VG_(free)(str1);
      VG_(free)(str2);
   }
   if (joiner == DRD_(g_drd_running_tid)) {
      VectorClock old_vc;

      /* Keep a copy of the pre-join vector clock: the incremental
       * conflict-set update needs both the old and the new clock.
       */
      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
      DRD_(thread_update_conflict_set)(joiner, &old_vc);
      s_update_conflict_set_join_count++;
      DRD_(vc_cleanup)(&old_vc);
   } else {
      /* The conflict set only tracks the running thread, so no
       * conflict-set update is needed for other threads.
       */
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
   }

   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)()) {
      char* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}
/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
 *
 * Called after a synchronization operation made the accesses in sg
 * happen-before the current segment of tid.
 *
 * @param[in] tid  Thread whose clock is updated.
 * @param[in] sg   Segment whose vector clock is merged in.
 */
static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
{
   const VectorClock* const vc = &sg->vc;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].sg_first);
   tl_assert(DRD_(g_threadinfo)[tid].sg_last);
   tl_assert(sg);
   tl_assert(vc);

   if (tid != sg->tid) {
      VectorClock old_vc;

      /* Preserve the old clock so the conflict set can be updated
       * incrementally from old_vc to the combined clock.
       */
      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
      DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
      if (DRD_(sg_get_trace)()) {
         char *str1, *str2;

         str1 = DRD_(vc_aprint)(&old_vc);
         str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
         VG_(free)(str1);
         VG_(free)(str2);
      }

      thread_discard_ordered_segments();

      DRD_(thread_update_conflict_set)(tid, &old_vc);
      s_update_conflict_set_sync_count++;

      DRD_(vc_cleanup)(&old_vc);
   } else {
      /* Synchronizing with a segment of the same thread: its clock is
       * necessarily already ordered before the current one.
       */
      tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
   }
}
/**
 * Create a new segment for thread tid and update the vector clock of the
 * last segment of this thread with the vector clock of segment sg. Call
 * this function after thread tid had to wait because of thread
 * synchronization until the memory accesses in the segment sg finished.
 *
 * @param[in] tid  Thread that waited.
 * @param[in] sg   Segment whose accesses are now ordered before tid.
 */
void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Expensive check; only performed if DRD_(verify_conflict_set) is set. */
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   tl_assert(sg);

   thread_append_segment(tid, DRD_(sg_new)(tid, tid));

   /* thread_combine_vc_sync() also updates the conflict set. */
   thread_combine_vc_sync(tid, sg);

   /* Periodically compact segment lists to bound memory use. */
   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}
1192 * Call this function whenever a thread is no longer using the memory
1193 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1196 void DRD_(thread_stop_using_mem
)(const Addr a1
, const Addr a2
)
1200 for (p
= DRD_(g_sg_list
); p
; p
= p
->g_next
)
1201 DRD_(bm_clear
)(DRD_(sg_bm
)(p
), a1
, a2
);
1203 DRD_(bm_clear
)(DRD_(g_conflict_set
), a1
, a2
);
/** Specify whether memory loads should be recorded.
 *
 * @param[in] tid      Thread to configure.
 * @param[in] enabled  True to record loads, False to ignore them.
 */
void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Bool must be exactly True or False, not any nonzero value. */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
}
/** Specify whether memory stores should be recorded.
 *
 * @param[in] tid      Thread to configure.
 * @param[in] enabled  True to record stores, False to ignore them.
 */
void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Bool must be exactly True or False, not any nonzero value. */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
}
/**
 * Print the segment information for all threads.
 *
 * This function is only used for debugging purposes.
 */
void DRD_(thread_print_all)(void)
{
   UInt i;
   Segment* p;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      p = DRD_(g_threadinfo)[i].sg_first;
      if (p)
      {
         /* Only print threads that have at least one segment. */
         VG_(printf)("**************\n"
                     "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
                     "**************\n",
                     i,
                     DRD_(g_threadinfo)[i].valid,
                     DRD_(g_threadinfo)[i].vg_thread_exists,
                     DRD_(g_threadinfo)[i].vg_threadid,
                     DRD_(g_threadinfo)[i].posix_thread_exists,
                     DRD_(g_threadinfo)[i].pt_threadid,
                     DRD_(g_threadinfo)[i].detached_posix_thread);
         for ( ; p; p = p->thr_next)
            DRD_(sg_print)(p);
      }
   }
}
/** Show a call stack involved in a data race.
 *
 * Prints the saved call stack if one is available; otherwise prints the
 * current stack trace of the Valgrind thread, or a placeholder message if
 * the thread already finished.
 *
 * @param[in] tid        DRD thread the stack belongs to.
 * @param[in] callstack  Saved call stack, or NULL to use the live stack.
 */
static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
{
   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);

   if (vg_tid != VG_INVALID_THREADID) {
      if (callstack)
         VG_(pp_ExeContext)(callstack);
      else
         /* No saved context; print the thread's current stack instead. */
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   } else {
      /* NOTE(review): reconstructed branch — presumably suppressed in XML
       * mode like the other user messages in this file; verify upstream. */
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      " (thread finished, call stack no longer available)\n");
   }
}
/** Print information about the segments involved in a data race.
 *
 * For segment p of thread tid, reports every segment of every other thread
 * that is unordered with respect to p and touches [addr, addr+size[ with a
 * conflicting access type.
 *
 * @param[in] tid          Thread that owns segment p.
 * @param[in] addr         Start address of the accessed range.
 * @param[in] size         Size in bytes of the accessed range.
 * @param[in] access_type  Load or store.
 * @param[in] p            Segment in which the reported access occurred.
 */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   UInt i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++) {
      if (i != tid) {
         Segment* q;

         for (q = DRD_(g_threadinfo)[i].sg_last; q; q = q->thr_prev) {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->next->vc <= p->vc will also hold. Hence, break out of the
             * loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            /* Unordered in both directions means the segments may race. */
            if (!DRD_(vc_lte)(&p->vc, &q->vc)) {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type)) {
                  Segment* q_next;

                  tl_assert(q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  <other_segment_start>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment start (thread %d)\n", i);
                  show_call_stack(i, q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_start>\n"
                                     "  <other_segment_end>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment end (thread %d)\n", i);
                  /* The end of segment q is where its successor starts. */
                  q_next = q->thr_next;
                  show_call_stack(i, q_next ? q_next->stacktrace : 0);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_end>\n");
               }
            }
         }
      }
   }
}
/** Print information about all segments involved in a data race.
 *
 * Scans all segments of thread tid for accesses that overlap
 * [addr, addr+size[ and reports the conflicting segments of other threads
 * for each match.
 *
 * @param[in] tid          Thread that performed the racing access.
 * @param[in] addr         Start address of the accessed range.
 * @param[in] size         Size in bytes of the accessed range.
 * @param[in] access_type  Load or store.
 */
void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
                                              const Addr addr,
                                              const SizeT size,
                                              const BmAccessTypeT access_type)
{
   const Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   for (p = DRD_(g_threadinfo)[tid].sg_first; p; p = p->thr_next) {
      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
   }
}
/**
 * Verify whether the conflict set for thread tid is up to date. Only perform
 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
 *
 * Recomputes the conflict set from scratch and compares it against the
 * incrementally maintained DRD_(g_conflict_set).
 *
 * @return True if verification is disabled or the sets match.
 */
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
{
   Bool result;
   struct bitmap* computed_conflict_set = 0;

   if (!DRD_(verify_conflict_set))
      return True;

   thread_compute_conflict_set(&computed_conflict_set, tid);
   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   if (!result)
   {
      /* Dump both sets so the divergence can be inspected. */
      VG_(printf)("actual conflict set:\n");
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(printf)("\n");
      VG_(printf)("computed conflict set:\n");
      DRD_(bm_print)(computed_conflict_set);
      VG_(printf)("\n");
   }
   DRD_(bm_delete)(computed_conflict_set);
   return result;
}
/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 *
 * @param[in,out] conflict_set  Bitmap to fill; reused if already allocated,
 *                              otherwise a new bitmap is created.
 * @param[in]     tid           Must be the currently running DRD thread.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   /* Subtract the creation counters now and add them back afterwards so
    * that only the bitmaps created by this computation are counted.
    */
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   if (*conflict_set) {
      /* Reuse the existing bitmap instead of reallocating it. */
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   } else {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set) {
      char* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   /* p is the current (most recent) segment of thread tid. */
   p = DRD_(g_threadinfo)[tid].sg_last;
   {
      unsigned j;

      if (s_trace_conflict_set) {
         char* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      for (j = 0; j < DRD_N_THREADS; j++) {
         if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
            Segment* q;

            for (q = DRD_(g_threadinfo)[j].sg_last; q; q = q->thr_prev) {
               /* Merge only segments that are unordered with respect to p
                * in both directions, i.e. that may race with it.
                */
               if (!DRD_(vc_lte)(&q->vc, &p->vc)
                   && !DRD_(vc_lte)(&p->vc, &q->vc)) {
                  if (s_trace_conflict_set) {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               } else {
                  if (s_trace_conflict_set) {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm) {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}
/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 *
 * Incremental alternative to thread_compute_conflict_set(): only segments
 * whose ordering relative to tid changed between old_vc and the new clock
 * are marked, cleared and re-merged.
 *
 * @param[in] tid     Must be the currently running DRD thread.
 * @param[in] old_vc  Vector clock of tid before the update; must satisfy
 *                    old_vc <= new_vc.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set) {
      char* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = DRD_(thread_get_vc)(tid);
   /* Vector clocks only advance, hence the incremental update is valid. */
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

   DRD_(bm_unmark)(DRD_(g_conflict_set));

   /* Phase 1: mark the address ranges of every segment whose membership in
    * the conflict set changed between old_vc and new_vc.
    */
   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      /* Segments ordered after new_vc: in neither old nor new conflict set
       * unless they were unordered to old_vc.
       */
      for (q = DRD_(g_threadinfo)[j].sg_last;
           q && !DRD_(vc_lte)(&q->vc, new_vc);
           q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            char* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

      /* Continue with segments between old_vc and new_vc; segments ordered
       * before old_vc were in neither set and can be skipped entirely.
       */
      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc)
              && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            char* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

   /* Phase 2: clear the marked ranges, then re-merge the segments that
    * still conflict with the current segment of tid.
    */
   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   p = DRD_(g_threadinfo)[tid].sg_last;
   for (j = 0; j < DRD_N_THREADS; j++) {
      if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
         Segment* q;

         for (q = DRD_(g_threadinfo)[j].sg_last;
              q && !DRD_(vc_lte)(&q->vc, &p->vc);
              q = q->thr_prev) {
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   /* Expensive check; only performed if DRD_(verify_conflict_set) is set. */
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}
/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}
/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}
1596 /** Return how many times the conflict set has been updated entirely. */
1597 ULong
DRD_(thread_get_compute_conflict_set_count
)()
1599 return s_compute_conflict_set_count
;
/** Return how many times the conflict set has been updated partially. */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}
/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created.
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}
/**
 * Return how many times the conflict set has been updated partially
 * because of combining vector clocks due to synchronization operations
 * other than reader/writer lock or barrier operations.
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}
/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins.
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}
/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}
/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}