drd: Add command-line option --verify-conflict-set
[valgrind.git] / drd / drd_thread.c
blobd9cb0a9d2bca4e492b4a6055c3edb65db859b538
1 /*
2 This file is part of drd, a thread error detector.
4 Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19 02111-1307, USA.
21 The GNU General Public License is contained in the file COPYING.
25 #include "drd_error.h"
26 #include "drd_barrier.h"
27 #include "drd_clientobj.h"
28 #include "drd_cond.h"
29 #include "drd_mutex.h"
30 #include "drd_segment.h"
31 #include "drd_semaphore.h"
32 #include "drd_suppression.h"
33 #include "drd_thread.h"
34 #include "pub_tool_vki.h"
35 #include "pub_tool_basics.h" // Addr, SizeT
36 #include "pub_tool_libcassert.h" // tl_assert()
37 #include "pub_tool_libcbase.h" // VG_(strlen)()
38 #include "pub_tool_libcprint.h" // VG_(printf)()
39 #include "pub_tool_libcproc.h" // VG_(getenv)()
40 #include "pub_tool_machine.h"
41 #include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
42 #include "pub_tool_options.h" // VG_(clo_backtrace_size)
43 #include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
47 /* Local functions. */
49 static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
50 static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
51 static void thread_compute_conflict_set(struct bitmap** conflict_set,
52 const DrdThreadId tid);
53 static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
56 /* Local variables. */
58 static ULong s_context_switch_count;
59 static ULong s_discard_ordered_segments_count;
60 static ULong s_compute_conflict_set_count;
61 static ULong s_update_conflict_set_count;
62 static ULong s_update_conflict_set_new_sg_count;
63 static ULong s_update_conflict_set_sync_count;
64 static ULong s_update_conflict_set_join_count;
65 static ULong s_conflict_set_bitmap_creation_count;
66 static ULong s_conflict_set_bitmap2_creation_count;
67 static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
68 DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
69 ThreadInfo DRD_(g_threadinfo)[DRD_N_THREADS];
70 struct bitmap* DRD_(g_conflict_set);
71 int DRD_(verify_conflict_set) = -1;
72 static Bool s_trace_context_switches = False;
73 static Bool s_trace_conflict_set = False;
74 static Bool s_trace_conflict_set_bm = False;
75 static Bool s_trace_fork_join = False;
76 static Bool s_segment_merging = True;
77 static Bool s_new_segments_since_last_merge;
78 static int s_segment_merge_interval = 10;
79 static unsigned s_join_list_vol = 10;
80 static unsigned s_deletion_head;
81 static unsigned s_deletion_tail;
84 /* Function definitions. */
86 /** Enables/disables context switch tracing. */
87 void DRD_(thread_trace_context_switches)(const Bool t)
89 tl_assert(t == False || t == True);
90 s_trace_context_switches = t;
93 /** Enables/disables conflict set tracing. */
94 void DRD_(thread_trace_conflict_set)(const Bool t)
96 tl_assert(t == False || t == True);
97 s_trace_conflict_set = t;
100 /** Enables/disables conflict set bitmap tracing. */
101 void DRD_(thread_trace_conflict_set_bm)(const Bool t)
103 tl_assert(t == False || t == True);
104 s_trace_conflict_set_bm = t;
107 /** Report whether fork/join tracing is enabled. */
108 Bool DRD_(thread_get_trace_fork_join)(void)
110 return s_trace_fork_join;
113 /** Enables/disables fork/join tracing. */
114 void DRD_(thread_set_trace_fork_join)(const Bool t)
116 tl_assert(t == False || t == True);
117 s_trace_fork_join = t;
120 /** Enables/disables segment merging. */
121 void DRD_(thread_set_segment_merging)(const Bool m)
123 tl_assert(m == False || m == True);
124 s_segment_merging = m;
127 /** Get the segment merging interval. */
128 int DRD_(thread_get_segment_merge_interval)(void)
130 return s_segment_merge_interval;
133 /** Set the segment merging interval. */
134 void DRD_(thread_set_segment_merge_interval)(const int i)
136 s_segment_merge_interval = i;
139 void DRD_(thread_set_join_list_vol)(const int jlv)
141 s_join_list_vol = jlv;
144 void DRD_(thread_init)(void)
149 * Convert Valgrind's ThreadId into a DrdThreadId.
151 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
152 * Valgrind ThreadId does not yet exist.
154 DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
156 int i;
158 if (tid == VG_INVALID_THREADID)
159 return DRD_INVALID_THREADID;
161 for (i = 1; i < DRD_N_THREADS; i++)
163 if (DRD_(g_threadinfo)[i].vg_thread_exists == True
164 && DRD_(g_threadinfo)[i].vg_threadid == tid)
166 return i;
170 return DRD_INVALID_THREADID;
173 /** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
174 static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
176 int i;
178 tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
180 for (i = 1; i < DRD_N_THREADS; i++)
182 if (!DRD_(g_threadinfo)[i].valid)
184 tl_assert(! DRD_(IsValidDrdThreadId)(i));
186 DRD_(g_threadinfo)[i].valid = True;
187 DRD_(g_threadinfo)[i].vg_thread_exists = True;
188 DRD_(g_threadinfo)[i].vg_threadid = tid;
189 DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID;
190 DRD_(g_threadinfo)[i].stack_min = 0;
191 DRD_(g_threadinfo)[i].stack_min_min = 0;
192 DRD_(g_threadinfo)[i].stack_startup = 0;
193 DRD_(g_threadinfo)[i].stack_max = 0;
194 DRD_(thread_set_name)(i, "");
195 DRD_(g_threadinfo)[i].on_alt_stack = False;
196 DRD_(g_threadinfo)[i].is_recording_loads = True;
197 DRD_(g_threadinfo)[i].is_recording_stores = True;
198 DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
199 DRD_(g_threadinfo)[i].synchr_nesting = 0;
200 DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
201 tl_assert(DRD_(g_threadinfo)[i].sg_first == NULL);
202 tl_assert(DRD_(g_threadinfo)[i].sg_last == NULL);
204 tl_assert(DRD_(IsValidDrdThreadId)(i));
206 return i;
210 VG_(printf)(
211 "\nSorry, but the maximum number of threads supported by DRD has been exceeded."
212 "Aborting.\n");
214 tl_assert(False);
216 return DRD_INVALID_THREADID;
219 /** Convert a POSIX thread ID into a DRD thread ID. */
220 DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
222 int i;
224 if (tid != INVALID_POSIX_THREADID)
226 for (i = 1; i < DRD_N_THREADS; i++)
228 if (DRD_(g_threadinfo)[i].posix_thread_exists
229 && DRD_(g_threadinfo)[i].pt_threadid == tid)
231 return i;
235 return DRD_INVALID_THREADID;
238 /** Convert a DRD thread ID into a Valgrind thread ID. */
239 ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
241 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
242 && tid != DRD_INVALID_THREADID);
244 return (DRD_(g_threadinfo)[tid].vg_thread_exists
245 ? DRD_(g_threadinfo)[tid].vg_threadid
246 : VG_INVALID_THREADID);
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct: verify prev/next back-links and that the list
 * terminates at sg_first / sg_last in both directions.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* s;

   /* Walk forwards: every node's successor must point back at it. */
   for (s = ti->sg_first; s; s = s->thr_next) {
      if (s->thr_next && s->thr_next->thr_prev != s)
         return False;
      if (s->thr_next == 0 && s != ti->sg_last)
         return False;
   }
   /* Walk backwards: every node's predecessor must point back at it. */
   for (s = ti->sg_last; s; s = s->thr_prev) {
      if (s->thr_prev && s->thr_prev->thr_next != s)
         return False;
      if (s->thr_prev == 0 && s != ti->sg_first)
         return False;
   }
   return True;
}
#endif
276 * Create the first segment for a newly started thread.
278 * This function is called from the handler installed via
279 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
280 * from the context of the creator thread, before the new thread has been
281 * created.
283 * @param[in] creator DRD thread ID of the creator thread.
284 * @param[in] vg_created Valgrind thread ID of the created thread.
286 * @return DRD thread ID of the created thread.
288 DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
289 const ThreadId vg_created)
291 DrdThreadId created;
293 tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
294 created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
295 tl_assert(0 <= (int)created && created < DRD_N_THREADS
296 && created != DRD_INVALID_THREADID);
298 tl_assert(DRD_(g_threadinfo)[created].sg_first == NULL);
299 tl_assert(DRD_(g_threadinfo)[created].sg_last == NULL);
300 /* Create an initial segment for the newly created thread. */
301 thread_append_segment(created, DRD_(sg_new)(creator, created));
303 return created;
307 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
308 * after the thread has been created and before any client instructions are run
309 * on the newly created thread, e.g. from the handler installed via
310 * VG_(track_pre_thread_first_insn)().
312 * @param[in] vg_created Valgrind thread ID of the newly created thread.
314 * @return DRD thread ID for the new thread.
316 DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
318 const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
320 tl_assert(0 <= (int)created && created < DRD_N_THREADS
321 && created != DRD_INVALID_THREADID);
323 DRD_(g_threadinfo)[created].stack_max
324 = VG_(thread_get_stack_max)(vg_created);
325 DRD_(g_threadinfo)[created].stack_startup
326 = DRD_(g_threadinfo)[created].stack_max;
327 DRD_(g_threadinfo)[created].stack_min
328 = DRD_(g_threadinfo)[created].stack_max;
329 DRD_(g_threadinfo)[created].stack_min_min
330 = DRD_(g_threadinfo)[created].stack_max;
331 DRD_(g_threadinfo)[created].stack_size
332 = VG_(thread_get_stack_size)(vg_created);
333 tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
335 return created;
338 static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
340 int j;
342 DRD_(g_threadinfo)[tid].vg_thread_exists = False;
343 DRD_(g_threadinfo)[tid].posix_thread_exists = False;
344 DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
345 #if 0
346 VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
347 #endif
348 if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
349 for (j = 0; j < DRD_N_THREADS; ++j) {
350 if (DRD_(IsValidDrdThreadId)(j)
351 && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
353 s_deletion_tail++;
354 #if 0
355 VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
356 #endif
357 DRD_(thread_delete)(j, False);
358 break;
365 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
366 * after thread drd_joiner joined thread drd_joinee.
368 void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
370 tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
371 tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));
373 DRD_(thread_new_segment)(drd_joiner);
374 DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
375 DRD_(thread_new_segment)(drd_joinee);
377 if (s_trace_fork_join)
379 const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
380 const unsigned msg_size = 256;
381 HChar* msg;
383 msg = VG_(malloc)("drd.main.dptj.1", msg_size);
384 tl_assert(msg);
385 VG_(snprintf)(msg, msg_size,
386 "drd_post_thread_join joiner = %d, joinee = %d",
387 drd_joiner, drd_joinee);
388 if (joiner)
390 HChar* vc;
392 vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
393 VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
394 ", new vc: %s", vc);
395 VG_(free)(vc);
397 DRD_(trace_msg)("%pS", msg);
398 VG_(free)(msg);
401 if (! DRD_(get_check_stack_accesses)())
403 DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
404 - DRD_(thread_get_stack_size)(drd_joinee),
405 DRD_(thread_get_stack_max)(drd_joinee));
407 DRD_(clientobj_delete_thread)(drd_joinee);
408 DRD_(thread_delayed_delete)(drd_joinee);
412 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
413 * and accesses this data structure from multiple threads without locking.
414 * Any conflicting accesses in the range stack_startup..stack_max will be
415 * ignored.
417 void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
418 const Addr stack_startup)
420 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
421 && tid != DRD_INVALID_THREADID);
422 tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
423 tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
424 DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
427 /** Return the stack pointer for the specified thread. */
428 Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
430 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
431 && tid != DRD_INVALID_THREADID);
432 return DRD_(g_threadinfo)[tid].stack_min;
436 * Return the lowest value that was ever assigned to the stack pointer
437 * for the specified thread.
439 Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
441 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
442 && tid != DRD_INVALID_THREADID);
443 return DRD_(g_threadinfo)[tid].stack_min_min;
446 /** Return the top address for the stack of the specified thread. */
447 Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
449 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
450 && tid != DRD_INVALID_THREADID);
451 return DRD_(g_threadinfo)[tid].stack_max;
454 /** Return the maximum stack size for the specified thread. */
455 SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
457 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
458 && tid != DRD_INVALID_THREADID);
459 return DRD_(g_threadinfo)[tid].stack_size;
462 Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
464 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
465 && tid != DRD_INVALID_THREADID);
466 return DRD_(g_threadinfo)[tid].on_alt_stack;
469 void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
470 const Bool on_alt_stack)
472 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
473 && tid != DRD_INVALID_THREADID);
474 tl_assert(on_alt_stack == !!on_alt_stack);
475 DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
478 Int DRD_(thread_get_threads_on_alt_stack)(void)
480 int i, n = 0;
482 for (i = 1; i < DRD_N_THREADS; i++)
483 n += DRD_(g_threadinfo)[i].on_alt_stack;
484 return n;
488 * Clean up thread-specific data structures.
490 void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
492 Segment* sg;
493 Segment* sg_prev;
495 tl_assert(DRD_(IsValidDrdThreadId)(tid));
497 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
498 for (sg = DRD_(g_threadinfo)[tid].sg_last; sg; sg = sg_prev) {
499 sg_prev = sg->thr_prev;
500 sg->thr_next = NULL;
501 sg->thr_prev = NULL;
502 DRD_(sg_put)(sg);
504 DRD_(g_threadinfo)[tid].valid = False;
505 DRD_(g_threadinfo)[tid].vg_thread_exists = False;
506 DRD_(g_threadinfo)[tid].posix_thread_exists = False;
507 if (detached)
508 DRD_(g_threadinfo)[tid].detached_posix_thread = False;
509 else
510 tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
511 DRD_(g_threadinfo)[tid].sg_first = NULL;
512 DRD_(g_threadinfo)[tid].sg_last = NULL;
514 tl_assert(!DRD_(IsValidDrdThreadId)(tid));
518 * Called after a thread performed its last memory access and before
519 * thread_delete() is called. Note: thread_delete() is only called for
520 * joinable threads, not for detached threads.
522 void DRD_(thread_finished)(const DrdThreadId tid)
524 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
525 && tid != DRD_INVALID_THREADID);
527 DRD_(g_threadinfo)[tid].vg_thread_exists = False;
529 if (DRD_(g_threadinfo)[tid].detached_posix_thread)
532 * Once a detached thread has finished, its stack is deallocated and
533 * should no longer be taken into account when computing the conflict set.
535 DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
538 * For a detached thread, calling pthread_exit() invalidates the
539 * POSIX thread ID associated with the detached thread. For joinable
540 * POSIX threads however, the POSIX thread ID remains live after the
541 * pthread_exit() call until pthread_join() is called.
543 DRD_(g_threadinfo)[tid].posix_thread_exists = False;
547 /** Called just after fork() in the child process. */
548 void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
550 unsigned i;
552 for (i = 1; i < DRD_N_THREADS; i++)
554 if (i == tid)
555 continue;
556 if (DRD_(IsValidDrdThreadId(i)))
557 DRD_(thread_delete)(i, True);
558 tl_assert(!DRD_(IsValidDrdThreadId(i)));
562 /** Called just before pthread_cancel(). */
563 void DRD_(thread_pre_cancel)(const DrdThreadId tid)
565 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
566 && tid != DRD_INVALID_THREADID);
567 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
569 if (DRD_(thread_get_trace_fork_join)())
570 DRD_(trace_msg)("[%d] drd_thread_pre_cancel %d",
571 DRD_(g_drd_running_tid), tid);
575 * Store the POSIX thread ID for the specified thread.
577 * @note This function can be called two times for the same thread -- see also
578 * the comment block preceding the pthread_create() wrapper in
579 * drd_pthread_intercepts.c.
581 void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
583 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
584 && tid != DRD_INVALID_THREADID);
585 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
586 || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
587 tl_assert(ptid != INVALID_POSIX_THREADID);
588 DRD_(g_threadinfo)[tid].posix_thread_exists = True;
589 DRD_(g_threadinfo)[tid].pt_threadid = ptid;
592 /** Returns true for joinable threads and false for detached threads. */
593 Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
595 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
596 && tid != DRD_INVALID_THREADID);
597 return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
600 /** Store the thread mode: joinable or detached. */
601 #if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
602 /* There is a cse related issue in gcc for MIPS. Optimization level
603 has to be lowered, so cse related optimizations are not
604 included.*/
605 __attribute__((optimize("O1")))
606 #endif
607 void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
609 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
610 && tid != DRD_INVALID_THREADID);
611 tl_assert(!! joinable == joinable);
612 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
614 DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
617 /** Tells DRD that the calling thread is about to enter pthread_create(). */
618 void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
620 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
621 && tid != DRD_INVALID_THREADID);
622 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
623 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
625 DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
628 /** Tells DRD that the calling thread has left pthread_create(). */
629 void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
631 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
632 && tid != DRD_INVALID_THREADID);
633 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
634 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
636 DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
639 /** Obtain the thread number and the user-assigned thread name. */
640 const HChar* DRD_(thread_get_name)(const DrdThreadId tid)
642 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
643 && tid != DRD_INVALID_THREADID);
645 return DRD_(g_threadinfo)[tid].name;
648 /** Set the name of the specified thread. */
649 void DRD_(thread_set_name)(const DrdThreadId tid, const HChar* const name)
651 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
652 && tid != DRD_INVALID_THREADID);
654 if (name == NULL || name[0] == 0)
655 VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
656 sizeof(DRD_(g_threadinfo)[tid].name),
657 "Thread %d",
658 tid);
659 else
660 VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
661 sizeof(DRD_(g_threadinfo)[tid].name),
662 "Thread %d (%s)",
663 tid, name);
664 DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
668 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
669 * conflict set.
671 void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
673 tl_assert(vg_tid != VG_INVALID_THREADID);
675 if (vg_tid != s_vg_running_tid)
677 DRD_(thread_set_running_tid)(vg_tid,
678 DRD_(VgThreadIdToDrdThreadId)(vg_tid));
681 tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
682 tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
686 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
687 * conflict set.
689 void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
690 const DrdThreadId drd_tid)
692 tl_assert(vg_tid != VG_INVALID_THREADID);
693 tl_assert(drd_tid != DRD_INVALID_THREADID);
695 if (vg_tid != s_vg_running_tid)
697 if (s_trace_context_switches
698 && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
700 VG_(message)(Vg_DebugMsg,
701 "Context switch from thread %d to thread %d;"
702 " segments: %llu\n",
703 DRD_(g_drd_running_tid), drd_tid,
704 DRD_(sg_get_segments_alive_count)());
706 s_vg_running_tid = vg_tid;
707 DRD_(g_drd_running_tid) = drd_tid;
708 thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
709 s_context_switch_count++;
712 tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
713 tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
717 * Increase the synchronization nesting counter. Must be called before the
718 * client calls a synchronization function.
720 int DRD_(thread_enter_synchr)(const DrdThreadId tid)
722 tl_assert(DRD_(IsValidDrdThreadId)(tid));
723 return DRD_(g_threadinfo)[tid].synchr_nesting++;
727 * Decrease the synchronization nesting counter. Must be called after the
728 * client left a synchronization function.
730 int DRD_(thread_leave_synchr)(const DrdThreadId tid)
732 tl_assert(DRD_(IsValidDrdThreadId)(tid));
733 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
734 return --DRD_(g_threadinfo)[tid].synchr_nesting;
737 /** Returns the synchronization nesting counter. */
738 int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
740 tl_assert(DRD_(IsValidDrdThreadId)(tid));
741 return DRD_(g_threadinfo)[tid].synchr_nesting;
744 /** Append a new segment at the end of the segment list. */
745 static
746 void thread_append_segment(const DrdThreadId tid, Segment* const sg)
748 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
749 && tid != DRD_INVALID_THREADID);
751 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
752 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
753 #endif
755 // add at tail
756 sg->thr_prev = DRD_(g_threadinfo)[tid].sg_last;
757 sg->thr_next = NULL;
758 if (DRD_(g_threadinfo)[tid].sg_last)
759 DRD_(g_threadinfo)[tid].sg_last->thr_next = sg;
760 DRD_(g_threadinfo)[tid].sg_last = sg;
761 if (DRD_(g_threadinfo)[tid].sg_first == NULL)
762 DRD_(g_threadinfo)[tid].sg_first = sg;
764 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
765 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
766 #endif
770 * Remove a segment from the segment list of thread threadid, and free the
771 * associated memory.
773 static
774 void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
776 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
777 && tid != DRD_INVALID_THREADID);
779 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
780 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
781 #endif
783 if (sg->thr_prev)
784 sg->thr_prev->thr_next = sg->thr_next;
785 if (sg->thr_next)
786 sg->thr_next->thr_prev = sg->thr_prev;
787 if (sg == DRD_(g_threadinfo)[tid].sg_first)
788 DRD_(g_threadinfo)[tid].sg_first = sg->thr_next;
789 if (sg == DRD_(g_threadinfo)[tid].sg_last)
790 DRD_(g_threadinfo)[tid].sg_last = sg->thr_prev;
791 DRD_(sg_put)(sg);
793 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
794 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
795 #endif
799 * Returns a pointer to the vector clock of the most recent segment associated
800 * with thread 'tid'.
802 VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
804 Segment* latest_sg;
806 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
807 && tid != DRD_INVALID_THREADID);
808 latest_sg = DRD_(g_threadinfo)[tid].sg_last;
809 tl_assert(latest_sg);
810 return &latest_sg->vc;
814 * Return the latest segment of thread 'tid' and increment its reference count.
816 void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
818 Segment* latest_sg;
820 tl_assert(sg);
821 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
822 && tid != DRD_INVALID_THREADID);
823 latest_sg = DRD_(g_threadinfo)[tid].sg_last;
824 tl_assert(latest_sg);
826 DRD_(sg_put)(*sg);
827 *sg = DRD_(sg_get)(latest_sg);
831 * Compute the minimum of all latest vector clocks of all threads
832 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
834 * @param vc pointer to a vectorclock, holds result upon return.
836 static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
838 unsigned i;
839 Bool first;
840 Segment* latest_sg;
842 first = True;
843 for (i = 0; i < DRD_N_THREADS; i++)
845 latest_sg = DRD_(g_threadinfo)[i].sg_last;
846 if (latest_sg) {
847 if (first)
848 DRD_(vc_assign)(vc, &latest_sg->vc);
849 else
850 DRD_(vc_min)(vc, &latest_sg->vc);
851 first = False;
857 * Compute the maximum of all latest vector clocks of all threads.
859 * @param vc pointer to a vectorclock, holds result upon return.
861 static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
863 unsigned i;
864 Bool first;
865 Segment* latest_sg;
867 first = True;
868 for (i = 0; i < DRD_N_THREADS; i++)
870 latest_sg = DRD_(g_threadinfo)[i].sg_last;
871 if (latest_sg) {
872 if (first)
873 DRD_(vc_assign)(vc, &latest_sg->vc);
874 else
875 DRD_(vc_combine)(vc, &latest_sg->vc);
876 first = False;
882 * Discard all segments that have a defined order against the latest vector
883 * clock of all threads -- these segments can no longer be involved in a
884 * data race.
886 static void thread_discard_ordered_segments(void)
888 unsigned i;
889 VectorClock thread_vc_min;
891 s_discard_ordered_segments_count++;
893 DRD_(vc_init)(&thread_vc_min, 0, 0);
894 DRD_(thread_compute_minimum_vc)(&thread_vc_min);
895 if (DRD_(sg_get_trace)())
897 HChar *vc_min, *vc_max;
898 VectorClock thread_vc_max;
900 DRD_(vc_init)(&thread_vc_max, 0, 0);
901 DRD_(thread_compute_maximum_vc)(&thread_vc_max);
902 vc_min = DRD_(vc_aprint)(&thread_vc_min);
903 vc_max = DRD_(vc_aprint)(&thread_vc_max);
904 VG_(message)(Vg_DebugMsg,
905 "Discarding ordered segments -- min vc is %s, max vc is %s\n",
906 vc_min, vc_max);
907 VG_(free)(vc_min);
908 VG_(free)(vc_max);
909 DRD_(vc_cleanup)(&thread_vc_max);
912 for (i = 0; i < DRD_N_THREADS; i++) {
913 Segment* sg;
914 Segment* sg_next;
916 for (sg = DRD_(g_threadinfo)[i].sg_first;
917 sg && (sg_next = sg->thr_next)
918 && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
919 sg = sg_next)
921 thread_discard_segment(i, sg);
924 DRD_(vc_cleanup)(&thread_vc_min);
928 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
929 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
930 * all segments in the set CS are ordered consistently against both sg1 and
931 * sg2. The set CS is defined as the set of segments that can immediately
932 * precede future segments via inter-thread synchronization operations. In
933 * DRD the set CS consists of the latest segment of each thread combined with
934 * all segments for which the reference count is strictly greater than one.
935 * The code below is an optimized version of the following:
937 * for (i = 0; i < DRD_N_THREADS; i++)
939 * Segment* sg;
941 * for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
943 * if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
945 * if ( DRD_(vc_lte)(&sg1->vc, &sg->vc)
946 * != DRD_(vc_lte)(&sg2->vc, &sg->vc)
947 * || DRD_(vc_lte)(&sg->vc, &sg1->vc)
948 * != DRD_(vc_lte)(&sg->vc, &sg2->vc))
950 * return False;
956 static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
957 Segment* const sg1,
958 Segment* const sg2)
960 unsigned i;
962 tl_assert(sg1->thr_next);
963 tl_assert(sg2->thr_next);
964 tl_assert(sg1->thr_next == sg2);
965 tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));
967 for (i = 0; i < DRD_N_THREADS; i++)
969 Segment* sg;
971 for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
972 if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
973 if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
974 break;
975 if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
976 return False;
979 for (sg = DRD_(g_threadinfo)[i].sg_last; sg; sg = sg->thr_prev) {
980 if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
981 if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
982 break;
983 if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
984 return False;
988 return True;
992 * Merge all segments that may be merged without triggering false positives
993 * or discarding real data races. For the theoretical background of segment
994 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
995 * and Koen De Bosschere. Bounding the number of segment histories during
996 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
997 * pp 1221-1238, September 2002. This paper contains a proof that merging
998 * consecutive segments for which the property equiv(s1,s2) holds can be
999 * merged without reducing the accuracy of datarace detection. Furthermore
1000 * it is also proven that the total number of all segments will never grow
1001 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
1002 * every time a new segment is created. The property equiv(s1, s2) is defined
1003 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
1004 * clocks of segments s and s1 are ordered in the same way as those of segments
1005 * s and s2. The set CS is defined as the set of existing segments s that have
1006 * the potential to conflict with not yet created segments, either because the
1007 * segment s is the latest segment of a thread or because it can become the
1008 * immediate predecessor of a new segment due to a synchronization operation.
1010 static void thread_merge_segments(void)
1012 unsigned i;
1014 s_new_segments_since_last_merge = 0;
1016 for (i = 0; i < DRD_N_THREADS; i++)
1018 Segment* sg;
1020 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
1021 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
1022 #endif
1024 for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
1025 if (DRD_(sg_get_refcnt)(sg) == 1 && sg->thr_next) {
1026 Segment* const sg_next = sg->thr_next;
1027 if (DRD_(sg_get_refcnt)(sg_next) == 1
1028 && sg_next->thr_next
1029 && thread_consistent_segment_ordering(i, sg, sg_next))
1031 /* Merge sg and sg_next into sg. */
1032 DRD_(sg_merge)(sg, sg_next);
1033 thread_discard_segment(i, sg_next);
1038 #ifdef ENABLE_DRD_CONSISTENCY_CHECKS
1039 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
1040 #endif
/**
 * Create a new segment for the specified thread, and discard any segments
 * that cannot cause races anymore.
 */
void DRD_(thread_new_segment)(const DrdThreadId tid)
{
   Segment* last_sg;
   Segment* new_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   /* Remember the previous last segment: its vector clock is the "old"
    * clock needed to update the conflict set incrementally below. */
   last_sg = DRD_(g_threadinfo)[tid].sg_last;
   new_sg = DRD_(sg_new)(tid, tid);
   thread_append_segment(tid, new_sg);
   if (tid == DRD_(g_drd_running_tid) && last_sg)
   {
      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
      s_update_conflict_set_new_sg_count++;
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   /* Periodically discard ordered segments and merge equivalent ones to
    * bound memory usage (see thread_merge_segments()). */
   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}
/** Call this function after thread 'joiner' joined thread 'joinee'. */
void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_first);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_last);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_first);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_last);

   if (DRD_(sg_get_trace)())
   {
      HChar *str1, *str2;
      str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
                   str1, str2);
      VG_(free)(str1);
      VG_(free)(str2);
   }
   if (joiner == DRD_(g_drd_running_tid)) {
      VectorClock old_vc;

      /* The conflict set tracks the running thread; save the joiner's old
       * vector clock so the conflict set can be updated incrementally. */
      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
      DRD_(thread_update_conflict_set)(joiner, &old_vc);
      s_update_conflict_set_join_count++;
      DRD_(vc_cleanup)(&old_vc);
   } else {
      /* Not the running thread: no conflict-set update is needed here. */
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
   }

   /* The join may have ordered previously-unordered segments. */
   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)()) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}
/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
 */
static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
{
   const VectorClock* const vc = &sg->vc;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].sg_first);
   tl_assert(DRD_(g_threadinfo)[tid].sg_last);
   tl_assert(sg);
   tl_assert(vc);

   if (tid != sg->tid) {
      VectorClock old_vc;

      /* Save the pre-sync clock so the conflict set can be updated
       * incrementally after the combine. */
      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
      DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
      if (DRD_(sg_get_trace)()) {
         HChar *str1, *str2;
         str1 = DRD_(vc_aprint)(&old_vc);
         str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
         VG_(free)(str1);
         VG_(free)(str2);
      }

      /* The combine may have ordered previously-unordered segments. */
      thread_discard_ordered_segments();

      DRD_(thread_update_conflict_set)(tid, &old_vc);
      s_update_conflict_set_sync_count++;

      DRD_(vc_cleanup)(&old_vc);
   } else {
      /* Synchronizing with a segment of the same thread must not move the
       * thread's clock backwards. */
      tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
   }
}
/**
 * Create a new segment for thread tid and update the vector clock of the last
 * segment of this thread with the vector clock of segment sg. Call this
 * function after thread tid had to wait because of thread synchronization
 * until the memory accesses in the segment sg finished.
 */
void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   tl_assert(sg);

   thread_append_segment(tid, DRD_(sg_new)(tid, tid));

   /* thread_combine_vc_sync() also updates the conflict set. */
   thread_combine_vc_sync(tid, sg);

   /* Periodically discard ordered segments and merge equivalent ones to
    * bound memory usage (see thread_merge_segments()). */
   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}
1190 * Call this function whenever a thread is no longer using the memory
1191 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1192 * increase.
1194 void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
1196 Segment* p;
1198 for (p = DRD_(g_sg_list); p; p = p->g_next)
1199 DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
1201 DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
/** Specify whether memory loads should be recorded for thread tid. */
void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Guard against callers passing a non-normalized boolean value. */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
}
/** Specify whether memory stores should be recorded for thread tid. */
void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Guard against callers passing a non-normalized boolean value. */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
}
1225 * Print the segment information for all threads.
1227 * This function is only used for debugging purposes.
1229 void DRD_(thread_print_all)(void)
1231 unsigned i;
1232 Segment* p;
1234 for (i = 0; i < DRD_N_THREADS; i++)
1236 p = DRD_(g_threadinfo)[i].sg_first;
1237 if (p) {
1238 VG_(printf)("**************\n"
1239 "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
1240 "**************\n",
1242 DRD_(g_threadinfo)[i].valid,
1243 DRD_(g_threadinfo)[i].vg_thread_exists,
1244 DRD_(g_threadinfo)[i].vg_threadid,
1245 DRD_(g_threadinfo)[i].posix_thread_exists,
1246 DRD_(g_threadinfo)[i].pt_threadid,
1247 DRD_(g_threadinfo)[i].detached_posix_thread);
1248 for ( ; p; p = p->thr_next)
1249 DRD_(sg_print)(p);
/** Show a call stack involved in a data race. */
static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
{
   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);

   if (vg_tid != VG_INVALID_THREADID) {
      /* Prefer the recorded context; fall back to the thread's current
       * stack trace if no context was recorded. */
      if (callstack)
         VG_(pp_ExeContext)(callstack);
      else
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   } else {
      /* The thread has already finished; its stack is gone. Suppress this
       * informational message in XML mode to keep the output well-formed. */
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "   (thread finished, call stack no longer available)\n");
   }
}
/** Print information about the segments involved in a data race. */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   /* For every other thread, report each segment that is unordered with
    * respect to p and that conflicts on [addr, addr + size[. */
   for (i = 0; i < DRD_N_THREADS; i++) {
      if (i != tid) {
         Segment* q;

         for (q = DRD_(g_threadinfo)[i].sg_last; q; q = q->thr_prev) {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->next->vc <= p->vc will also hold. Hence, break out of the
             * loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            if (!DRD_(vc_lte)(&p->vc, &q->vc)) {
               /* p and q are unordered, i.e. potentially racing. */
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type)) {
                  Segment* q_next;

                  tl_assert(q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  <other_segment_start>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment start (thread %d)\n", i);
                  show_call_stack(i, q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_start>\n"
                                     "  <other_segment_end>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment end (thread %d)\n", i);
                  /* The end of segment q is where the next segment of the
                   * same thread starts, if any. */
                  q_next = q->thr_next;
                  show_call_stack(i, q_next ? q_next->stacktrace : 0);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_end>\n");
               }
            }
         }
      }
   }
}
1327 /** Print information about all segments involved in a data race. */
1328 void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
1329 const Addr addr,
1330 const SizeT size,
1331 const BmAccessTypeT access_type)
1333 Segment* p;
1335 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1336 && tid != DRD_INVALID_THREADID);
1338 for (p = DRD_(g_threadinfo)[tid].sg_first; p; p = p->thr_next) {
1339 if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
1340 thread_report_conflicting_segments_segment(tid, addr, size,
1341 access_type, p);
/**
 * Verify whether the conflict set for thread tid is up to date. Only perform
 * the check if the --verify-conflict-set command-line option has been
 * enabled, or — if that option was left unspecified — if the environment
 * variable DRD_VERIFY_CONFLICT_SET has been set.
 */
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
{
   Bool result;
   struct bitmap* computed_conflict_set = 0;

   /* A negative value means "not specified on the command line"; in that
    * case fall back to the DRD_VERIFY_CONFLICT_SET environment variable. */
   if (DRD_(verify_conflict_set) < 0)
      DRD_(verify_conflict_set) = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;

   if (DRD_(verify_conflict_set) == 0)
      return True;

   /* Recompute the conflict set from scratch and compare it with the
    * incrementally maintained one. */
   thread_compute_conflict_set(&computed_conflict_set, tid);
   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   if (! result)
   {
      VG_(printf)("actual conflict set:\n");
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(printf)("\n");
      VG_(printf)("computed conflict set:\n");
      DRD_(bm_print)(computed_conflict_set);
      VG_(printf)("\n");
   }
   DRD_(bm_delete)(computed_conflict_set);
   return result;
}
/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   /* Subtract now, add back after the computation, so that the counters
    * accumulate only the bitmaps created by this function. */
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   /* Reuse the caller's bitmap if one was passed in; allocate otherwise. */
   if (*conflict_set) {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   } else {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   /* Only the last (current) segment of thread tid matters here. */
   p = DRD_(g_threadinfo)[tid].sg_last;
   {
      unsigned j;

      if (s_trace_conflict_set) {
         HChar* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      /* Merge the access bitmap of every segment of every other thread that
       * is unordered with respect to p (neither q <= p nor p <= q). */
      for (j = 0; j < DRD_N_THREADS; j++) {
         if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
            Segment* q;

            for (q = DRD_(g_threadinfo)[j].sg_last; q; q = q->thr_prev) {
               if (!DRD_(vc_lte)(&q->vc, &p->vc)
                   && !DRD_(vc_lte)(&p->vc, &q->vc)) {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               } else {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm) {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}
/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = DRD_(thread_get_vc)(tid);
   /* Vector clocks only advance; the incremental update below relies on
    * old_vc <= new_vc. */
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

   /* Phase 1: mark the parts of the conflict set whose membership may have
    * changed, i.e. segments that were in the conflict set w.r.t. old_vc but
    * not w.r.t. new_vc (or vice versa). */
   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      /* Segments with q->vc not <= new_vc: unordered w.r.t. the new clock
       * unless new_vc <= q->vc. Iterated in order of decreasing vector
       * clocks, so the loop can stop at the first ordered segment. */
      for (q = DRD_(g_threadinfo)[j].sg_last;
           q && !DRD_(vc_lte)(&q->vc, new_vc);
           q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

      /* Continue with the segments between old_vc and new_vc: these were
       * unordered w.r.t. old_vc but may have become ordered w.r.t. the
       * new clock, in which case they drop out of the conflict set. */
      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc)
            && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

   /* Phase 2: clear the marked parts ... */
   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   /* ... and re-merge, within the marked regions only, all segments that are
    * unordered w.r.t. the current (last) segment of thread tid. */
   p = DRD_(g_threadinfo)[tid].sg_last;
   for (j = 0; j < DRD_N_THREADS; j++) {
      if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
         Segment* q;
         for (q = DRD_(g_threadinfo)[j].sg_last;
              q && !DRD_(vc_lte)(&q->vc, &p->vc);
              q = q->thr_prev) {
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}
/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}
/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}
1597 /** Return how many times the conflict set has been updated entirely. */
1598 ULong DRD_(thread_get_compute_conflict_set_count)()
1600 return s_compute_conflict_set_count;
/** Return how many times the conflict set has been updated partially. */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}
/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created.
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}
/**
 * Return how many times the conflict set has been updated partially
 * because of combining vector clocks due to synchronization operations
 * other than reader/writer lock or barrier operations.
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}
/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins.
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}
/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}
/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}