/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_addrdescr.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "libhb.h"
#include "hg_errors.h"            /* self */

/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/

/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}

static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}

static HChar* string_table_strdup ( const HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (UWord*)&copy, (UWord)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
      return copy;
   }
}
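
/* Note, informally: because the table holds exactly one interned copy
   per distinct string, two calls with equal contents return the same
   pointer (illustrative only):

      HChar* a = string_table_strdup("EBUSY");
      HChar* b = string_table_strdup("EBUSY");
      tl_assert(a == b);

   This is what allows fields such as PthAPIerror.errstr below to be
   kept as persistent tool-arena pointers without further copying. */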

/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}

static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return  1;
   return 0;
}

/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks double
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often.
*/
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
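
/* Informal summary of the two calling modes (see the callers below):

      mk_LockP_from_LockN(lk, False)  -- 'lk' must still be a live LockN;
                                         asserts if it has been freed
                                         (the record_error_* paths)
      mk_LockP_from_LockN(lk, True)   -- 'lk' may already have been freed,
                                         in which case Lock_INVALID is
                                         returned instead of a LockP
                                         (the lockset-enumeration paths)  */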

static Int sort_by_guestaddr(const void* n1, const void* n2)
{
   const Lock* l1 = *(const Lock *const *)n1;
   const Lock* l2 = *(const Lock *const *)n2;

   Addr a1 = l1 == Lock_INVALID ? 0 : l1->guestaddr;
   Addr a2 = l2 == Lock_INVALID ? 0 : l2->guestaddr;
   if (a1 < a2) return -1;
   if (a1 > a2) return  1;
   return 0;
}

/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   if (nLocks > 0) {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   UWord i;
   /* Convert to LockPs. */
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }
   /* Sort the locks by increasing Lock::guestaddr to avoid jitters
      in the output. */
   VG_(ssort)(lockPs, nLockNs, sizeof lockPs[0], sort_by_guestaddr);

   return lockPs;
}
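
/* A typical traversal of the returned vector looks like this
   (illustrative sketch; count_LockP_vector below does essentially
   the same thing):

      Lock** vec = enumerate_WordSet_into_LockP_vector(...);
      for (UWord ix = 0; vec[ix]; ix++)
         if (vec[ix] != Lock_INVALID)
            ... use vec[ix] ...

   The terminating NULL makes the length implicit, and Lock_INVALID
   entries stand for locks that had already been freed. */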

/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}

/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}

/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

   FIXME: how does state printing interact with lockset gc?
   Are the locksets in prev/curr state always valid?
   Ditto question for the threadsets
      ThreadSets - probably are always valid if Threads
      are never thrown away.
      LockSets - could at least print the lockset elements that
      correspond to actual locks at the time of printing.  Hmm.
*/

/* Error kinds */
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;

/* Extra contexts for kinds */
typedef
   struct {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            AddrInfo    data_addrinfo;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;   /* doing the unlocking */
            Thread* owner; /* thread that actually holds the lock */
            Lock*   lock;  /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread* thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Lock*       shouldbe_earlier_lk;
            Lock*       shouldbe_later_lk;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;

static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}

/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;

/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( const Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      HG_(describe_addr) (VG_(get_ExeContext_epoch)(VG_(get_error_where)(err)),
                          xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}
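
/* Informal note on history levels (an observation about the code above,
   not a spec): the h2_* "conflicting access" details can only be filled
   in when HG_(clo_history_level) is at least 2 (--history-level=full),
   since only then does libhb's event map have previous accesses to look
   up.  At lower levels the report falls back on the h1_* segment-bound
   info supplied to HG_(record_error_Race), or just the current access. */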

void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
      VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
      if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                         data_addr, VG_(pp_SectKind)(sect));
      /* SectGOTPLT is required on ???-linux */
      if (sect == Vg_SectGOTPLT) return;
      /* SectPLT is required on ppc32/64-linux */
      if (sect == Vg_SectPLT) return;
      /* SectGOT is required on arm-linux */
      if (sect == Vg_SectGOT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr = data_addr;
   xe.XE.Race.szB       = szB;
   xe.XE.Race.isWrite   = isWrite;
   xe.XE.Race.thr       = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}

void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}

void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}

void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}

void HG_(record_error_LockOrder)(
        Thread*     thr,
        Lock*       shouldbe_earlier_lk,
        Lock*       shouldbe_later_lk,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr = thr;
   xe.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}

void HG_(record_error_PthAPIerror) ( Thread* thr, const HChar* fnname,
                                     Word err, const HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}

void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}

void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}

Bool HG_(eq_Error) ( VgRes not_used, const Error* e1, const Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}

/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}

/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                      thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                      thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}

/* Announce 'lk'. */
static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);

   if (VG_(clo_xml)) {
      if (lk->appeared_at) {
         emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
               (void*)lk );
         VG_(pp_ExeContext)( lk->appeared_at );
      }

   } else {
      if (lk->appeared_at) {
         VG_(umsg)( " Lock at %p was first observed\n",
                    (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      } else {
         VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
                    (void*)lk->guestaddr );
      }
      HG_(get_and_pp_addrdescr)
         (lk->appeared_at
             ? VG_(get_ExeContext_epoch)(lk->appeared_at)
             : VG_(current_DiEpoch)(),
          lk->guestaddr);
      VG_(umsg)("\n");
   }
}

/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}

static void show_LockP_summary_textmode ( Lock** locks, const HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0) {
      VG_(umsg)( "%sLocks held: none", pre );
   } else {
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );
   }

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr);
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}

/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message.
*/
void HG_(before_pp_Error) ( const Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         if (xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
            Thread* thr = get_admin_threads();
            while (thr) {
               if (thr->errmsg_index
                   == xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
                  announce_one_thread (thr);
                  break;
               }
               thr = thr->admin;
            }
         }
         break;
      default:
         tl_assert(0);
   }
}

void HG_(pp_Error) ( const Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

   switch (VG_(get_error_kind)(err)) {

      case XE_Misc: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d: %s</text>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.Misc.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
               emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
               if (xe->XE.Misc.auxctx)
                  VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

         } else {

            emit( "Thread #%d: %s\n",
                  (Int)xe->XE.Misc.thr->errmsg_index,
                  xe->XE.Misc.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.Misc.auxstr) {
               emit(" %s\n", xe->XE.Misc.auxstr);
               if (xe->XE.Misc.auxctx)
                  VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
            }

         }
         break;
      }

      case XE_LockOrder: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                  "violated</text>\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.LockOrder.shouldbe_earlier_ec
                && xe->XE.LockOrder.shouldbe_later_ec) {
               emit( "  <auxwhat>Required order was established by "
                     "acquisition of lock at %p</auxwhat>\n",
                     (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
               emit( "  <auxwhat>followed by a later acquisition "
                     "of lock at %p</auxwhat>\n",
                     (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
            }
            announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
            announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

         } else {

            emit( "Thread #%d: lock order \"%p before %p\" violated\n",
                  (Int)xe->XE.LockOrder.thr->errmsg_index,
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            emit( "\n" );
            emit( "Observed (incorrect) order is: "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr);
            if (xe->XE.LockOrder.actual_earlier_ec) {
               VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
            } else {
               emit("   (stack unavailable)\n");
            }
            emit( "\n" );
            emit(" followed by a later acquisition of lock at %p\n",
                 (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr);
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            if (xe->XE.LockOrder.shouldbe_earlier_ec
                && xe->XE.LockOrder.shouldbe_later_ec) {
               emit("\n");
               emit( "Required order was established by "
                     "acquisition of lock at %p\n",
                     (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
               emit( "\n" );
               emit( " followed by a later acquisition of lock at %p\n",
                     (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
               VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
            }
            emit("\n");
            announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
            announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

         }

         break;
      }

      case XE_PthAPIerror: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit(
               "    <text>Thread #%d's call to %pS failed</text>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index,
               xe->XE.PthAPIerror.fnname );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            emit( "  <what>with error code %ld (%s)</what>\n",
                  xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         } else {

            emit( "Thread #%d's call to %pS failed\n",
                  (Int)xe->XE.PthAPIerror.thr->errmsg_index,
                  xe->XE.PthAPIerror.fnname );
            emit( "   with error code %ld (%s)\n",
                  xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         }

         break;
      }

      case XE_UnlockBogus: {
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked an invalid "
                  "lock at %p</text>\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                  (void*)xe->XE.UnlockBogus.lock_ga );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         } else {

            emit( "Thread #%d unlocked an invalid lock at %p\n",
                  (Int)xe->XE.UnlockBogus.thr->errmsg_index,
                  (void*)xe->XE.UnlockBogus.lock_ga );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         }

         break;
      }

      case XE_UnlockForeign: {
         tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked lock at %p "
                  "currently held by thread #%d</text>\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                  (void*)xe->XE.UnlockForeign.lock->guestaddr,
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockForeign.lock );

         } else {

            emit( "Thread #%d unlocked lock at %p "
                  "currently held by thread #%d\n",
                  (Int)xe->XE.UnlockForeign.thr->errmsg_index,
                  (void*)xe->XE.UnlockForeign.lock->guestaddr,
                  (Int)xe->XE.UnlockForeign.owner->errmsg_index );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockForeign.lock );

         }

         break;
      }

      case XE_UnlockUnlocked: {
         tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
         tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

         if (xml) {

            emit( "  <xwhat>\n" );
            emit( "    <text>Thread #%d unlocked a "
                  "not-locked lock at %p</text>\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockUnlocked.lock);

         } else {

            emit( "Thread #%d unlocked a not-locked lock at %p\n",
                  (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
                  (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );
            announce_LockP ( xe->XE.UnlockUnlocked.lock);

         }

         break;
      }

      case XE_Race: {
         Addr         err_ga;
         const HChar* what;
         Int          szB;
         what   = xe->XE.Race.isWrite ? "write" : "read";
         szB    = xe->XE.Race.szB;
         err_ga = VG_(get_error_address)(err);

         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
         if (xe->XE.Race.h2_ct)
            tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

         if (xml) {

            /* ------ XML ------ */
            emit( "  <xwhat>\n" );
            emit( "    <text>Possible data race during %s of size %d "
                  "at %p by thread #%d</text>\n",
                  what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  (Int)xe->XE.Race.thr->errmsg_index );
            emit( "  </xwhat>\n" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            if (xe->XE.Race.h2_ct) {
               tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
               emit( "  <xauxwhat>\n");
               emit( "    <text>This conflicts with a previous %s of size %d "
                     "by thread #%d</text>\n",
                     xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                     xe->XE.Race.h2_ct_accSzB,
                     xe->XE.Race.h2_ct->errmsg_index );
               emit( "    <hthreadid>%d</hthreadid>\n",
                     xe->XE.Race.h2_ct->errmsg_index);
               emit("  </xauxwhat>\n");
               VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
            }

            if (xe->XE.Race.h1_ct) {
               emit( "  <xauxwhat>\n");
               emit( "    <text>This conflicts with a previous access "
                     "by thread #%d, after</text>\n",
                     xe->XE.Race.h1_ct->errmsg_index );
               emit( "    <hthreadid>%d</hthreadid>\n",
                     xe->XE.Race.h1_ct->errmsg_index );
               emit("  </xauxwhat>\n");
               if (xe->XE.Race.h1_ct_mbsegstartEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
               } else {
                  emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
               }
               emit( "  <auxwhat>but before</auxwhat>\n" );
               if (xe->XE.Race.h1_ct_mbsegendEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
               } else {
                  emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
               }
            }

         } else {

            /* ------ Text ------ */
            announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                          xe->XE.Race.h2_ct_locksHeldW );

            emit( "Possible data race during %s of size %d "
                  "at %p by thread #%d\n",
                  what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

            tl_assert(xe->XE.Race.locksHeldW);
            show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
            VG_(pp_ExeContext)( VG_(get_error_where)(err) );

            if (xe->XE.Race.h2_ct) {
               tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
               tl_assert(xe->XE.Race.h2_ct_locksHeldW);
               emit( "\n" );
               emit( "This conflicts with a previous %s of size %d "
                     "by thread #%d\n",
                     xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                     xe->XE.Race.h2_ct_accSzB,
                     xe->XE.Race.h2_ct->errmsg_index );
               show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
               VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
            }

            if (xe->XE.Race.h1_ct) {
               emit( " This conflicts with a previous access by thread #%d, "
                     "after\n",
                     xe->XE.Race.h1_ct->errmsg_index );
               if (xe->XE.Race.h1_ct_mbsegstartEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
               } else {
                  emit( "   (the start of the thread)\n" );
               }
               emit( " but before\n" );
               if (xe->XE.Race.h1_ct_mbsegendEC) {
                  VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
               } else {
                  emit( "   (the end of the thread)\n" );
               }
            }

         }
         VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
         break; /* case XE_Race */
      } /* case XE_Race */

      default:
         tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}
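
/* For orientation, a text-mode XE_Race report produced by the code
   above has roughly this shape (addresses, sizes and thread numbers
   are made up for illustration):

      Possible data race during read of size 4 at 0x5FB7028 by thread #3
      Locks held: none
         at 0x........: ...

      This conflicts with a previous write of size 4 by thread #2
      Locks held: 1, at address 0x5FB7100
         at 0x........: ...
*/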

void HG_(print_access) (StackTrace ips, UInt n_ips,
                        Thr* thr_a,
                        Addr  ga,
                        SizeT SzB,
                        Bool  isW,
                        WordSetID locksHeldW )
{
   Thread* threadp;

   threadp = libhb_get_Thr_hgthread( thr_a );
   tl_assert(threadp);
   if (!threadp->announced) {
      /* This is for interactive use.  We announce the thread if needed,
         but reset it to not announced afterwards, because we want
         the thread to be announced on the error output/log if needed. */
      announce_one_thread (threadp);
      threadp->announced = False;
   }

   announce_one_thread (threadp);
   VG_(printf) ("%s of size %d at %p by thread #%d",
                isW ? "write" : "read",
                (int)SzB, (void*)ga, threadp->errmsg_index);
   if (threadp->coretid == VG_INVALID_THREADID)
      VG_(printf)(" tid (exited)\n");
   else
      VG_(printf)(" tid %u\n", threadp->coretid);

   Lock** locksHeldW_P;
   locksHeldW_P = enumerate_WordSet_into_LockP_vector(
                     HG_(get_univ_lsets)(),
                     locksHeldW,
                     True/*allowed_to_be_invalid*/
                  );
   show_LockP_summary_textmode( locksHeldW_P, "" );
   HG_(free) (locksHeldW_P);

   // FIXME PW EPOCH : need the real ips epoch.
   VG_(pp_StackTrace)( VG_(current_DiEpoch)(), ips, n_ips );
   VG_(printf) ("\n");
}

const HChar* HG_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      default: tl_assert(0); /* fill in missing case */
   }
}

Bool HG_(recognised_suppression) ( const HChar* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   return False;
#  undef TRY
}
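
/* For reference, a suppression file entry that this function would
   recognise looks like the following (the name and frames here are
   made up; the second field is "Helgrind:" followed by one of the
   strings tried above):

      {
         my-race-suppression
         Helgrind:Race
         fun:some_racy_function
         obj:/usr/lib/libfoo.so*
      }

   Matching of the fun:/obj: stack frames is done by the Valgrind
   core; HG_(error_matches_suppression) below only has to check that
   the error kind corresponds to the suppression kind. */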

Bool HG_(read_extra_suppression_info) ( Int fd, HChar** bufpp, SizeT* nBufp,
                                        Int* lineno, Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}

Bool HG_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
      case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
      case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
      case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
      case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
      case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
      case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
      case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
      //case XS_: return VG_(get_error_kind)(err) == XE_;
      default: tl_assert(0); /* fill in missing cases */
   }
}

SizeT HG_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}

SizeT HG_(print_extra_suppression_use) ( const Supp* su,
                                         /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}

void HG_(update_extra_suppression_use) ( const Error* err, const Supp* su )
{
   /* Do nothing */
   return;
}

/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/