/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_addrdescr.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "libhb.h"
#include "hg_errors.h"            /* self */
/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/
/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}
static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}
static HChar* string_table_strdup ( const HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (UWord*)&copy, (UWord)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
      return copy;
   }
}
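
/* A minimal usage sketch: interning the same string twice hands back
   the same canonical ARENA_TOOL copy, so later comparisons can be by
   pointer.  'check_interning' is a hypothetical helper, guarded out
   of the build so the translation unit is unchanged. */
#if 0
static void check_interning ( void )
{
   HChar* a = string_table_strdup("pthread_mutex_lock");
   HChar* b = string_table_strdup("pthread_mutex_lock");
   tl_assert(a == b);   /* deduplicated: same canonical copy */
   /* NULL is mapped to the literal "(null)" before interning. */
   tl_assert(string_table_strdup(NULL) == string_table_strdup("(null)"));
}
#endif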
/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}
static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}
/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks doubly
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often. */
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
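
/* A minimal sketch of the two calling modes, assuming 'lkn' came
   either from live state or from a possibly stale OldRef lockset
   (hypothetical call sites, compiled out): */
#if 0
static void example_conversions ( Lock* live_lkn, Lock* maybe_stale_lkn )
{
   /* From live state: the LockN must still be in admin_locks, so an
      invalid lock here would assert inside the conversion. */
   Lock* lkp1 = mk_LockP_from_LockN( live_lkn, False );
   tl_assert(lkp1 != Lock_INVALID);

   /* From historical (OldRef) state: the client may have freed the
      lock since, in which case we get Lock_INVALID back instead. */
   Lock* lkp2 = mk_LockP_from_LockN( maybe_stale_lkn, True );
   if (lkp2 == Lock_INVALID) { /* nothing more can be said about it */ }
}
#endif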
/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   if (nLocks > 0) {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   UWord i;
   /* Convert to LockPs. */
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }
   return lockPs;
}
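
/* Shape of the result, illustratively: for a lockset {lkA, lkB} where
   the client has since freed lkB, and allowed_to_be_invalid == True,
   the returned vector is

      { lkA's LockP, Lock_INVALID, NULL }

   i.e. always (cardinality + 1) entries, the last being the NULL
   terminator that callers iterate up to. */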
/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}
/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}
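
/* Tying the two helpers together, a small sketch (hypothetical,
   compiled out): for the example vector above, count_LockP_vector
   reports 2 total / 1 valid, and membership is a plain pointer
   search up to the NULL terminator. */
#if 0
static void example_vector_queries ( Lock** vec )
{
   UWord nLocks, nLocksValid;
   count_LockP_vector( &nLocks, &nLocksValid, vec );
   tl_assert(nLocksValid <= nLocks);
   if (nLocks > 0 && vec[0] != Lock_INVALID)
      tl_assert( elem_LockP_vector(vec, vec[0]) );
}
#endif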
/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

   FIXME: how does state printing interact with lockset gc?
   Are the locksets in prev/curr state always valid?
   Ditto question for the threadsets
      ThreadSets - probably are always valid if Threads
      are never thrown away.
      LockSets - could at least print the lockset elements that
      correspond to actual locks at the time of printing.  Hmm.
*/
/* Error kinds */
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;
/* Extra contexts for kinds */
typedef
   struct {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            AddrInfo    data_addrinfo;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;   /* doing the unlocking */
            Thread* owner; /* thread that actually holds the lock */
            Lock*   lock;  /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Lock*       shouldbe_earlier_lk;
            Lock*       shouldbe_later_lk;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;
static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}
/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;
/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( const Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      HG_(describe_addr) (xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}
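
/* Note on the return value: the core's error machinery uses the size
   returned by update_extra to decide how many bytes of the 'extra'
   struct to copy into its permanent record of the error, hence
   sizeof(XError) here (see the update_extra contract in
   pub_tool_errormgr.h). */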
void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
     VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
     if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                        data_addr, VG_(pp_SectKind)(sect));
     /* SectGOTPLT is required on ???-linux */
     if (sect == Vg_SectGOTPLT) return;
     /* SectPLT is required on ppc32/64-linux */
     if (sect == Vg_SectPLT) return;
     /* SectGOT is required on arm-linux */
     if (sect == Vg_SectGOT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr = data_addr;
   xe.XE.Race.szB = szB;
   xe.XE.Race.isWrite = isWrite;
   xe.XE.Race.thr = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;
   // FIXME: tid vs thr
   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}
void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}
void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}
void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}
void HG_(record_error_LockOrder)(
        Thread*     thr,
        Lock*       shouldbe_earlier_lk,
        Lock*       shouldbe_later_lk,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr = thr;
   xe.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}
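
/* Client-side sketch of what triggers this error, assuming two
   pthread mutexes A and B (an invented example program, compiled
   out; it would need <pthread.h>): */
#if 0
static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;

static void establish_order ( void )   /* establishes "A before B" */
{
   pthread_mutex_lock(&A);   pthread_mutex_lock(&B);
   pthread_mutex_unlock(&B); pthread_mutex_unlock(&A);
}

static void violate_order ( void )     /* later: "B before A" */
{
   pthread_mutex_lock(&B);   pthread_mutex_lock(&A);
   pthread_mutex_unlock(&A); pthread_mutex_unlock(&B);
}
#endif
/* In that scenario shouldbe_earlier_lk is A, shouldbe_later_lk is B,
   and the main stack of the error shows violate_order's out-of-order
   acquisition of A. */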
void HG_(record_error_PthAPIerror) ( Thread* thr, const HChar* fnname,
                                     Word err, const HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}
void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   // FIXME: tid vs thr
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}
void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}
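
/* Because errstr is interned via string_table_strdup, call sites can
   pass literal or transient strings without keeping them alive, e.g.
   (an illustrative call, not from this file):

      HG_(record_error_Misc)( thr, "Exiting thread still holds a lock" );
*/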
Bool HG_(eq_Error) ( VgRes not_used, const Error* e1, const Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }

   /*NOTREACHED*/
   tl_assert(0);
}
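
/* How these callbacks fit together: hg_main.c registers them with the
   core, roughly as below (a sketch from memory; the authoritative
   wiring lives in hg_main.c's pre_clo_init).  The core then calls
   eq_Error for duplicate detection, update_extra when an error is
   committed, and before_pp_Error / pp_Error at output time. */
#if 0
   VG_(needs_tool_errors)( HG_(eq_Error),
                           HG_(before_pp_Error),
                           HG_(pp_Error),
                           False, /*show TIDs for errors*/
                           HG_(update_extra),
                           HG_(recognised_suppression),
                           HG_(read_extra_suppression_info),
                           HG_(error_matches_suppression),
                           HG_(get_error_name),
                           HG_(get_extra_suppression_info),
                           HG_(print_extra_suppression_use),
                           HG_(update_extra_suppression_use) );
#endif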
/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/
/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml). */
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made. */
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                      thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                                  thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}
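
/* In text mode this produces, illustratively, either

      Thread #1 is the program's root thread

   or

      Thread #3 was created
         ...stack trace of the thread-creation site...

   each preceded by the "---Thread-Announcement---" rule. */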
/* Announce 'lk'. */
static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);

   if (VG_(clo_xml)) {
      if (lk->appeared_at) {
         emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
               (void*)lk );
         VG_(pp_ExeContext)( lk->appeared_at );
      }

   } else {
      if (lk->appeared_at) {
         VG_(umsg)( " Lock at %p was first observed\n",
                    (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      } else {
         VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
                    (void*)lk->guestaddr );
      }
      HG_(get_and_pp_addrdescr) (lk->guestaddr);
      VG_(umsg)("\n");
   }
}
/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}
static void show_LockP_summary_textmode ( Lock** locks, const HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0) {
      VG_(umsg)( "%sLocks held: none", pre );
   } else {
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );
   }

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr);
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}
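
/* Illustrative output shapes from the above (addresses invented):

      Locks held: none
      Locks held: 2, at addresses 0x4C3A040 0x4C3A090
      Locks held: 2, at address 0x4C3A040 (and 1 that can't be shown)
*/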
/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message. */
void HG_(before_pp_Error) ( const Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         if (xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
            Thread* thr = get_admin_threads();
            while (thr) {
               if (thr->errmsg_index
                   == xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
                  announce_one_thread (thr);
                  break;
               }
               thr = thr->admin;
            }
         }
         break;
      default:
         tl_assert(0);
   }
}
void HG_(pp_Error) ( const Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));
   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit(" %s\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      }
      break;
   }
   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
                    "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "\n" );
         emit( "Observed (incorrect) order is: "
               "acquisition of lock at %p\n",
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr);
         if (xe->XE.LockOrder.actual_earlier_ec) {
            VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
         } else {
            emit("   (stack unavailable)\n");
         }
         emit( "\n" );
         emit(" followed by a later acquisition of lock at %p\n",
              (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit("\n");
            emit( "Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "\n" );
            emit( " followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         emit("\n");
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      }

      break;
   }
   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit(
            "    <text>Thread #%d's call to %pS failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d's call to %pS failed\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index,
               xe->XE.PthAPIerror.fnname );
         emit( "   with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }
   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
                    "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }
   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
                    "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      }

      break;
   }
   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
                    "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock);

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock);

      }

      break;
   }
   case XE_Race: {
      Addr      err_ga;
      const HChar* what;
      Int       szB;
      what   = xe->XE.Race.isWrite ? "write" : "read";
      szB    = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
               "at %p by thread #%d</text>\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                  "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                       xe->XE.Race.h2_ct_locksHeldW );

         emit( "Possible data race during %s of size %d "
               "at %p by thread #%d\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

         tl_assert(xe->XE.Race.locksHeldW);
         show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            tl_assert(xe->XE.Race.h2_ct_locksHeldW);
            emit( "\n" );
            emit( "This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "   (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "   (the end of the thread)\n" );
            }
         }

      }
      VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
      break; /* case XE_Race */
   } /* case XE_Race */
   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}
const HChar* HG_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      default: tl_assert(0); /* fill in missing case */
   }
}
Bool HG_(recognised_suppression) ( const HChar* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   return False;
#  undef TRY
}
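
/* These names are what appears after "Helgrind:" in a suppressions
   file.  A minimal entry, illustratively:

      {
         ignore-worker-race
         Helgrind:Race
         fun:worker_loop
      }

   ('ignore-worker-race' and 'worker_loop' are invented names.) */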
Bool HG_(read_extra_suppression_info) ( Int fd, HChar** bufpp, SizeT* nBufp,
                                        Int* lineno, Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}
Bool HG_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
   case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
   case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
   case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
   case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
   case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
   case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
   case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
   //case XS_: return VG_(get_error_kind)(err) == XE_;
   default: tl_assert(0); /* fill in missing cases */
   }
}
SizeT HG_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}
SizeT HG_(print_extra_suppression_use) ( const Supp* su,
                                         /*OUT*/HChar* buf, Int nBuf )
{
   tl_assert(nBuf >= 1);
   /* Do nothing */
   buf[0] = '\0';
   return 0;
}
void HG_(update_extra_suppression_use) ( const Error* err, const Supp* su )
{
   /* Do nothing */
   return;
}
/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/