2 /*--------------------------------------------------------------------*/
3 /*--- Error management for Helgrind. ---*/
4 /*--- hg_errors.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of Helgrind, a Valgrind tool for detecting errors
11 Copyright (C) 2007-2013 OpenWorks Ltd
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 The GNU General Public License is contained in the file COPYING.
32 #include "pub_tool_basics.h"
33 #include "pub_tool_libcbase.h"
34 #include "pub_tool_libcassert.h"
35 #include "pub_tool_libcprint.h"
36 #include "pub_tool_execontext.h"
37 #include "pub_tool_errormgr.h"
38 #include "pub_tool_wordfm.h"
39 #include "pub_tool_xarray.h"
40 #include "pub_tool_debuginfo.h"
41 #include "pub_tool_threadstate.h"
42 #include "pub_tool_options.h" // VG_(clo_xml)
43 #include "pub_tool_aspacemgr.h"
44 #include "pub_tool_addrinfo.h"
46 #include "hg_basics.h"
47 #include "hg_addrdescr.h"
48 #include "hg_wordset.h"
49 #include "hg_lock_n_thread.h"
51 #include "hg_errors.h" /* self */
54 /*----------------------------------------------------------------*/
55 /*--- Error management -- storage ---*/
56 /*----------------------------------------------------------------*/
/* maps (by value) strings to a copy of them in ARENA_TOOL */
/* Lazily created on first use by string_table_strdup(). */
static WordFM* string_table = NULL;
/* Count of interning queries made against string_table (stats). */
ULong HG_(stats__string_table_queries) = 0;
64 ULong
HG_(stats__string_table_get_map_size
) ( void ) {
65 return string_table
? (ULong
)VG_(sizeFM
)(string_table
) : 0;
68 static Word
string_table_cmp ( UWord s1
, UWord s2
) {
69 return (Word
)VG_(strcmp
)( (HChar
*)s1
, (HChar
*)s2
);
72 static HChar
* string_table_strdup ( const HChar
* str
) {
74 HG_(stats__string_table_queries
)++;
78 string_table
= VG_(newFM
)( HG_(zalloc
), "hg.sts.1",
79 HG_(free
), string_table_cmp
);
81 if (VG_(lookupFM
)( string_table
,
82 NULL
, (UWord
*)©
, (UWord
)str
)) {
84 if (0) VG_(printf
)("string_table_strdup: %p -> %p\n", str
, copy
);
87 copy
= HG_(strdup
)("hg.sts.2", str
);
88 VG_(addToFM
)( string_table
, (UWord
)copy
, (UWord
)copy
);
/* maps from Lock .unique fields to LockP*s */
/* Lazily created on first use by mk_LockP_from_LockN(). */
static WordFM* map_LockN_to_P = NULL;
/* Count of LockN -> LockP conversion queries (stats). */
ULong HG_(stats__LockN_to_P_queries) = 0;
99 ULong
HG_(stats__LockN_to_P_get_map_size
) ( void ) {
100 return map_LockN_to_P
? (ULong
)VG_(sizeFM
)(map_LockN_to_P
) : 0;
103 static Word
lock_unique_cmp ( UWord lk1W
, UWord lk2W
)
105 Lock
* lk1
= (Lock
*)lk1W
;
106 Lock
* lk2
= (Lock
*)lk2W
;
107 tl_assert( HG_(is_sane_LockNorP
)(lk1
) );
108 tl_assert( HG_(is_sane_LockNorP
)(lk2
) );
109 if (lk1
->unique
< lk2
->unique
) return -1;
110 if (lk1
->unique
> lk2
->unique
) return 1;
114 /* Given a normal Lock (LockN), convert it to a persistent Lock
115 (LockP). In some cases the LockN could be invalid (if it's been
116 freed), so we enquire, in hg_main.c's admin_locks list, whether it
117 is in fact valid. If allowed_to_be_invalid is True, then it's OK
118 for the LockN to be invalid, in which case Lock_INVALID is
119 returned. In all other cases, we insist that the LockN is a valid
120 lock, and return its corresponding LockP.
122 Why can LockNs sometimes be invalid? Because they are harvested
123 from locksets that are attached to the OldRef info for conflicting
124 threads. By the time we detect a race, the some of the elements of
125 the lockset may have been destroyed by the client, in which case
126 the corresponding Lock structures we maintain will have been freed.
128 So we check that each LockN is a member of the admin_locks double
129 linked list of all Lock structures. That stops us prodding around
130 in potentially freed-up Lock structures. However, it's not quite a
131 proper check: if a new Lock has been reallocated at the same
132 address as one which was previously freed, we'll wind up copying
133 the new one as the basis for the LockP, which is completely bogus
134 because it is unrelated to the previous Lock that lived there.
135 Let's hope that doesn't happen too often.
137 static Lock
* mk_LockP_from_LockN ( Lock
* lkn
,
138 Bool allowed_to_be_invalid
)
141 HG_(stats__LockN_to_P_queries
)++;
143 /* First off, let's do some sanity checks. If
144 allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
145 in admin_locks; else we must assert. If it is True, it's OK for
146 it not to be findable, but in that case we must return
147 Lock_INVALID right away. */
148 Lock
* lock_list
= HG_(get_admin_locks
)();
150 if (lock_list
== lkn
)
152 lock_list
= lock_list
->admin_next
;
154 if (lock_list
== NULL
) {
155 /* We didn't find it. That possibility has to be OK'd by the
157 tl_assert(allowed_to_be_invalid
);
161 /* So we must be looking at a valid LockN. */
162 tl_assert( HG_(is_sane_LockN
)(lkn
) );
164 if (!map_LockN_to_P
) {
165 map_LockN_to_P
= VG_(newFM
)( HG_(zalloc
), "hg.mLPfLN.1",
166 HG_(free
), lock_unique_cmp
);
168 if (!VG_(lookupFM
)( map_LockN_to_P
, NULL
, (UWord
*)&lkp
, (UWord
)lkn
)) {
169 lkp
= HG_(zalloc
)( "hg.mLPfLN.2", sizeof(Lock
) );
171 lkp
->admin_next
= NULL
;
172 lkp
->admin_prev
= NULL
;
173 lkp
->magic
= LockP_MAGIC
;
174 /* Forget about the bag of lock holders - don't copy that.
175 Also, acquired_at should be NULL whenever heldBy is, and vice
176 versa. Also forget about the associated libhb synch object. */
179 lkp
->acquired_at
= NULL
;
181 VG_(addToFM
)( map_LockN_to_P
, (UWord
)lkp
, (UWord
)lkp
);
183 tl_assert( HG_(is_sane_LockP
)(lkp
) );
187 /* Expand a WordSet of LockN*'s into a NULL-terminated vector of
188 LockP*'s. Any LockN's that can't be converted into a LockP
189 (because they have been freed, see comment on mk_LockP_from_LockN)
190 are converted instead into the value Lock_INVALID. Hence the
191 returned vector is a sequence: zero or more (valid LockP* or
192 LockN_INVALID), terminated by a NULL. */
194 Lock
** enumerate_WordSet_into_LockP_vector( WordSetU
* univ_lsets
,
196 Bool allowed_to_be_invalid
)
198 tl_assert(univ_lsets
);
199 tl_assert( HG_(plausibleWS
)(univ_lsets
, lockset
) );
200 UWord nLocks
= HG_(cardinalityWS
)(univ_lsets
, lockset
);
201 Lock
** lockPs
= HG_(zalloc
)( "hg.eWSiLPa",
202 (nLocks
+1) * sizeof(Lock
*) );
203 tl_assert(lockPs
[nLocks
] == NULL
); /* pre-NULL terminated */
204 UWord
* lockNs
= NULL
;
207 /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
208 lockset is empty; hence the guarding "if". Sigh. */
209 HG_(getPayloadWS
)( &lockNs
, &nLockNs
, univ_lsets
, lockset
);
213 /* Convert to LockPs. */
214 for (i
= 0; i
< nLockNs
; i
++) {
215 lockPs
[i
] = mk_LockP_from_LockN( (Lock
*)lockNs
[i
],
216 allowed_to_be_invalid
);
221 /* Get the number of useful elements in a vector created by
222 enumerate_WordSet_into_LockP_vector. Returns both the total number
223 of elements (not including the terminating NULL) and the number of
224 non-Lock_INVALID elements. */
225 static void count_LockP_vector ( /*OUT*/UWord
* nLocks
,
226 /*OUT*/UWord
* nLocksValid
,
230 *nLocks
= *nLocksValid
= 0;
234 if (vec
[n
] != Lock_INVALID
)
240 /* Find out whether 'lk' is in 'vec'. */
241 static Bool
elem_LockP_vector ( Lock
** vec
, Lock
* lk
)
257 race: program counter
263 FIXME: how does state printing interact with lockset gc?
264 Are the locksets in prev/curr state always valid?
265 Ditto question for the threadsets
266 ThreadSets - probably are always valid if Threads
267 are never thrown away.
268 LockSets - could at least print the lockset elements that
269 correspond to actual locks at the time of printing. Hmm.
275 XE_Race
=1101, // race
276 XE_UnlockUnlocked
, // unlocking a not-locked lock
277 XE_UnlockForeign
, // unlocking a lock held by some other thread
278 XE_UnlockBogus
, // unlocking an address not known to be a lock
279 XE_PthAPIerror
, // error from the POSIX pthreads API
280 XE_LockOrder
, // lock order error
281 XE_Misc
// misc other error (w/ string to describe it)
285 /* Extra contexts for kinds */
293 AddrInfo data_addrinfo
;
297 /* h1_* and h2_* provide some description of a previously
298 observed access with which we are conflicting. */
299 Thread
* h1_ct
; /* non-NULL means h1 info present */
300 ExeContext
* h1_ct_mbsegstartEC
;
301 ExeContext
* h1_ct_mbsegendEC
;
302 Thread
* h2_ct
; /* non-NULL means h2 info present */
303 ExeContext
* h2_ct_accEC
;
306 Lock
** h2_ct_locksHeldW
;
309 Thread
* thr
; /* doing the unlocking */
310 Lock
* lock
; /* lock (that is already unlocked) */
313 Thread
* thr
; /* doing the unlocking */
314 Thread
* owner
; /* thread that actually holds the lock */
315 Lock
* lock
; /* lock (that is held by 'owner') */
318 Thread
* thr
; /* doing the unlocking */
319 Addr lock_ga
; /* purported address of the lock */
323 HChar
* fnname
; /* persistent, in tool-arena */
324 Word err
; /* pth error code */
325 HChar
* errstr
; /* persistent, in tool-arena */
329 /* The first 4 fields describe the previously observed
330 (should-be) ordering. */
331 Lock
* shouldbe_earlier_lk
;
332 Lock
* shouldbe_later_lk
;
333 ExeContext
* shouldbe_earlier_ec
;
334 ExeContext
* shouldbe_later_ec
;
335 /* In principle we need to record two more stacks, from
336 this thread, when acquiring the locks in the "wrong"
337 order. In fact the wallclock-later acquisition by this
338 thread is recorded in the main stack for this error.
339 So we only need a stack for the earlier acquisition by
341 ExeContext
* actual_earlier_ec
;
345 HChar
* errstr
; /* persistent, in tool-arena */
346 HChar
* auxstr
; /* optional, persistent, in tool-arena */
347 ExeContext
* auxctx
; /* optional */
353 static void init_XError ( XError
* xe
) {
354 VG_(memset
)(xe
, 0, sizeof(*xe
) );
355 xe
->tag
= XE_Race
-1; /* bogus */
359 /* Extensions of suppressions */
362 XS_Race
=1201, /* race */
374 /* Updates the copy with address info if necessary. */
375 UInt
HG_(update_extra
) ( const Error
* err
)
377 XError
* xe
= (XError
*)VG_(get_error_extra
)(err
);
379 //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
380 // describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
383 if (xe
->tag
== XE_Race
) {
385 /* Note the set of locks that the thread is (w-)holding.
386 Convert the WordSetID of LockN*'s into a NULL-terminated
387 vector of LockP*'s. We don't expect to encounter any invalid
388 LockNs in this conversion. */
389 tl_assert(xe
->XE
.Race
.thr
);
390 xe
->XE
.Race
.locksHeldW
391 = enumerate_WordSet_into_LockP_vector(
392 HG_(get_univ_lsets
)(),
393 xe
->XE
.Race
.thr
->locksetW
,
394 False
/*!allowed_to_be_invalid*/
397 /* See if we can come up with a source level description of the
398 raced-upon address. This is potentially expensive, which is
399 why it's only done at the update_extra point, not when the
400 error is initially created. */
404 VG_(printf
)("HG_(update_extra): "
405 "%d conflicting-event queries\n", xxx
);
407 HG_(describe_addr
) (xe
->XE
.Race
.data_addr
, &xe
->XE
.Race
.data_addrinfo
);
409 /* And poke around in the conflicting-event map, to see if we
410 can rustle up a plausible-looking conflicting memory access
412 if (HG_(clo_history_level
) >= 2) {
414 ExeContext
* wherep
= NULL
;
415 Addr acc_addr
= xe
->XE
.Race
.data_addr
;
416 Int acc_szB
= xe
->XE
.Race
.szB
;
417 Thr
* acc_thr
= xe
->XE
.Race
.thr
->hbthr
;
418 Bool acc_isW
= xe
->XE
.Race
.isWrite
;
420 Bool conf_isW
= False
;
421 WordSetID conf_locksHeldW
= 0;
422 tl_assert(!xe
->XE
.Race
.h2_ct_accEC
);
423 tl_assert(!xe
->XE
.Race
.h2_ct
);
424 if (libhb_event_map_lookup(
425 &wherep
, &thrp
, &conf_szB
, &conf_isW
, &conf_locksHeldW
,
426 acc_thr
, acc_addr
, acc_szB
, acc_isW
)) {
430 threadp
= libhb_get_Thr_hgthread( thrp
);
432 xe
->XE
.Race
.h2_ct_accEC
= wherep
;
433 xe
->XE
.Race
.h2_ct
= threadp
;
434 xe
->XE
.Race
.h2_ct_accSzB
= (Int
)conf_szB
;
435 xe
->XE
.Race
.h2_ct_accIsW
= conf_isW
;
436 xe
->XE
.Race
.h2_ct_locksHeldW
437 = enumerate_WordSet_into_LockP_vector(
438 HG_(get_univ_lsets
)(),
440 True
/*allowed_to_be_invalid*/
445 // both NULL or both non-NULL
446 tl_assert( (!!xe
->XE
.Race
.h2_ct
) == (!!xe
->XE
.Race
.h2_ct_accEC
) );
449 return sizeof(XError
);
452 void HG_(record_error_Race
) ( Thread
* thr
,
453 Addr data_addr
, Int szB
, Bool isWrite
,
455 ExeContext
* h1_ct_segstart
,
456 ExeContext
* h1_ct_mbsegendEC
)
459 tl_assert( HG_(is_sane_Thread
)(thr
) );
461 # if defined(VGO_linux)
462 /* Skip any races on locations apparently in GOTPLT sections. This
463 is said to be caused by ld.so poking PLT table entries (or
464 whatever) when it writes the resolved address of a dynamically
465 linked routine, into the table (or whatever) when it is called
466 for the first time. */
468 VgSectKind sect
= VG_(DebugInfo_sect_kind
)( NULL
, data_addr
);
469 if (0) VG_(printf
)("XXXXXXXXX RACE on %#lx %s\n",
470 data_addr
, VG_(pp_SectKind
)(sect
));
471 /* SectPLT is required on ???-linux */
472 if (sect
== Vg_SectGOTPLT
) return;
473 /* SectPLT is required on ppc32/64-linux */
474 if (sect
== Vg_SectPLT
) return;
475 /* SectGOT is required on arm-linux */
476 if (sect
== Vg_SectGOT
) return;
482 xe
.XE
.Race
.data_addr
= data_addr
;
483 xe
.XE
.Race
.szB
= szB
;
484 xe
.XE
.Race
.isWrite
= isWrite
;
485 xe
.XE
.Race
.thr
= thr
;
486 tl_assert(isWrite
== False
|| isWrite
== True
);
487 tl_assert(szB
== 8 || szB
== 4 || szB
== 2 || szB
== 1);
488 /* Skip on the detailed description of the raced-on address at this
489 point; it's expensive. Leave it for the update_extra function
490 if we ever make it that far. */
491 xe
.XE
.Race
.data_addrinfo
.tag
= Addr_Undescribed
;
493 // Skip on any of the conflicting-access info at this point.
494 // It's expensive to obtain, and this error is more likely than
495 // not to be discarded. We'll fill these fields in in
496 // HG_(update_extra) just above, assuming the error ever makes
497 // it that far (unlikely).
498 xe
.XE
.Race
.h2_ct_accSzB
= 0;
499 xe
.XE
.Race
.h2_ct_accIsW
= False
;
500 xe
.XE
.Race
.h2_ct_accEC
= NULL
;
501 xe
.XE
.Race
.h2_ct
= NULL
;
502 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
503 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
505 xe
.XE
.Race
.h1_ct
= h1_ct
;
506 xe
.XE
.Race
.h1_ct_mbsegstartEC
= h1_ct_segstart
;
507 xe
.XE
.Race
.h1_ct_mbsegendEC
= h1_ct_mbsegendEC
;
509 VG_(maybe_record_error
)( thr
->coretid
,
510 XE_Race
, data_addr
, NULL
, &xe
);
513 void HG_(record_error_UnlockUnlocked
) ( Thread
* thr
, Lock
* lk
)
516 tl_assert( HG_(is_sane_Thread
)(thr
) );
517 tl_assert( HG_(is_sane_LockN
)(lk
) );
519 xe
.tag
= XE_UnlockUnlocked
;
520 xe
.XE
.UnlockUnlocked
.thr
522 xe
.XE
.UnlockUnlocked
.lock
523 = mk_LockP_from_LockN(lk
, False
/*!allowed_to_be_invalid*/);
525 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
526 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
527 VG_(maybe_record_error
)( thr
->coretid
,
528 XE_UnlockUnlocked
, 0, NULL
, &xe
);
531 void HG_(record_error_UnlockForeign
) ( Thread
* thr
,
532 Thread
* owner
, Lock
* lk
)
535 tl_assert( HG_(is_sane_Thread
)(thr
) );
536 tl_assert( HG_(is_sane_Thread
)(owner
) );
537 tl_assert( HG_(is_sane_LockN
)(lk
) );
539 xe
.tag
= XE_UnlockForeign
;
540 xe
.XE
.UnlockForeign
.thr
= thr
;
541 xe
.XE
.UnlockForeign
.owner
= owner
;
542 xe
.XE
.UnlockForeign
.lock
543 = mk_LockP_from_LockN(lk
, False
/*!allowed_to_be_invalid*/);
545 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
546 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
547 VG_(maybe_record_error
)( thr
->coretid
,
548 XE_UnlockForeign
, 0, NULL
, &xe
);
551 void HG_(record_error_UnlockBogus
) ( Thread
* thr
, Addr lock_ga
)
554 tl_assert( HG_(is_sane_Thread
)(thr
) );
556 xe
.tag
= XE_UnlockBogus
;
557 xe
.XE
.UnlockBogus
.thr
= thr
;
558 xe
.XE
.UnlockBogus
.lock_ga
= lock_ga
;
560 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
561 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
562 VG_(maybe_record_error
)( thr
->coretid
,
563 XE_UnlockBogus
, 0, NULL
, &xe
);
566 void HG_(record_error_LockOrder
)(
568 Lock
* shouldbe_earlier_lk
,
569 Lock
* shouldbe_later_lk
,
570 ExeContext
* shouldbe_earlier_ec
,
571 ExeContext
* shouldbe_later_ec
,
572 ExeContext
* actual_earlier_ec
576 tl_assert( HG_(is_sane_Thread
)(thr
) );
577 tl_assert(HG_(clo_track_lockorders
));
579 xe
.tag
= XE_LockOrder
;
580 xe
.XE
.LockOrder
.thr
= thr
;
581 xe
.XE
.LockOrder
.shouldbe_earlier_lk
582 = mk_LockP_from_LockN(shouldbe_earlier_lk
,
583 False
/*!allowed_to_be_invalid*/);
584 xe
.XE
.LockOrder
.shouldbe_earlier_ec
= shouldbe_earlier_ec
;
585 xe
.XE
.LockOrder
.shouldbe_later_lk
586 = mk_LockP_from_LockN(shouldbe_later_lk
,
587 False
/*!allowed_to_be_invalid*/);
588 xe
.XE
.LockOrder
.shouldbe_later_ec
= shouldbe_later_ec
;
589 xe
.XE
.LockOrder
.actual_earlier_ec
= actual_earlier_ec
;
591 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
592 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
593 VG_(maybe_record_error
)( thr
->coretid
,
594 XE_LockOrder
, 0, NULL
, &xe
);
597 void HG_(record_error_PthAPIerror
) ( Thread
* thr
, const HChar
* fnname
,
598 Word err
, const HChar
* errstr
)
601 tl_assert( HG_(is_sane_Thread
)(thr
) );
605 xe
.tag
= XE_PthAPIerror
;
606 xe
.XE
.PthAPIerror
.thr
= thr
;
607 xe
.XE
.PthAPIerror
.fnname
= string_table_strdup(fnname
);
608 xe
.XE
.PthAPIerror
.err
= err
;
609 xe
.XE
.PthAPIerror
.errstr
= string_table_strdup(errstr
);
611 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
612 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
613 VG_(maybe_record_error
)( thr
->coretid
,
614 XE_PthAPIerror
, 0, NULL
, &xe
);
617 void HG_(record_error_Misc_w_aux
) ( Thread
* thr
, const HChar
* errstr
,
618 const HChar
* auxstr
, ExeContext
* auxctx
)
621 tl_assert( HG_(is_sane_Thread
)(thr
) );
625 xe
.XE
.Misc
.thr
= thr
;
626 xe
.XE
.Misc
.errstr
= string_table_strdup(errstr
);
627 xe
.XE
.Misc
.auxstr
= auxstr
? string_table_strdup(auxstr
) : NULL
;
628 xe
.XE
.Misc
.auxctx
= auxctx
;
630 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
631 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
632 VG_(maybe_record_error
)( thr
->coretid
,
633 XE_Misc
, 0, NULL
, &xe
);
636 void HG_(record_error_Misc
) ( Thread
* thr
, const HChar
* errstr
)
638 HG_(record_error_Misc_w_aux
)(thr
, errstr
, NULL
, NULL
);
641 Bool
HG_(eq_Error
) ( VgRes not_used
, const Error
* e1
, const Error
* e2
)
645 tl_assert(VG_(get_error_kind
)(e1
) == VG_(get_error_kind
)(e2
));
647 xe1
= (XError
*)VG_(get_error_extra
)(e1
);
648 xe2
= (XError
*)VG_(get_error_extra
)(e2
);
652 switch (VG_(get_error_kind
)(e1
)) {
654 return xe1
->XE
.Race
.szB
== xe2
->XE
.Race
.szB
655 && xe1
->XE
.Race
.isWrite
== xe2
->XE
.Race
.isWrite
656 && (HG_(clo_cmp_race_err_addrs
)
657 ? xe1
->XE
.Race
.data_addr
== xe2
->XE
.Race
.data_addr
659 case XE_UnlockUnlocked
:
660 return xe1
->XE
.UnlockUnlocked
.thr
== xe2
->XE
.UnlockUnlocked
.thr
661 && xe1
->XE
.UnlockUnlocked
.lock
== xe2
->XE
.UnlockUnlocked
.lock
;
662 case XE_UnlockForeign
:
663 return xe1
->XE
.UnlockForeign
.thr
== xe2
->XE
.UnlockForeign
.thr
664 && xe1
->XE
.UnlockForeign
.owner
== xe2
->XE
.UnlockForeign
.owner
665 && xe1
->XE
.UnlockForeign
.lock
== xe2
->XE
.UnlockForeign
.lock
;
667 return xe1
->XE
.UnlockBogus
.thr
== xe2
->XE
.UnlockBogus
.thr
668 && xe1
->XE
.UnlockBogus
.lock_ga
== xe2
->XE
.UnlockBogus
.lock_ga
;
670 return xe1
->XE
.PthAPIerror
.thr
== xe2
->XE
.PthAPIerror
.thr
671 && 0==VG_(strcmp
)(xe1
->XE
.PthAPIerror
.fnname
,
672 xe2
->XE
.PthAPIerror
.fnname
)
673 && xe1
->XE
.PthAPIerror
.err
== xe2
->XE
.PthAPIerror
.err
;
675 return xe1
->XE
.LockOrder
.thr
== xe2
->XE
.LockOrder
.thr
;
677 return xe1
->XE
.Misc
.thr
== xe2
->XE
.Misc
.thr
678 && 0==VG_(strcmp
)(xe1
->XE
.Misc
.errstr
, xe2
->XE
.Misc
.errstr
);
688 /*----------------------------------------------------------------*/
689 /*--- Error management -- printing ---*/
690 /*----------------------------------------------------------------*/
692 /* Do a printf-style operation on either the XML or normal output
693 channel, depending on the setting of VG_(clo_xml).
695 static void emit_WRK ( const HChar
* format
, va_list vargs
)
698 VG_(vprintf_xml
)(format
, vargs
);
700 VG_(vmessage
)(Vg_UserMsg
, format
, vargs
);
703 static void emit ( const HChar
* format
, ... ) PRINTF_CHECK(1, 2);
704 static void emit ( const HChar
* format
, ... )
707 va_start(vargs
, format
);
708 emit_WRK(format
, vargs
);
713 /* Announce (that is, print the point-of-creation) of 'thr'. Only do
714 this once, as we only want to see these announcements once per
715 thread. Returned Bool indicates whether or not an announcement was
718 static Bool
announce_one_thread ( Thread
* thr
)
720 tl_assert(HG_(is_sane_Thread
)(thr
));
721 tl_assert(thr
->errmsg_index
>= 1);
727 VG_(printf_xml
)("<announcethread>\n");
728 VG_(printf_xml
)(" <hthreadid>%d</hthreadid>\n", thr
->errmsg_index
);
729 if (thr
->errmsg_index
== 1) {
730 tl_assert(thr
->created_at
== NULL
);
731 VG_(printf_xml
)(" <isrootthread></isrootthread>\n");
733 tl_assert(thr
->created_at
!= NULL
);
734 VG_(pp_ExeContext
)( thr
->created_at
);
736 VG_(printf_xml
)("</announcethread>\n\n");
740 VG_(umsg
)("---Thread-Announcement----------"
741 "--------------------------------" "\n");
744 if (thr
->errmsg_index
== 1) {
745 tl_assert(thr
->created_at
== NULL
);
746 VG_(message
)(Vg_UserMsg
,
747 "Thread #%d is the program's root thread\n",
750 tl_assert(thr
->created_at
!= NULL
);
751 VG_(message
)(Vg_UserMsg
, "Thread #%d was created\n",
753 VG_(pp_ExeContext
)( thr
->created_at
);
755 VG_(message
)(Vg_UserMsg
, "\n");
759 thr
->announced
= True
;
764 static void announce_LockP ( Lock
* lk
)
767 if (lk
== Lock_INVALID
)
768 return; /* Can't be announced -- we know nothing about it. */
769 tl_assert(lk
->magic
== LockP_MAGIC
);
772 if (lk
->appeared_at
) {
773 emit( " <auxwhat>Lock at %p was first observed</auxwhat>\n",
775 VG_(pp_ExeContext
)( lk
->appeared_at
);
779 if (lk
->appeared_at
) {
780 VG_(umsg
)( " Lock at %p was first observed\n",
781 (void*)lk
->guestaddr
);
782 VG_(pp_ExeContext
)( lk
->appeared_at
);
784 VG_(umsg
)( " Lock at %p : no stacktrace for first observation\n",
785 (void*)lk
->guestaddr
);
787 HG_(get_and_pp_addrdescr
) (lk
->guestaddr
);
792 /* Announce (that is, print point-of-first-observation) for the
793 locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
794 static void announce_combined_LockP_vecs ( Lock
** lockvec
,
799 for (i
= 0; lockvec
[i
]; i
++) {
800 announce_LockP(lockvec
[i
]);
803 for (i
= 0; lockvec2
[i
]; i
++) {
804 Lock
* lk
= lockvec2
[i
];
805 if (!elem_LockP_vector(lockvec
, lk
))
812 static void show_LockP_summary_textmode ( Lock
** locks
, const HChar
* pre
)
816 UWord nLocks
= 0, nLocksValid
= 0;
817 count_LockP_vector(&nLocks
, &nLocksValid
, locks
);
818 tl_assert(nLocksValid
<= nLocks
);
821 VG_(umsg
)( "%sLocks held: none", pre
);
823 VG_(umsg
)( "%sLocks held: %lu, at address%s ",
824 pre
, nLocks
, nLocksValid
== 1 ? "" : "es" );
828 for (i
= 0; i
< nLocks
; i
++) {
829 if (locks
[i
] == Lock_INVALID
)
831 VG_(umsg
)( "%p", (void*)locks
[i
]->guestaddr
);
832 if (locks
[i
+1] != NULL
)
835 if (nLocksValid
< nLocks
)
836 VG_(umsg
)(" (and %lu that can't be shown)", nLocks
- nLocksValid
);
842 /* This is the "this error is due to be printed shortly; so have a
843 look at it any print any preamble you want" function. We use it to
844 announce any previously un-announced threads in the upcoming error
847 void HG_(before_pp_Error
) ( const Error
* err
)
851 xe
= (XError
*)VG_(get_error_extra
)(err
);
854 switch (VG_(get_error_kind
)(err
)) {
856 announce_one_thread( xe
->XE
.Misc
.thr
);
859 announce_one_thread( xe
->XE
.LockOrder
.thr
);
862 announce_one_thread( xe
->XE
.PthAPIerror
.thr
);
865 announce_one_thread( xe
->XE
.UnlockBogus
.thr
);
867 case XE_UnlockForeign
:
868 announce_one_thread( xe
->XE
.UnlockForeign
.thr
);
869 announce_one_thread( xe
->XE
.UnlockForeign
.owner
);
871 case XE_UnlockUnlocked
:
872 announce_one_thread( xe
->XE
.UnlockUnlocked
.thr
);
875 announce_one_thread( xe
->XE
.Race
.thr
);
876 if (xe
->XE
.Race
.h2_ct
)
877 announce_one_thread( xe
->XE
.Race
.h2_ct
);
878 if (xe
->XE
.Race
.h1_ct
)
879 announce_one_thread( xe
->XE
.Race
.h1_ct
);
880 if (xe
->XE
.Race
.data_addrinfo
.Addr
.Block
.alloc_tinfo
.tnr
) {
881 Thread
* thr
= get_admin_threads();
883 if (thr
->errmsg_index
884 == xe
->XE
.Race
.data_addrinfo
.Addr
.Block
.alloc_tinfo
.tnr
) {
885 announce_one_thread (thr
);
897 void HG_(pp_Error
) ( const Error
* err
)
899 const Bool xml
= VG_(clo_xml
); /* a shorthand, that's all */
902 VG_(umsg
)("--------------------------------"
903 "--------------------------------" "\n");
907 XError
*xe
= (XError
*)VG_(get_error_extra
)(err
);
911 emit( " <kind>%s</kind>\n", HG_(get_error_name
)(err
));
913 switch (VG_(get_error_kind
)(err
)) {
916 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.Misc
.thr
) );
920 emit( " <xwhat>\n" );
921 emit( " <text>Thread #%d: %s</text>\n",
922 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
,
923 xe
->XE
.Misc
.errstr
);
924 emit( " <hthreadid>%d</hthreadid>\n",
925 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
);
926 emit( " </xwhat>\n" );
927 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
928 if (xe
->XE
.Misc
.auxstr
) {
929 emit(" <auxwhat>%s</auxwhat>\n", xe
->XE
.Misc
.auxstr
);
930 if (xe
->XE
.Misc
.auxctx
)
931 VG_(pp_ExeContext
)( xe
->XE
.Misc
.auxctx
);
936 emit( "Thread #%d: %s\n",
937 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
,
938 xe
->XE
.Misc
.errstr
);
939 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
940 if (xe
->XE
.Misc
.auxstr
) {
941 emit(" %s\n", xe
->XE
.Misc
.auxstr
);
942 if (xe
->XE
.Misc
.auxctx
)
943 VG_(pp_ExeContext
)( xe
->XE
.Misc
.auxctx
);
951 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.LockOrder
.thr
) );
955 emit( " <xwhat>\n" );
956 emit( " <text>Thread #%d: lock order \"%p before %p\" "
958 (Int
)xe
->XE
.LockOrder
.thr
->errmsg_index
,
959 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
,
960 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
961 emit( " <hthreadid>%d</hthreadid>\n",
962 (Int
)xe
->XE
.LockOrder
.thr
->errmsg_index
);
963 emit( " </xwhat>\n" );
964 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
965 if (xe
->XE
.LockOrder
.shouldbe_earlier_ec
966 && xe
->XE
.LockOrder
.shouldbe_later_ec
) {
967 emit( " <auxwhat>Required order was established by "
968 "acquisition of lock at %p</auxwhat>\n",
969 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
);
970 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_earlier_ec
);
971 emit( " <auxwhat>followed by a later acquisition "
972 "of lock at %p</auxwhat>\n",
973 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
974 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_later_ec
);
976 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_earlier_lk
);
977 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_later_lk
);
981 emit( "Thread #%d: lock order \"%p before %p\" violated\n",
982 (Int
)xe
->XE
.LockOrder
.thr
->errmsg_index
,
983 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
,
984 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
986 emit( "Observed (incorrect) order is: "
987 "acquisition of lock at %p\n",
988 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
989 if (xe
->XE
.LockOrder
.actual_earlier_ec
) {
990 VG_(pp_ExeContext
)(xe
->XE
.LockOrder
.actual_earlier_ec
);
992 emit(" (stack unavailable)\n");
995 emit(" followed by a later acquisition of lock at %p\n",
996 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
);
997 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
998 if (xe
->XE
.LockOrder
.shouldbe_earlier_ec
999 && xe
->XE
.LockOrder
.shouldbe_later_ec
) {
1001 emit( "Required order was established by "
1002 "acquisition of lock at %p\n",
1003 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
);
1004 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_earlier_ec
);
1006 emit( " followed by a later acquisition of lock at %p\n",
1007 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
1008 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_later_ec
);
1011 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_earlier_lk
);
1012 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_later_lk
);
1019 case XE_PthAPIerror
: {
1020 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.PthAPIerror
.thr
) );
1024 emit( " <xwhat>\n" );
1026 " <text>Thread #%d's call to %pS failed</text>\n",
1027 (Int
)xe
->XE
.PthAPIerror
.thr
->errmsg_index
,
1028 xe
->XE
.PthAPIerror
.fnname
);
1029 emit( " <hthreadid>%d</hthreadid>\n",
1030 (Int
)xe
->XE
.PthAPIerror
.thr
->errmsg_index
);
1031 emit( " </xwhat>\n" );
1032 emit( " <what>with error code %ld (%s)</what>\n",
1033 xe
->XE
.PthAPIerror
.err
, xe
->XE
.PthAPIerror
.errstr
);
1034 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1038 emit( "Thread #%d's call to %pS failed\n",
1039 (Int
)xe
->XE
.PthAPIerror
.thr
->errmsg_index
,
1040 xe
->XE
.PthAPIerror
.fnname
);
1041 emit( " with error code %ld (%s)\n",
1042 xe
->XE
.PthAPIerror
.err
, xe
->XE
.PthAPIerror
.errstr
);
1043 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1050 case XE_UnlockBogus
: {
1051 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockBogus
.thr
) );
1055 emit( " <xwhat>\n" );
1056 emit( " <text>Thread #%d unlocked an invalid "
1057 "lock at %p</text>\n",
1058 (Int
)xe
->XE
.UnlockBogus
.thr
->errmsg_index
,
1059 (void*)xe
->XE
.UnlockBogus
.lock_ga
);
1060 emit( " <hthreadid>%d</hthreadid>\n",
1061 (Int
)xe
->XE
.UnlockBogus
.thr
->errmsg_index
);
1062 emit( " </xwhat>\n" );
1063 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1067 emit( "Thread #%d unlocked an invalid lock at %p\n",
1068 (Int
)xe
->XE
.UnlockBogus
.thr
->errmsg_index
,
1069 (void*)xe
->XE
.UnlockBogus
.lock_ga
);
1070 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1077 case XE_UnlockForeign
: {
1078 tl_assert( HG_(is_sane_LockP
)( xe
->XE
.UnlockForeign
.lock
) );
1079 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockForeign
.owner
) );
1080 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockForeign
.thr
) );
1084 emit( " <xwhat>\n" );
1085 emit( " <text>Thread #%d unlocked lock at %p "
1086 "currently held by thread #%d</text>\n",
1087 (Int
)xe
->XE
.UnlockForeign
.thr
->errmsg_index
,
1088 (void*)xe
->XE
.UnlockForeign
.lock
->guestaddr
,
1089 (Int
)xe
->XE
.UnlockForeign
.owner
->errmsg_index
);
1090 emit( " <hthreadid>%d</hthreadid>\n",
1091 (Int
)xe
->XE
.UnlockForeign
.thr
->errmsg_index
);
1092 emit( " <hthreadid>%d</hthreadid>\n",
1093 (Int
)xe
->XE
.UnlockForeign
.owner
->errmsg_index
);
1094 emit( " </xwhat>\n" );
1095 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1096 announce_LockP ( xe
->XE
.UnlockForeign
.lock
);
1100 emit( "Thread #%d unlocked lock at %p "
1101 "currently held by thread #%d\n",
1102 (Int
)xe
->XE
.UnlockForeign
.thr
->errmsg_index
,
1103 (void*)xe
->XE
.UnlockForeign
.lock
->guestaddr
,
1104 (Int
)xe
->XE
.UnlockForeign
.owner
->errmsg_index
);
1105 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1106 announce_LockP ( xe
->XE
.UnlockForeign
.lock
);
1113 case XE_UnlockUnlocked
: {
1114 tl_assert( HG_(is_sane_LockP
)( xe
->XE
.UnlockUnlocked
.lock
) );
1115 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockUnlocked
.thr
) );
1119 emit( " <xwhat>\n" );
1120 emit( " <text>Thread #%d unlocked a "
1121 "not-locked lock at %p</text>\n",
1122 (Int
)xe
->XE
.UnlockUnlocked
.thr
->errmsg_index
,
1123 (void*)xe
->XE
.UnlockUnlocked
.lock
->guestaddr
);
1124 emit( " <hthreadid>%d</hthreadid>\n",
1125 (Int
)xe
->XE
.UnlockUnlocked
.thr
->errmsg_index
);
1126 emit( " </xwhat>\n" );
1127 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1128 announce_LockP ( xe
->XE
.UnlockUnlocked
.lock
);
1132 emit( "Thread #%d unlocked a not-locked lock at %p\n",
1133 (Int
)xe
->XE
.UnlockUnlocked
.thr
->errmsg_index
,
1134 (void*)xe
->XE
.UnlockUnlocked
.lock
->guestaddr
);
1135 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1136 announce_LockP ( xe
->XE
.UnlockUnlocked
.lock
);
1147 what
= xe
->XE
.Race
.isWrite
? "write" : "read";
1148 szB
= xe
->XE
.Race
.szB
;
1149 err_ga
= VG_(get_error_address
)(err
);
1151 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.Race
.thr
));
1152 if (xe
->XE
.Race
.h2_ct
)
1153 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.Race
.h2_ct
));
1157 /* ------ XML ------ */
1158 emit( " <xwhat>\n" );
1159 emit( " <text>Possible data race during %s of size %d "
1160 "at %p by thread #%d</text>\n",
1161 what
, szB
, (void*)err_ga
, (Int
)xe
->XE
.Race
.thr
->errmsg_index
);
1162 emit( " <hthreadid>%d</hthreadid>\n",
1163 (Int
)xe
->XE
.Race
.thr
->errmsg_index
);
1164 emit( " </xwhat>\n" );
1165 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1167 if (xe
->XE
.Race
.h2_ct
) {
1168 tl_assert(xe
->XE
.Race
.h2_ct_accEC
); // assured by update_extra
1169 emit( " <xauxwhat>\n");
1170 emit( " <text>This conflicts with a previous %s of size %d "
1171 "by thread #%d</text>\n",
1172 xe
->XE
.Race
.h2_ct_accIsW
? "write" : "read",
1173 xe
->XE
.Race
.h2_ct_accSzB
,
1174 xe
->XE
.Race
.h2_ct
->errmsg_index
);
1175 emit( " <hthreadid>%d</hthreadid>\n",
1176 xe
->XE
.Race
.h2_ct
->errmsg_index
);
1177 emit(" </xauxwhat>\n");
1178 VG_(pp_ExeContext
)( xe
->XE
.Race
.h2_ct_accEC
);
1181 if (xe
->XE
.Race
.h1_ct
) {
1182 emit( " <xauxwhat>\n");
1183 emit( " <text>This conflicts with a previous access "
1184 "by thread #%d, after</text>\n",
1185 xe
->XE
.Race
.h1_ct
->errmsg_index
);
1186 emit( " <hthreadid>%d</hthreadid>\n",
1187 xe
->XE
.Race
.h1_ct
->errmsg_index
);
1188 emit(" </xauxwhat>\n");
1189 if (xe
->XE
.Race
.h1_ct_mbsegstartEC
) {
1190 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegstartEC
);
1192 emit( " <auxwhat>(the start of the thread)</auxwhat>\n" );
1194 emit( " <auxwhat>but before</auxwhat>\n" );
1195 if (xe
->XE
.Race
.h1_ct_mbsegendEC
) {
1196 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegendEC
);
1198 emit( " <auxwhat>(the end of the the thread)</auxwhat>\n" );
1204 /* ------ Text ------ */
1205 announce_combined_LockP_vecs( xe
->XE
.Race
.locksHeldW
,
1206 xe
->XE
.Race
.h2_ct_locksHeldW
);
1208 emit( "Possible data race during %s of size %d "
1209 "at %p by thread #%d\n",
1210 what
, szB
, (void*)err_ga
, (Int
)xe
->XE
.Race
.thr
->errmsg_index
);
1212 tl_assert(xe
->XE
.Race
.locksHeldW
);
1213 show_LockP_summary_textmode( xe
->XE
.Race
.locksHeldW
, "" );
1214 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1216 if (xe
->XE
.Race
.h2_ct
) {
1217 tl_assert(xe
->XE
.Race
.h2_ct_accEC
); // assured by update_extra
1218 tl_assert(xe
->XE
.Race
.h2_ct_locksHeldW
);
1220 emit( "This conflicts with a previous %s of size %d "
1222 xe
->XE
.Race
.h2_ct_accIsW
? "write" : "read",
1223 xe
->XE
.Race
.h2_ct_accSzB
,
1224 xe
->XE
.Race
.h2_ct
->errmsg_index
);
1225 show_LockP_summary_textmode( xe
->XE
.Race
.h2_ct_locksHeldW
, "" );
1226 VG_(pp_ExeContext
)( xe
->XE
.Race
.h2_ct_accEC
);
1229 if (xe
->XE
.Race
.h1_ct
) {
1230 emit( " This conflicts with a previous access by thread #%d, "
1232 xe
->XE
.Race
.h1_ct
->errmsg_index
);
1233 if (xe
->XE
.Race
.h1_ct_mbsegstartEC
) {
1234 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegstartEC
);
1236 emit( " (the start of the thread)\n" );
1238 emit( " but before\n" );
1239 if (xe
->XE
.Race
.h1_ct_mbsegendEC
) {
1240 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegendEC
);
1242 emit( " (the end of the the thread)\n" );
1247 VG_(pp_addrinfo
) (err_ga
, &xe
->XE
.Race
.data_addrinfo
);
1248 break; /* case XE_Race */
1249 } /* case XE_Race */
1253 } /* switch (VG_(get_error_kind)(err)) */
1256 const HChar
* HG_(get_error_name
) ( const Error
* err
)
1258 switch (VG_(get_error_kind
)(err
)) {
1259 case XE_Race
: return "Race";
1260 case XE_UnlockUnlocked
: return "UnlockUnlocked";
1261 case XE_UnlockForeign
: return "UnlockForeign";
1262 case XE_UnlockBogus
: return "UnlockBogus";
1263 case XE_PthAPIerror
: return "PthAPIerror";
1264 case XE_LockOrder
: return "LockOrder";
1265 case XE_Misc
: return "Misc";
1266 default: tl_assert(0); /* fill in missing case */
1270 Bool
HG_(recognised_suppression
) ( const HChar
* name
, Supp
*su
)
1272 # define TRY(_name,_xskind) \
1273 if (0 == VG_(strcmp)(name, (_name))) { \
1274 VG_(set_supp_kind)(su, (_xskind)); \
1277 TRY("Race", XS_Race
);
1278 TRY("FreeMemLock", XS_FreeMemLock
);
1279 TRY("UnlockUnlocked", XS_UnlockUnlocked
);
1280 TRY("UnlockForeign", XS_UnlockForeign
);
1281 TRY("UnlockBogus", XS_UnlockBogus
);
1282 TRY("PthAPIerror", XS_PthAPIerror
);
1283 TRY("LockOrder", XS_LockOrder
);
1284 TRY("Misc", XS_Misc
);
1289 Bool
HG_(read_extra_suppression_info
) ( Int fd
, HChar
** bufpp
, SizeT
* nBufp
,
1290 Int
* lineno
, Supp
* su
)
1292 /* do nothing -- no extra suppression info present. Return True to
1293 indicate nothing bad happened. */
1297 Bool
HG_(error_matches_suppression
) ( const Error
* err
, const Supp
* su
)
1299 switch (VG_(get_supp_kind
)(su
)) {
1300 case XS_Race
: return VG_(get_error_kind
)(err
) == XE_Race
;
1301 case XS_UnlockUnlocked
: return VG_(get_error_kind
)(err
) == XE_UnlockUnlocked
;
1302 case XS_UnlockForeign
: return VG_(get_error_kind
)(err
) == XE_UnlockForeign
;
1303 case XS_UnlockBogus
: return VG_(get_error_kind
)(err
) == XE_UnlockBogus
;
1304 case XS_PthAPIerror
: return VG_(get_error_kind
)(err
) == XE_PthAPIerror
;
1305 case XS_LockOrder
: return VG_(get_error_kind
)(err
) == XE_LockOrder
;
1306 case XS_Misc
: return VG_(get_error_kind
)(err
) == XE_Misc
;
1307 //case XS_: return VG_(get_error_kind)(err) == XE_;
1308 default: tl_assert(0); /* fill in missing cases */
1312 SizeT
HG_(get_extra_suppression_info
) ( const Error
* err
,
1313 /*OUT*/HChar
* buf
, Int nBuf
)
1315 tl_assert(nBuf
>= 1);
1321 SizeT
HG_(print_extra_suppression_use
) ( const Supp
* su
,
1322 /*OUT*/HChar
* buf
, Int nBuf
)
1324 tl_assert(nBuf
>= 1);
1330 void HG_(update_extra_suppression_use
) ( const Error
* err
, const Supp
* su
)
1337 /*--------------------------------------------------------------------*/
1338 /*--- end hg_errors.c ---*/
1339 /*--------------------------------------------------------------------*/