2 /*--------------------------------------------------------------------*/
3 /*--- Error management for Helgrind. ---*/
4 /*--- hg_errors.c ---*/
5 /*--------------------------------------------------------------------*/
8 This file is part of Helgrind, a Valgrind tool for detecting errors
11 Copyright (C) 2007-2017 OpenWorks Ltd
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 The GNU General Public License is contained in the file COPYING.
32 #include "pub_tool_basics.h"
33 #include "pub_tool_libcbase.h"
34 #include "pub_tool_libcassert.h"
35 #include "pub_tool_libcprint.h"
36 #include "pub_tool_stacktrace.h"
37 #include "pub_tool_execontext.h"
38 #include "pub_tool_errormgr.h"
39 #include "pub_tool_wordfm.h"
40 #include "pub_tool_xarray.h"
41 #include "pub_tool_debuginfo.h"
42 #include "pub_tool_threadstate.h"
43 #include "pub_tool_options.h" // VG_(clo_xml)
44 #include "pub_tool_aspacemgr.h"
45 #include "pub_tool_addrinfo.h"
47 #include "hg_basics.h"
48 #include "hg_addrdescr.h"
49 #include "hg_wordset.h"
50 #include "hg_lock_n_thread.h"
52 #include "hg_errors.h" /* self */
55 /*----------------------------------------------------------------*/
56 /*--- Error management -- storage ---*/
57 /*----------------------------------------------------------------*/
59 /* maps (by value) strings to a copy of them in ARENA_TOOL */
61 static WordFM
* string_table
= NULL
;
63 ULong
HG_(stats__string_table_queries
) = 0;
65 ULong
HG_(stats__string_table_get_map_size
) ( void ) {
66 return string_table
? (ULong
)VG_(sizeFM
)(string_table
) : 0;
69 static Word
string_table_cmp ( UWord s1
, UWord s2
) {
70 return (Word
)VG_(strcmp
)( (HChar
*)s1
, (HChar
*)s2
);
73 static HChar
* string_table_strdup ( const HChar
* str
) {
75 HG_(stats__string_table_queries
)++;
79 string_table
= VG_(newFM
)( HG_(zalloc
), "hg.sts.1",
80 HG_(free
), string_table_cmp
);
82 if (VG_(lookupFM
)( string_table
,
83 NULL
, (UWord
*)©
, (UWord
)str
)) {
85 if (0) VG_(printf
)("string_table_strdup: %p -> %p\n", str
, copy
);
88 copy
= HG_(strdup
)("hg.sts.2", str
);
89 VG_(addToFM
)( string_table
, (UWord
)copy
, (UWord
)copy
);
94 /* maps from Lock .unique fields to LockP*s */
96 static WordFM
* map_LockN_to_P
= NULL
;
98 ULong
HG_(stats__LockN_to_P_queries
) = 0;
100 ULong
HG_(stats__LockN_to_P_get_map_size
) ( void ) {
101 return map_LockN_to_P
? (ULong
)VG_(sizeFM
)(map_LockN_to_P
) : 0;
104 static Word
lock_unique_cmp ( UWord lk1W
, UWord lk2W
)
106 Lock
* lk1
= (Lock
*)lk1W
;
107 Lock
* lk2
= (Lock
*)lk2W
;
108 tl_assert( HG_(is_sane_LockNorP
)(lk1
) );
109 tl_assert( HG_(is_sane_LockNorP
)(lk2
) );
110 if (lk1
->unique
< lk2
->unique
) return -1;
111 if (lk1
->unique
> lk2
->unique
) return 1;
115 /* Given a normal Lock (LockN), convert it to a persistent Lock
116 (LockP). In some cases the LockN could be invalid (if it's been
117 freed), so we enquire, in hg_main.c's admin_locks list, whether it
118 is in fact valid. If allowed_to_be_invalid is True, then it's OK
119 for the LockN to be invalid, in which case Lock_INVALID is
120 returned. In all other cases, we insist that the LockN is a valid
121 lock, and return its corresponding LockP.
123 Why can LockNs sometimes be invalid? Because they are harvested
124 from locksets that are attached to the OldRef info for conflicting
125 threads. By the time we detect a race, some of the elements
126 the lockset may have been destroyed by the client, in which case
127 the corresponding Lock structures we maintain will have been freed.
129 So we check that each LockN is a member of the admin_locks double
130 linked list of all Lock structures. That stops us prodding around
131 in potentially freed-up Lock structures. However, it's not quite a
132 proper check: if a new Lock has been reallocated at the same
133 address as one which was previously freed, we'll wind up copying
134 the new one as the basis for the LockP, which is completely bogus
135 because it is unrelated to the previous Lock that lived there.
136 Let's hope that doesn't happen too often.
138 static Lock
* mk_LockP_from_LockN ( Lock
* lkn
,
139 Bool allowed_to_be_invalid
)
142 HG_(stats__LockN_to_P_queries
)++;
144 /* First off, let's do some sanity checks. If
145 allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
146 in admin_locks; else we must assert. If it is True, it's OK for
147 it not to be findable, but in that case we must return
148 Lock_INVALID right away. */
149 Lock
* lock_list
= HG_(get_admin_locks
)();
151 if (lock_list
== lkn
)
153 lock_list
= lock_list
->admin_next
;
155 if (lock_list
== NULL
) {
156 /* We didn't find it. That possibility has to be OK'd by the
158 tl_assert(allowed_to_be_invalid
);
162 /* So we must be looking at a valid LockN. */
163 tl_assert( HG_(is_sane_LockN
)(lkn
) );
165 if (!map_LockN_to_P
) {
166 map_LockN_to_P
= VG_(newFM
)( HG_(zalloc
), "hg.mLPfLN.1",
167 HG_(free
), lock_unique_cmp
);
169 if (!VG_(lookupFM
)( map_LockN_to_P
, NULL
, (UWord
*)&lkp
, (UWord
)lkn
)) {
170 lkp
= HG_(zalloc
)( "hg.mLPfLN.2", sizeof(Lock
) );
172 lkp
->admin_next
= NULL
;
173 lkp
->admin_prev
= NULL
;
174 lkp
->magic
= LockP_MAGIC
;
175 /* Forget about the bag of lock holders - don't copy that.
176 Also, acquired_at should be NULL whenever heldBy is, and vice
177 versa. Also forget about the associated libhb synch object. */
180 lkp
->acquired_at
= NULL
;
182 VG_(addToFM
)( map_LockN_to_P
, (UWord
)lkp
, (UWord
)lkp
);
184 tl_assert( HG_(is_sane_LockP
)(lkp
) );
188 static Int
sort_by_guestaddr(const void* n1
, const void* n2
)
190 const Lock
* l1
= *(const Lock
*const *)n1
;
191 const Lock
* l2
= *(const Lock
*const *)n2
;
193 Addr a1
= l1
== Lock_INVALID
? 0 : l1
->guestaddr
;
194 Addr a2
= l2
== Lock_INVALID
? 0 : l2
->guestaddr
;
195 if (a1
< a2
) return -1;
196 if (a1
> a2
) return 1;
200 /* Expand a WordSet of LockN*'s into a NULL-terminated vector of
201 LockP*'s. Any LockN's that can't be converted into a LockP
202 (because they have been freed, see comment on mk_LockP_from_LockN)
203 are converted instead into the value Lock_INVALID. Hence the
204 returned vector is a sequence: zero or more (valid LockP* or
205 LockN_INVALID), terminated by a NULL. */
207 Lock
** enumerate_WordSet_into_LockP_vector( WordSetU
* univ_lsets
,
209 Bool allowed_to_be_invalid
)
211 tl_assert(univ_lsets
);
212 tl_assert( HG_(plausibleWS
)(univ_lsets
, lockset
) );
213 UWord nLocks
= HG_(cardinalityWS
)(univ_lsets
, lockset
);
214 Lock
** lockPs
= HG_(zalloc
)( "hg.eWSiLPa",
215 (nLocks
+1) * sizeof(Lock
*) );
216 tl_assert(lockPs
[nLocks
] == NULL
); /* pre-NULL terminated */
217 UWord
* lockNs
= NULL
;
220 /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
221 lockset is empty; hence the guarding "if". Sigh. */
222 HG_(getPayloadWS
)( &lockNs
, &nLockNs
, univ_lsets
, lockset
);
226 /* Convert to LockPs. */
227 for (i
= 0; i
< nLockNs
; i
++) {
228 lockPs
[i
] = mk_LockP_from_LockN( (Lock
*)lockNs
[i
],
229 allowed_to_be_invalid
);
231 /* Sort the locks by increasing Lock::guestaddr to avoid jitters
233 VG_(ssort
)(lockPs
, nLockNs
, sizeof lockPs
[0], sort_by_guestaddr
);
238 /* Get the number of useful elements in a vector created by
239 enumerate_WordSet_into_LockP_vector. Returns both the total number
240 of elements (not including the terminating NULL) and the number of
241 non-Lock_INVALID elements. */
242 static void count_LockP_vector ( /*OUT*/UWord
* nLocks
,
243 /*OUT*/UWord
* nLocksValid
,
247 *nLocks
= *nLocksValid
= 0;
251 if (vec
[n
] != Lock_INVALID
)
257 /* Find out whether 'lk' is in 'vec'. */
258 static Bool
elem_LockP_vector ( Lock
** vec
, Lock
* lk
)
274 race: program counter
280 FIXME: how does state printing interact with lockset gc?
281 Are the locksets in prev/curr state always valid?
282 Ditto question for the threadsets
283 ThreadSets - probably are always valid if Threads
284 are never thrown away.
285 LockSets - could at least print the lockset elements that
286 correspond to actual locks at the time of printing. Hmm.
292 XE_Race
=1101, // race
293 XE_UnlockUnlocked
, // unlocking a not-locked lock
294 XE_UnlockForeign
, // unlocking a lock held by some other thread
295 XE_UnlockBogus
, // unlocking an address not known to be a lock
296 XE_PthAPIerror
, // error from the POSIX pthreads API
297 XE_LockOrder
, // lock order error
298 XE_Misc
// misc other error (w/ string to describe it)
302 /* Extra contexts for kinds */
310 AddrInfo data_addrinfo
;
314 /* h1_* and h2_* provide some description of a previously
315 observed access with which we are conflicting. */
316 Thread
* h1_ct
; /* non-NULL means h1 info present */
317 ExeContext
* h1_ct_mbsegstartEC
;
318 ExeContext
* h1_ct_mbsegendEC
;
319 Thread
* h2_ct
; /* non-NULL means h2 info present */
320 ExeContext
* h2_ct_accEC
;
323 Lock
** h2_ct_locksHeldW
;
326 Thread
* thr
; /* doing the unlocking */
327 Lock
* lock
; /* lock (that is already unlocked) */
330 Thread
* thr
; /* doing the unlocking */
331 Thread
* owner
; /* thread that actually holds the lock */
332 Lock
* lock
; /* lock (that is held by 'owner') */
335 Thread
* thr
; /* doing the unlocking */
336 Addr lock_ga
; /* purported address of the lock */
340 HChar
* fnname
; /* persistent, in tool-arena */
341 Word err
; /* pth error code */
342 HChar
* errstr
; /* persistent, in tool-arena */
346 /* The first 4 fields describe the previously observed
347 (should-be) ordering. */
348 Lock
* shouldbe_earlier_lk
;
349 Lock
* shouldbe_later_lk
;
350 ExeContext
* shouldbe_earlier_ec
;
351 ExeContext
* shouldbe_later_ec
;
352 /* In principle we need to record two more stacks, from
353 this thread, when acquiring the locks in the "wrong"
354 order. In fact the wallclock-later acquisition by this
355 thread is recorded in the main stack for this error.
356 So we only need a stack for the earlier acquisition by
358 ExeContext
* actual_earlier_ec
;
362 HChar
* errstr
; /* persistent, in tool-arena */
363 HChar
* auxstr
; /* optional, persistent, in tool-arena */
364 ExeContext
* auxctx
; /* optional */
370 static void init_XError ( XError
* xe
) {
371 VG_(memset
)(xe
, 0, sizeof(*xe
) );
372 xe
->tag
= XE_Race
-1; /* bogus */
376 /* Extensions of suppressions */
379 XS_Race
=1201, /* race */
391 /* Updates the copy with address info if necessary. */
392 UInt
HG_(update_extra
) ( const Error
* err
)
394 XError
* xe
= (XError
*)VG_(get_error_extra
)(err
);
396 //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
397 // describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
400 if (xe
->tag
== XE_Race
) {
402 /* Note the set of locks that the thread is (w-)holding.
403 Convert the WordSetID of LockN*'s into a NULL-terminated
404 vector of LockP*'s. We don't expect to encounter any invalid
405 LockNs in this conversion. */
406 tl_assert(xe
->XE
.Race
.thr
);
407 xe
->XE
.Race
.locksHeldW
408 = enumerate_WordSet_into_LockP_vector(
409 HG_(get_univ_lsets
)(),
410 xe
->XE
.Race
.thr
->locksetW
,
411 False
/*!allowed_to_be_invalid*/
414 /* See if we can come up with a source level description of the
415 raced-upon address. This is potentially expensive, which is
416 why it's only done at the update_extra point, not when the
417 error is initially created. */
421 VG_(printf
)("HG_(update_extra): "
422 "%d conflicting-event queries\n", xxx
);
424 HG_(describe_addr
) (VG_(get_ExeContext_epoch
)(VG_(get_error_where
)(err
)),
425 xe
->XE
.Race
.data_addr
, &xe
->XE
.Race
.data_addrinfo
);
427 /* And poke around in the conflicting-event map, to see if we
428 can rustle up a plausible-looking conflicting memory access
430 if (HG_(clo_history_level
) >= 2) {
432 ExeContext
* wherep
= NULL
;
433 Addr acc_addr
= xe
->XE
.Race
.data_addr
;
434 Int acc_szB
= xe
->XE
.Race
.szB
;
435 Thr
* acc_thr
= xe
->XE
.Race
.thr
->hbthr
;
436 Bool acc_isW
= xe
->XE
.Race
.isWrite
;
438 Bool conf_isW
= False
;
439 WordSetID conf_locksHeldW
= 0;
440 tl_assert(!xe
->XE
.Race
.h2_ct_accEC
);
441 tl_assert(!xe
->XE
.Race
.h2_ct
);
442 if (libhb_event_map_lookup(
443 &wherep
, &thrp
, &conf_szB
, &conf_isW
, &conf_locksHeldW
,
444 acc_thr
, acc_addr
, acc_szB
, acc_isW
)) {
448 threadp
= libhb_get_Thr_hgthread( thrp
);
450 xe
->XE
.Race
.h2_ct_accEC
= wherep
;
451 xe
->XE
.Race
.h2_ct
= threadp
;
452 xe
->XE
.Race
.h2_ct_accSzB
= (Int
)conf_szB
;
453 xe
->XE
.Race
.h2_ct_accIsW
= conf_isW
;
454 xe
->XE
.Race
.h2_ct_locksHeldW
455 = enumerate_WordSet_into_LockP_vector(
456 HG_(get_univ_lsets
)(),
458 True
/*allowed_to_be_invalid*/
463 // both NULL or both non-NULL
464 tl_assert( (!!xe
->XE
.Race
.h2_ct
) == (!!xe
->XE
.Race
.h2_ct_accEC
) );
467 return sizeof(XError
);
470 void HG_(record_error_Race
) ( Thread
* thr
,
471 Addr data_addr
, Int szB
, Bool isWrite
,
473 ExeContext
* h1_ct_segstart
,
474 ExeContext
* h1_ct_mbsegendEC
)
477 tl_assert( HG_(is_sane_Thread
)(thr
) );
479 # if defined(VGO_linux)
480 /* Skip any races on locations apparently in GOTPLT sections. This
481 is said to be caused by ld.so poking PLT table entries (or
482 whatever) when it writes the resolved address of a dynamically
483 linked routine, into the table (or whatever) when it is called
484 for the first time. */
486 VgSectKind sect
= VG_(DebugInfo_sect_kind
)( NULL
, data_addr
);
487 if (0) VG_(printf
)("XXXXXXXXX RACE on %#lx %s\n",
488 data_addr
, VG_(pp_SectKind
)(sect
));
489 /* SectPLT is required on ???-linux */
490 if (sect
== Vg_SectGOTPLT
) return;
491 /* SectPLT is required on ppc32/64-linux */
492 if (sect
== Vg_SectPLT
) return;
493 /* SectGOT is required on arm-linux */
494 if (sect
== Vg_SectGOT
) return;
500 xe
.XE
.Race
.data_addr
= data_addr
;
501 xe
.XE
.Race
.szB
= szB
;
502 xe
.XE
.Race
.isWrite
= isWrite
;
503 xe
.XE
.Race
.thr
= thr
;
504 tl_assert(isWrite
== False
|| isWrite
== True
);
505 tl_assert(szB
== 8 || szB
== 4 || szB
== 2 || szB
== 1);
506 /* Skip on the detailed description of the raced-on address at this
507 point; it's expensive. Leave it for the update_extra function
508 if we ever make it that far. */
509 xe
.XE
.Race
.data_addrinfo
.tag
= Addr_Undescribed
;
511 // Skip on any of the conflicting-access info at this point.
512 // It's expensive to obtain, and this error is more likely than
513 // not to be discarded. We'll fill these fields in in
514 // HG_(update_extra) just above, assuming the error ever makes
515 // it that far (unlikely).
516 xe
.XE
.Race
.h2_ct_accSzB
= 0;
517 xe
.XE
.Race
.h2_ct_accIsW
= False
;
518 xe
.XE
.Race
.h2_ct_accEC
= NULL
;
519 xe
.XE
.Race
.h2_ct
= NULL
;
520 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
521 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
523 xe
.XE
.Race
.h1_ct
= h1_ct
;
524 xe
.XE
.Race
.h1_ct_mbsegstartEC
= h1_ct_segstart
;
525 xe
.XE
.Race
.h1_ct_mbsegendEC
= h1_ct_mbsegendEC
;
527 VG_(maybe_record_error
)( thr
->coretid
,
528 XE_Race
, data_addr
, NULL
, &xe
);
531 void HG_(record_error_UnlockUnlocked
) ( Thread
* thr
, Lock
* lk
)
534 tl_assert( HG_(is_sane_Thread
)(thr
) );
535 tl_assert( HG_(is_sane_LockN
)(lk
) );
537 xe
.tag
= XE_UnlockUnlocked
;
538 xe
.XE
.UnlockUnlocked
.thr
540 xe
.XE
.UnlockUnlocked
.lock
541 = mk_LockP_from_LockN(lk
, False
/*!allowed_to_be_invalid*/);
543 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
544 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
545 VG_(maybe_record_error
)( thr
->coretid
,
546 XE_UnlockUnlocked
, 0, NULL
, &xe
);
549 void HG_(record_error_UnlockForeign
) ( Thread
* thr
,
550 Thread
* owner
, Lock
* lk
)
553 tl_assert( HG_(is_sane_Thread
)(thr
) );
554 tl_assert( HG_(is_sane_Thread
)(owner
) );
555 tl_assert( HG_(is_sane_LockN
)(lk
) );
557 xe
.tag
= XE_UnlockForeign
;
558 xe
.XE
.UnlockForeign
.thr
= thr
;
559 xe
.XE
.UnlockForeign
.owner
= owner
;
560 xe
.XE
.UnlockForeign
.lock
561 = mk_LockP_from_LockN(lk
, False
/*!allowed_to_be_invalid*/);
563 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
564 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
565 VG_(maybe_record_error
)( thr
->coretid
,
566 XE_UnlockForeign
, 0, NULL
, &xe
);
569 void HG_(record_error_UnlockBogus
) ( Thread
* thr
, Addr lock_ga
)
572 tl_assert( HG_(is_sane_Thread
)(thr
) );
574 xe
.tag
= XE_UnlockBogus
;
575 xe
.XE
.UnlockBogus
.thr
= thr
;
576 xe
.XE
.UnlockBogus
.lock_ga
= lock_ga
;
578 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
579 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
580 VG_(maybe_record_error
)( thr
->coretid
,
581 XE_UnlockBogus
, 0, NULL
, &xe
);
584 void HG_(record_error_LockOrder
)(
586 Lock
* shouldbe_earlier_lk
,
587 Lock
* shouldbe_later_lk
,
588 ExeContext
* shouldbe_earlier_ec
,
589 ExeContext
* shouldbe_later_ec
,
590 ExeContext
* actual_earlier_ec
594 tl_assert( HG_(is_sane_Thread
)(thr
) );
595 tl_assert(HG_(clo_track_lockorders
));
597 xe
.tag
= XE_LockOrder
;
598 xe
.XE
.LockOrder
.thr
= thr
;
599 xe
.XE
.LockOrder
.shouldbe_earlier_lk
600 = mk_LockP_from_LockN(shouldbe_earlier_lk
,
601 False
/*!allowed_to_be_invalid*/);
602 xe
.XE
.LockOrder
.shouldbe_earlier_ec
= shouldbe_earlier_ec
;
603 xe
.XE
.LockOrder
.shouldbe_later_lk
604 = mk_LockP_from_LockN(shouldbe_later_lk
,
605 False
/*!allowed_to_be_invalid*/);
606 xe
.XE
.LockOrder
.shouldbe_later_ec
= shouldbe_later_ec
;
607 xe
.XE
.LockOrder
.actual_earlier_ec
= actual_earlier_ec
;
609 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
610 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
611 VG_(maybe_record_error
)( thr
->coretid
,
612 XE_LockOrder
, 0, NULL
, &xe
);
615 void HG_(record_error_PthAPIerror
) ( Thread
* thr
, const HChar
* fnname
,
616 Word err
, const HChar
* errstr
)
619 tl_assert( HG_(is_sane_Thread
)(thr
) );
623 xe
.tag
= XE_PthAPIerror
;
624 xe
.XE
.PthAPIerror
.thr
= thr
;
625 xe
.XE
.PthAPIerror
.fnname
= string_table_strdup(fnname
);
626 xe
.XE
.PthAPIerror
.err
= err
;
627 xe
.XE
.PthAPIerror
.errstr
= string_table_strdup(errstr
);
629 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
630 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
631 VG_(maybe_record_error
)( thr
->coretid
,
632 XE_PthAPIerror
, 0, NULL
, &xe
);
635 void HG_(record_error_Misc_w_aux
) ( Thread
* thr
, const HChar
* errstr
,
636 const HChar
* auxstr
, ExeContext
* auxctx
)
639 tl_assert( HG_(is_sane_Thread
)(thr
) );
643 xe
.XE
.Misc
.thr
= thr
;
644 xe
.XE
.Misc
.errstr
= string_table_strdup(errstr
);
645 xe
.XE
.Misc
.auxstr
= auxstr
? string_table_strdup(auxstr
) : NULL
;
646 xe
.XE
.Misc
.auxctx
= auxctx
;
648 tl_assert( HG_(is_sane_ThreadId
)(thr
->coretid
) );
649 tl_assert( thr
->coretid
!= VG_INVALID_THREADID
);
650 VG_(maybe_record_error
)( thr
->coretid
,
651 XE_Misc
, 0, NULL
, &xe
);
654 void HG_(record_error_Misc
) ( Thread
* thr
, const HChar
* errstr
)
656 HG_(record_error_Misc_w_aux
)(thr
, errstr
, NULL
, NULL
);
659 Bool
HG_(eq_Error
) ( VgRes not_used
, const Error
* e1
, const Error
* e2
)
663 tl_assert(VG_(get_error_kind
)(e1
) == VG_(get_error_kind
)(e2
));
665 xe1
= (XError
*)VG_(get_error_extra
)(e1
);
666 xe2
= (XError
*)VG_(get_error_extra
)(e2
);
670 switch (VG_(get_error_kind
)(e1
)) {
672 return xe1
->XE
.Race
.szB
== xe2
->XE
.Race
.szB
673 && xe1
->XE
.Race
.isWrite
== xe2
->XE
.Race
.isWrite
674 && (HG_(clo_cmp_race_err_addrs
)
675 ? xe1
->XE
.Race
.data_addr
== xe2
->XE
.Race
.data_addr
677 case XE_UnlockUnlocked
:
678 return xe1
->XE
.UnlockUnlocked
.thr
== xe2
->XE
.UnlockUnlocked
.thr
679 && xe1
->XE
.UnlockUnlocked
.lock
== xe2
->XE
.UnlockUnlocked
.lock
;
680 case XE_UnlockForeign
:
681 return xe1
->XE
.UnlockForeign
.thr
== xe2
->XE
.UnlockForeign
.thr
682 && xe1
->XE
.UnlockForeign
.owner
== xe2
->XE
.UnlockForeign
.owner
683 && xe1
->XE
.UnlockForeign
.lock
== xe2
->XE
.UnlockForeign
.lock
;
685 return xe1
->XE
.UnlockBogus
.thr
== xe2
->XE
.UnlockBogus
.thr
686 && xe1
->XE
.UnlockBogus
.lock_ga
== xe2
->XE
.UnlockBogus
.lock_ga
;
688 return xe1
->XE
.PthAPIerror
.thr
== xe2
->XE
.PthAPIerror
.thr
689 && 0==VG_(strcmp
)(xe1
->XE
.PthAPIerror
.fnname
,
690 xe2
->XE
.PthAPIerror
.fnname
)
691 && xe1
->XE
.PthAPIerror
.err
== xe2
->XE
.PthAPIerror
.err
;
693 return xe1
->XE
.LockOrder
.thr
== xe2
->XE
.LockOrder
.thr
;
695 return xe1
->XE
.Misc
.thr
== xe2
->XE
.Misc
.thr
696 && 0==VG_(strcmp
)(xe1
->XE
.Misc
.errstr
, xe2
->XE
.Misc
.errstr
);
706 /*----------------------------------------------------------------*/
707 /*--- Error management -- printing ---*/
708 /*----------------------------------------------------------------*/
710 /* Do a printf-style operation on either the XML or normal output
711 channel, depending on the setting of VG_(clo_xml).
713 static void emit_WRK ( const HChar
* format
, va_list vargs
)
716 VG_(vprintf_xml
)(format
, vargs
);
718 VG_(vmessage
)(Vg_UserMsg
, format
, vargs
);
721 static void emit ( const HChar
* format
, ... ) PRINTF_CHECK(1, 2);
722 static void emit ( const HChar
* format
, ... )
725 va_start(vargs
, format
);
726 emit_WRK(format
, vargs
);
731 /* Announce (that is, print the point-of-creation) of 'thr'. Only do
732 this once, as we only want to see these announcements once per
733 thread. Returned Bool indicates whether or not an announcement was
736 static Bool
announce_one_thread ( Thread
* thr
)
738 tl_assert(HG_(is_sane_Thread
)(thr
));
739 tl_assert(thr
->errmsg_index
>= 1);
745 VG_(printf_xml
)("<announcethread>\n");
746 VG_(printf_xml
)(" <hthreadid>%d</hthreadid>\n", thr
->errmsg_index
);
747 if (thr
->errmsg_index
== 1) {
748 tl_assert(thr
->created_at
== NULL
);
749 VG_(printf_xml
)(" <isrootthread></isrootthread>\n");
751 tl_assert(thr
->created_at
!= NULL
);
752 VG_(pp_ExeContext
)( thr
->created_at
);
754 VG_(printf_xml
)("</announcethread>\n\n");
758 VG_(umsg
)("---Thread-Announcement----------"
759 "--------------------------------" "\n");
762 if (thr
->errmsg_index
== 1) {
763 tl_assert(thr
->created_at
== NULL
);
764 VG_(message
)(Vg_UserMsg
,
765 "Thread #%d is the program's root thread\n",
768 tl_assert(thr
->created_at
!= NULL
);
769 VG_(message
)(Vg_UserMsg
, "Thread #%d was created\n",
771 VG_(pp_ExeContext
)( thr
->created_at
);
773 VG_(message
)(Vg_UserMsg
, "\n");
777 thr
->announced
= True
;
782 static void announce_LockP ( Lock
* lk
)
785 if (lk
== Lock_INVALID
)
786 return; /* Can't be announced -- we know nothing about it. */
787 tl_assert(lk
->magic
== LockP_MAGIC
);
790 if (lk
->appeared_at
) {
791 emit( " <auxwhat>Lock at %p was first observed</auxwhat>\n",
793 VG_(pp_ExeContext
)( lk
->appeared_at
);
797 if (lk
->appeared_at
) {
798 VG_(umsg
)( " Lock at %p was first observed\n",
799 (void*)lk
->guestaddr
);
800 VG_(pp_ExeContext
)( lk
->appeared_at
);
802 VG_(umsg
)( " Lock at %p : no stacktrace for first observation\n",
803 (void*)lk
->guestaddr
);
805 HG_(get_and_pp_addrdescr
)
807 ? VG_(get_ExeContext_epoch
)(lk
->appeared_at
)
808 : VG_(current_DiEpoch
)(),
814 /* Announce (that is, print point-of-first-observation) for the
815 locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
816 static void announce_combined_LockP_vecs ( Lock
** lockvec
,
821 for (i
= 0; lockvec
[i
]; i
++) {
822 announce_LockP(lockvec
[i
]);
825 for (i
= 0; lockvec2
[i
]; i
++) {
826 Lock
* lk
= lockvec2
[i
];
827 if (!elem_LockP_vector(lockvec
, lk
))
834 static void show_LockP_summary_textmode ( Lock
** locks
, const HChar
* pre
)
838 UWord nLocks
= 0, nLocksValid
= 0;
839 count_LockP_vector(&nLocks
, &nLocksValid
, locks
);
840 tl_assert(nLocksValid
<= nLocks
);
843 VG_(umsg
)( "%sLocks held: none", pre
);
845 VG_(umsg
)( "%sLocks held: %lu, at address%s ",
846 pre
, nLocks
, nLocksValid
== 1 ? "" : "es" );
850 for (i
= 0; i
< nLocks
; i
++) {
851 if (locks
[i
] == Lock_INVALID
)
853 VG_(umsg
)( "%p", (void*)locks
[i
]->guestaddr
);
854 if (locks
[i
+1] != NULL
)
857 if (nLocksValid
< nLocks
)
858 VG_(umsg
)(" (and %lu that can't be shown)", nLocks
- nLocksValid
);
864 /* This is the "this error is due to be printed shortly; so have a
865 look at it any print any preamble you want" function. We use it to
866 announce any previously un-announced threads in the upcoming error
869 void HG_(before_pp_Error
) ( const Error
* err
)
873 xe
= (XError
*)VG_(get_error_extra
)(err
);
876 switch (VG_(get_error_kind
)(err
)) {
878 announce_one_thread( xe
->XE
.Misc
.thr
);
881 announce_one_thread( xe
->XE
.LockOrder
.thr
);
884 announce_one_thread( xe
->XE
.PthAPIerror
.thr
);
887 announce_one_thread( xe
->XE
.UnlockBogus
.thr
);
889 case XE_UnlockForeign
:
890 announce_one_thread( xe
->XE
.UnlockForeign
.thr
);
891 announce_one_thread( xe
->XE
.UnlockForeign
.owner
);
893 case XE_UnlockUnlocked
:
894 announce_one_thread( xe
->XE
.UnlockUnlocked
.thr
);
897 announce_one_thread( xe
->XE
.Race
.thr
);
898 if (xe
->XE
.Race
.h2_ct
)
899 announce_one_thread( xe
->XE
.Race
.h2_ct
);
900 if (xe
->XE
.Race
.h1_ct
)
901 announce_one_thread( xe
->XE
.Race
.h1_ct
);
902 if (xe
->XE
.Race
.data_addrinfo
.Addr
.Block
.alloc_tinfo
.tnr
) {
903 Thread
* thr
= get_admin_threads();
905 if (thr
->errmsg_index
906 == xe
->XE
.Race
.data_addrinfo
.Addr
.Block
.alloc_tinfo
.tnr
) {
907 announce_one_thread (thr
);
919 void HG_(pp_Error
) ( const Error
* err
)
921 const Bool xml
= VG_(clo_xml
); /* a shorthand, that's all */
924 VG_(umsg
)("--------------------------------"
925 "--------------------------------" "\n");
929 XError
*xe
= (XError
*)VG_(get_error_extra
)(err
);
933 emit( " <kind>%s</kind>\n", HG_(get_error_name
)(err
));
935 switch (VG_(get_error_kind
)(err
)) {
938 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.Misc
.thr
) );
942 emit( " <xwhat>\n" );
943 emit( " <text>Thread #%d: %s</text>\n",
944 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
,
945 xe
->XE
.Misc
.errstr
);
946 emit( " <hthreadid>%d</hthreadid>\n",
947 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
);
948 emit( " </xwhat>\n" );
949 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
950 if (xe
->XE
.Misc
.auxstr
) {
951 emit(" <auxwhat>%s</auxwhat>\n", xe
->XE
.Misc
.auxstr
);
952 if (xe
->XE
.Misc
.auxctx
)
953 VG_(pp_ExeContext
)( xe
->XE
.Misc
.auxctx
);
958 emit( "Thread #%d: %s\n",
959 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
,
960 xe
->XE
.Misc
.errstr
);
961 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
962 if (xe
->XE
.Misc
.auxstr
) {
963 emit(" %s\n", xe
->XE
.Misc
.auxstr
);
964 if (xe
->XE
.Misc
.auxctx
)
965 VG_(pp_ExeContext
)( xe
->XE
.Misc
.auxctx
);
973 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.LockOrder
.thr
) );
977 emit( " <xwhat>\n" );
978 emit( " <text>Thread #%d: lock order \"%p before %p\" "
980 (Int
)xe
->XE
.LockOrder
.thr
->errmsg_index
,
981 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
,
982 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
983 emit( " <hthreadid>%d</hthreadid>\n",
984 (Int
)xe
->XE
.LockOrder
.thr
->errmsg_index
);
985 emit( " </xwhat>\n" );
986 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
987 if (xe
->XE
.LockOrder
.shouldbe_earlier_ec
988 && xe
->XE
.LockOrder
.shouldbe_later_ec
) {
989 emit( " <auxwhat>Required order was established by "
990 "acquisition of lock at %p</auxwhat>\n",
991 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
);
992 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_earlier_ec
);
993 emit( " <auxwhat>followed by a later acquisition "
994 "of lock at %p</auxwhat>\n",
995 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
996 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_later_ec
);
998 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_earlier_lk
);
999 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_later_lk
);
1003 emit( "Thread #%d: lock order \"%p before %p\" violated\n",
1004 (Int
)xe
->XE
.LockOrder
.thr
->errmsg_index
,
1005 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
,
1006 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
1008 emit( "Observed (incorrect) order is: "
1009 "acquisition of lock at %p\n",
1010 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
1011 if (xe
->XE
.LockOrder
.actual_earlier_ec
) {
1012 VG_(pp_ExeContext
)(xe
->XE
.LockOrder
.actual_earlier_ec
);
1014 emit(" (stack unavailable)\n");
1017 emit(" followed by a later acquisition of lock at %p\n",
1018 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
);
1019 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1020 if (xe
->XE
.LockOrder
.shouldbe_earlier_ec
1021 && xe
->XE
.LockOrder
.shouldbe_later_ec
) {
1023 emit( "Required order was established by "
1024 "acquisition of lock at %p\n",
1025 (void*)xe
->XE
.LockOrder
.shouldbe_earlier_lk
->guestaddr
);
1026 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_earlier_ec
);
1028 emit( " followed by a later acquisition of lock at %p\n",
1029 (void*)xe
->XE
.LockOrder
.shouldbe_later_lk
->guestaddr
);
1030 VG_(pp_ExeContext
)( xe
->XE
.LockOrder
.shouldbe_later_ec
);
1033 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_earlier_lk
);
1034 announce_LockP ( xe
->XE
.LockOrder
.shouldbe_later_lk
);
1041 case XE_PthAPIerror
: {
1042 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.PthAPIerror
.thr
) );
1046 emit( " <xwhat>\n" );
1048 " <text>Thread #%d's call to %pS failed</text>\n",
1049 (Int
)xe
->XE
.PthAPIerror
.thr
->errmsg_index
,
1050 xe
->XE
.PthAPIerror
.fnname
);
1051 emit( " <hthreadid>%d</hthreadid>\n",
1052 (Int
)xe
->XE
.PthAPIerror
.thr
->errmsg_index
);
1053 emit( " </xwhat>\n" );
1054 emit( " <what>with error code %ld (%s)</what>\n",
1055 xe
->XE
.PthAPIerror
.err
, xe
->XE
.PthAPIerror
.errstr
);
1056 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1060 emit( "Thread #%d's call to %pS failed\n",
1061 (Int
)xe
->XE
.PthAPIerror
.thr
->errmsg_index
,
1062 xe
->XE
.PthAPIerror
.fnname
);
1063 emit( " with error code %ld (%s)\n",
1064 xe
->XE
.PthAPIerror
.err
, xe
->XE
.PthAPIerror
.errstr
);
1065 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1072 case XE_UnlockBogus
: {
1073 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockBogus
.thr
) );
1077 emit( " <xwhat>\n" );
1078 emit( " <text>Thread #%d unlocked an invalid "
1079 "lock at %p</text>\n",
1080 (Int
)xe
->XE
.UnlockBogus
.thr
->errmsg_index
,
1081 (void*)xe
->XE
.UnlockBogus
.lock_ga
);
1082 emit( " <hthreadid>%d</hthreadid>\n",
1083 (Int
)xe
->XE
.UnlockBogus
.thr
->errmsg_index
);
1084 emit( " </xwhat>\n" );
1085 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1089 emit( "Thread #%d unlocked an invalid lock at %p\n",
1090 (Int
)xe
->XE
.UnlockBogus
.thr
->errmsg_index
,
1091 (void*)xe
->XE
.UnlockBogus
.lock_ga
);
1092 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1099 case XE_UnlockForeign
: {
1100 tl_assert( HG_(is_sane_LockP
)( xe
->XE
.UnlockForeign
.lock
) );
1101 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockForeign
.owner
) );
1102 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockForeign
.thr
) );
1106 emit( " <xwhat>\n" );
1107 emit( " <text>Thread #%d unlocked lock at %p "
1108 "currently held by thread #%d</text>\n",
1109 (Int
)xe
->XE
.UnlockForeign
.thr
->errmsg_index
,
1110 (void*)xe
->XE
.UnlockForeign
.lock
->guestaddr
,
1111 (Int
)xe
->XE
.UnlockForeign
.owner
->errmsg_index
);
1112 emit( " <hthreadid>%d</hthreadid>\n",
1113 (Int
)xe
->XE
.UnlockForeign
.thr
->errmsg_index
);
1114 emit( " <hthreadid>%d</hthreadid>\n",
1115 (Int
)xe
->XE
.UnlockForeign
.owner
->errmsg_index
);
1116 emit( " </xwhat>\n" );
1117 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1118 announce_LockP ( xe
->XE
.UnlockForeign
.lock
);
1122 emit( "Thread #%d unlocked lock at %p "
1123 "currently held by thread #%d\n",
1124 (Int
)xe
->XE
.UnlockForeign
.thr
->errmsg_index
,
1125 (void*)xe
->XE
.UnlockForeign
.lock
->guestaddr
,
1126 (Int
)xe
->XE
.UnlockForeign
.owner
->errmsg_index
);
1127 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1128 announce_LockP ( xe
->XE
.UnlockForeign
.lock
);
1135 case XE_UnlockUnlocked
: {
1136 tl_assert( HG_(is_sane_LockP
)( xe
->XE
.UnlockUnlocked
.lock
) );
1137 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.UnlockUnlocked
.thr
) );
1141 emit( " <xwhat>\n" );
1142 emit( " <text>Thread #%d unlocked a "
1143 "not-locked lock at %p</text>\n",
1144 (Int
)xe
->XE
.UnlockUnlocked
.thr
->errmsg_index
,
1145 (void*)xe
->XE
.UnlockUnlocked
.lock
->guestaddr
);
1146 emit( " <hthreadid>%d</hthreadid>\n",
1147 (Int
)xe
->XE
.UnlockUnlocked
.thr
->errmsg_index
);
1148 emit( " </xwhat>\n" );
1149 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1150 announce_LockP ( xe
->XE
.UnlockUnlocked
.lock
);
1154 emit( "Thread #%d unlocked a not-locked lock at %p\n",
1155 (Int
)xe
->XE
.UnlockUnlocked
.thr
->errmsg_index
,
1156 (void*)xe
->XE
.UnlockUnlocked
.lock
->guestaddr
);
1157 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1158 announce_LockP ( xe
->XE
.UnlockUnlocked
.lock
);
1169 what
= xe
->XE
.Race
.isWrite
? "write" : "read";
1170 szB
= xe
->XE
.Race
.szB
;
1171 err_ga
= VG_(get_error_address
)(err
);
1173 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.Race
.thr
));
1174 if (xe
->XE
.Race
.h2_ct
)
1175 tl_assert( HG_(is_sane_Thread
)( xe
->XE
.Race
.h2_ct
));
1179 /* ------ XML ------ */
1180 emit( " <xwhat>\n" );
1181 emit( " <text>Possible data race during %s of size %d "
1182 "at %p by thread #%d</text>\n",
1183 what
, szB
, (void*)err_ga
, (Int
)xe
->XE
.Race
.thr
->errmsg_index
);
1184 emit( " <hthreadid>%d</hthreadid>\n",
1185 (Int
)xe
->XE
.Race
.thr
->errmsg_index
);
1186 emit( " </xwhat>\n" );
1187 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1189 if (xe
->XE
.Race
.h2_ct
) {
1190 tl_assert(xe
->XE
.Race
.h2_ct_accEC
); // assured by update_extra
1191 emit( " <xauxwhat>\n");
1192 emit( " <text>This conflicts with a previous %s of size %d "
1193 "by thread #%d</text>\n",
1194 xe
->XE
.Race
.h2_ct_accIsW
? "write" : "read",
1195 xe
->XE
.Race
.h2_ct_accSzB
,
1196 xe
->XE
.Race
.h2_ct
->errmsg_index
);
1197 emit( " <hthreadid>%d</hthreadid>\n",
1198 xe
->XE
.Race
.h2_ct
->errmsg_index
);
1199 emit(" </xauxwhat>\n");
1200 VG_(pp_ExeContext
)( xe
->XE
.Race
.h2_ct_accEC
);
1203 if (xe
->XE
.Race
.h1_ct
) {
1204 emit( " <xauxwhat>\n");
1205 emit( " <text>This conflicts with a previous access "
1206 "by thread #%d, after</text>\n",
1207 xe
->XE
.Race
.h1_ct
->errmsg_index
);
1208 emit( " <hthreadid>%d</hthreadid>\n",
1209 xe
->XE
.Race
.h1_ct
->errmsg_index
);
1210 emit(" </xauxwhat>\n");
1211 if (xe
->XE
.Race
.h1_ct_mbsegstartEC
) {
1212 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegstartEC
);
1214 emit( " <auxwhat>(the start of the thread)</auxwhat>\n" );
1216 emit( " <auxwhat>but before</auxwhat>\n" );
1217 if (xe
->XE
.Race
.h1_ct_mbsegendEC
) {
1218 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegendEC
);
1220 emit( " <auxwhat>(the end of the thread)</auxwhat>\n" );
1226 /* ------ Text ------ */
1227 announce_combined_LockP_vecs( xe
->XE
.Race
.locksHeldW
,
1228 xe
->XE
.Race
.h2_ct_locksHeldW
);
1230 emit( "Possible data race during %s of size %d "
1231 "at %p by thread #%d\n",
1232 what
, szB
, (void*)err_ga
, (Int
)xe
->XE
.Race
.thr
->errmsg_index
);
1234 tl_assert(xe
->XE
.Race
.locksHeldW
);
1235 show_LockP_summary_textmode( xe
->XE
.Race
.locksHeldW
, "" );
1236 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
1238 if (xe
->XE
.Race
.h2_ct
) {
1239 tl_assert(xe
->XE
.Race
.h2_ct_accEC
); // assured by update_extra
1240 tl_assert(xe
->XE
.Race
.h2_ct_locksHeldW
);
1242 emit( "This conflicts with a previous %s of size %d "
1244 xe
->XE
.Race
.h2_ct_accIsW
? "write" : "read",
1245 xe
->XE
.Race
.h2_ct_accSzB
,
1246 xe
->XE
.Race
.h2_ct
->errmsg_index
);
1247 show_LockP_summary_textmode( xe
->XE
.Race
.h2_ct_locksHeldW
, "" );
1248 VG_(pp_ExeContext
)( xe
->XE
.Race
.h2_ct_accEC
);
1251 if (xe
->XE
.Race
.h1_ct
) {
1252 emit( " This conflicts with a previous access by thread #%d, "
1254 xe
->XE
.Race
.h1_ct
->errmsg_index
);
1255 if (xe
->XE
.Race
.h1_ct_mbsegstartEC
) {
1256 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegstartEC
);
1258 emit( " (the start of the thread)\n" );
1260 emit( " but before\n" );
1261 if (xe
->XE
.Race
.h1_ct_mbsegendEC
) {
1262 VG_(pp_ExeContext
)( xe
->XE
.Race
.h1_ct_mbsegendEC
);
1264 emit( " (the end of the thread)\n" );
1269 VG_(pp_addrinfo
) (err_ga
, &xe
->XE
.Race
.data_addrinfo
);
1270 break; /* case XE_Race */
1271 } /* case XE_Race */
1275 } /* switch (VG_(get_error_kind)(err)) */
1278 void HG_(print_access
) (StackTrace ips
, UInt n_ips
,
1283 WordSetID locksHeldW
)
1287 threadp
= libhb_get_Thr_hgthread( thr_a
);
1289 if (!threadp
->announced
) {
1290 /* This is for interactive use. We announce the thread if needed,
1291 but reset it to not announced afterwards, because we want
1292 the thread to be announced on the error output/log if needed. */
1293 announce_one_thread (threadp
);
1294 threadp
->announced
= False
;
1297 announce_one_thread (threadp
);
1298 VG_(printf
) ("%s of size %d at %p by thread #%d",
1299 isW
? "write" : "read",
1300 (int)SzB
, (void*)ga
, threadp
->errmsg_index
);
1301 if (threadp
->coretid
== VG_INVALID_THREADID
)
1302 VG_(printf
)(" tid (exited)\n");
1304 VG_(printf
)(" tid %u\n", threadp
->coretid
);
1306 Lock
** locksHeldW_P
;
1307 locksHeldW_P
= enumerate_WordSet_into_LockP_vector(
1308 HG_(get_univ_lsets
)(),
1310 True
/*allowed_to_be_invalid*/
1312 show_LockP_summary_textmode( locksHeldW_P
, "" );
1313 HG_(free
) (locksHeldW_P
);
1315 // FIXME PW EPOCH : need the real ips epoch.
1316 VG_(pp_StackTrace
)( VG_(current_DiEpoch
)(), ips
, n_ips
);
1320 const HChar
* HG_(get_error_name
) ( const Error
* err
)
1322 switch (VG_(get_error_kind
)(err
)) {
1323 case XE_Race
: return "Race";
1324 case XE_UnlockUnlocked
: return "UnlockUnlocked";
1325 case XE_UnlockForeign
: return "UnlockForeign";
1326 case XE_UnlockBogus
: return "UnlockBogus";
1327 case XE_PthAPIerror
: return "PthAPIerror";
1328 case XE_LockOrder
: return "LockOrder";
1329 case XE_Misc
: return "Misc";
1330 default: tl_assert(0); /* fill in missing case */
1334 Bool
HG_(recognised_suppression
) ( const HChar
* name
, Supp
*su
)
1336 # define TRY(_name,_xskind) \
1337 if (0 == VG_(strcmp)(name, (_name))) { \
1338 VG_(set_supp_kind)(su, (_xskind)); \
1341 TRY("Race", XS_Race
);
1342 TRY("FreeMemLock", XS_FreeMemLock
);
1343 TRY("UnlockUnlocked", XS_UnlockUnlocked
);
1344 TRY("UnlockForeign", XS_UnlockForeign
);
1345 TRY("UnlockBogus", XS_UnlockBogus
);
1346 TRY("PthAPIerror", XS_PthAPIerror
);
1347 TRY("LockOrder", XS_LockOrder
);
1348 TRY("Misc", XS_Misc
);
1353 Bool
HG_(read_extra_suppression_info
) ( Int fd
, HChar
** bufpp
, SizeT
* nBufp
,
1354 Int
* lineno
, Supp
* su
)
1356 /* do nothing -- no extra suppression info present. Return True to
1357 indicate nothing bad happened. */
1361 Bool
HG_(error_matches_suppression
) ( const Error
* err
, const Supp
* su
)
1363 switch (VG_(get_supp_kind
)(su
)) {
1364 case XS_Race
: return VG_(get_error_kind
)(err
) == XE_Race
;
1365 case XS_UnlockUnlocked
: return VG_(get_error_kind
)(err
) == XE_UnlockUnlocked
;
1366 case XS_UnlockForeign
: return VG_(get_error_kind
)(err
) == XE_UnlockForeign
;
1367 case XS_UnlockBogus
: return VG_(get_error_kind
)(err
) == XE_UnlockBogus
;
1368 case XS_PthAPIerror
: return VG_(get_error_kind
)(err
) == XE_PthAPIerror
;
1369 case XS_LockOrder
: return VG_(get_error_kind
)(err
) == XE_LockOrder
;
1370 case XS_Misc
: return VG_(get_error_kind
)(err
) == XE_Misc
;
1371 //case XS_: return VG_(get_error_kind)(err) == XE_;
1372 default: tl_assert(0); /* fill in missing cases */
1376 SizeT
HG_(get_extra_suppression_info
) ( const Error
* err
,
1377 /*OUT*/HChar
* buf
, Int nBuf
)
1379 tl_assert(nBuf
>= 1);
1385 SizeT
HG_(print_extra_suppression_use
) ( const Supp
* su
,
1386 /*OUT*/HChar
* buf
, Int nBuf
)
1388 tl_assert(nBuf
>= 1);
1394 void HG_(update_extra_suppression_use
) ( const Error
* err
, const Supp
* su
)
1401 /*--------------------------------------------------------------------*/
1402 /*--- end hg_errors.c ---*/
1403 /*--------------------------------------------------------------------*/