/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks Ltd

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"

#include "hg_basics.h"
#include "hg_addrdescr.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"

#include "hg_errors.h"            /* self */


/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/

/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;

ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}

static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}

static HChar* string_table_strdup ( const HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   tl_assert(str);
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (UWord*)&copy, (UWord)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy);
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
      return copy;
   }
}
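
/* Usage sketch (illustrative only, not called anywhere): equal
   strings map to a single interned copy, so a second request for the
   same text returns the pointer created by the first, and interned
   strings can be held indefinitely without worrying about the
   lifetime of the caller's buffer:

      HChar* a = string_table_strdup("pthread_mutex_lock");
      HChar* b = string_table_strdup("pthread_mutex_lock");
      tl_assert(a == b);  // the second call hits the map
*/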

/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;

ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}

static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}

/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks doubly
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often. */

static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
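
/* Illustrative property (a sketch, not tool logic): converting the
   same still-valid LockN twice yields the same LockP, because
   map_LockN_to_P is keyed on the LockN's .unique field:

      Lock* p1 = mk_LockP_from_LockN(lkn, False);
      Lock* p2 = mk_LockP_from_LockN(lkn, False);
      tl_assert(p1 == p2);

   This is what makes the pointer-equality comparisons of .lock
   fields in HG_(eq_Error) below meaningful. */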

static Int sort_by_guestaddr(const void* n1, const void* n2)
{
   const Lock* l1 = *(const Lock *const *)n1;
   const Lock* l2 = *(const Lock *const *)n2;

   Addr a1 = l1 == Lock_INVALID ? 0 : l1->guestaddr;
   Addr a2 = l2 == Lock_INVALID ? 0 : l2->guestaddr;
   if (a1 < a2) return -1;
   if (a1 > a2) return 1;
   return 0;
}

/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   UWord  i;
   if (nLocks > 0) {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   tl_assert(nLockNs == nLocks);

   /* Convert to LockPs. */
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }

   /* Sort the locks by increasing Lock::guestaddr to avoid jitters
      in the output. */
   VG_(ssort)(lockPs, nLockNs, sizeof lockPs[0], sort_by_guestaddr);

   return lockPs;
}
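
/* For example (illustrative): expanding a lockset {L1, L2, Lfreed},
   where Lfreed has been destroyed by the client, with
   allowed_to_be_invalid == True yields

      { Lock_INVALID, LockP(L1), LockP(L2), NULL }

   Lock_INVALID sorts first because sort_by_guestaddr treats it as
   guest address 0. */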

/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}

/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}

/* Errors:

      race: program counter

   FIXME: how does state printing interact with lockset gc?
   Are the locksets in prev/curr state always valid?
   Ditto question for the threadsets
      ThreadSets - probably are always valid if Threads
      are never thrown away.
      LockSets - could at least print the lockset elements that
      correspond to actual locks at the time of printing.  Hmm.
*/

typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc            // misc other error (w/ string to describe it)
   }
   XErrorTag;

/* Extra contexts for kinds */
typedef
   struct  {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            AddrInfo    data_addrinfo;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;   /* doing the unlocking */
            Thread* owner; /* thread that actually holds the lock */
            Lock*   lock;  /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Lock*       shouldbe_earlier_lk;
            Lock*       shouldbe_later_lk;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;

static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}

/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc
   }
   XSuppTag;

/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( const Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}

   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      HG_(describe_addr) (VG_(get_ExeContext_epoch)(VG_(get_error_where)(err)),
                          xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);

      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}

void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux) || defined(VGO_freebsd)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   {
      VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
      if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                         data_addr, VG_(pp_SectKind)(sect));
      /* SectGOTPLT is required on ???-linux */
      if (sect == Vg_SectGOTPLT) return;
      /* SectPLT is required on ppc32/64-linux */
      if (sect == Vg_SectPLT) return;
      /* SectGOT is required on arm-linux */
      if (sect == Vg_SectGOT) return;
   }
#  endif

   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr = data_addr;
   xe.XE.Race.szB       = szB;
   xe.XE.Race.isWrite   = isWrite;
   xe.XE.Race.thr       = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;

   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}
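
/* For orientation, a minimal client program that drives this path
   (a sketch; Helgrind flags the unsynchronised accesses to 'g'):

      static int g;                                  // shared, no lock
      static void* child ( void* v ) { g = 1; return NULL; }
      // in main():
      //    pthread_create(&t, NULL, child, NULL);
      //    g = 2;               // races with the write in child
      //    pthread_join(t, NULL);
*/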

void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}

void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}

void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}

void HG_(record_error_LockOrder)(
        Thread*     thr,
        Lock*       shouldbe_earlier_lk,
        Lock*       shouldbe_later_lk,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr = thr;
   xe.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_later_ec = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec = actual_earlier_ec;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}

void HG_(record_error_PthAPIerror) ( Thread* thr, const HChar* fnname,
                                     Word err, const HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}
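
/* For orientation (a sketch): this is reached from the pthread
   interceptors when a pthreads call fails unexpectedly, e.g.
   destroying a locked mutex, and is later rendered by HG_(pp_Error)
   as something like

      Thread #1's call to pthread_mutex_destroy failed
         with error code 16 (EBUSY)
*/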

void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}

void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}

Bool HG_(eq_Error) ( VgRes not_used, const Error* e1, const Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }
}
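
/* Note on deduplication (illustrative): with the default
   --cmp-race-err-addrs=no, two XE_Race errors of the same size and
   direction compare equal here even at different addresses; passing
   =yes makes the data_addr comparison above take effect.  The core
   additionally compares stack traces before treating two errors as
   duplicates, so equality here alone does not suppress a report. */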


/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/

/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
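
/* Usage sketch: a single emit() call serves both output modes, e.g.

      emit( "  <hthreadid>%d</hthreadid>\n", 3 );

   goes to the XML channel under --xml=yes and to the normal user
   message channel otherwise.  PRINTF_CHECK(1, 2) asks the compiler
   to type-check the arguments against the format string, as for
   printf. */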

/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made. */
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                      thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                      thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}
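
/* In text mode the announcement rendered above looks like this
   (stack frames illustrative):

      ---Thread-Announcement------------------------------------------

      Thread #2 was created
         at 0x........: clone (in /lib64/libc.so.6)
         ...
*/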

static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);

   if (VG_(clo_xml)) {
      if (lk->appeared_at) {
         emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
               (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      }

   } else {
      if (lk->appeared_at) {
         VG_(umsg)( " Lock at %p was first observed\n",
                    (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      } else {
         VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
                    (void*)lk->guestaddr );
      }
      HG_(get_and_pp_addrdescr)
         (lk->appeared_at
          ? VG_(get_ExeContext_epoch)(lk->appeared_at)
          : VG_(current_DiEpoch)(),
          lk->guestaddr);
      VG_(umsg)("\n");
   }
}

/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}

static void show_LockP_summary_textmode ( Lock** locks, const HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0) {
      VG_(umsg)( "%sLocks held: none", pre );
   } else {
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );
   }

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr );
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}
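
/* Example lines this produces (addresses illustrative):

      Locks held: none
      Locks held: 2, at addresses 0x804A010 0x804A050
      Locks held: 2, at address 0x804A010 (and 1 that can't be shown)
*/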

/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message. */
void HG_(before_pp_Error) ( const Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         if (xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
            Thread* thr = get_admin_threads();
            while (thr) {
               if (thr->errmsg_index
                   == xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
                  announce_one_thread (thr);
                  break;
               }
               thr = thr->admin;
            }
         }
         break;
      default:
         break;
   }
}

void HG_(pp_Error) ( const Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

   switch (VG_(get_error_kind)(err)) {

   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit(" %s\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      }
      break;
   }

   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
               "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "\n" );
         emit( "Observed (incorrect) order is: "
               "acquisition of lock at %p\n",
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         if (xe->XE.LockOrder.actual_earlier_ec) {
            VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
         } else {
            emit(" (stack unavailable)\n");
         }
         emit( "\n" );
         emit(" followed by a later acquisition of lock at %p\n",
              (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit("\n");
            emit( "Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( " followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      }

      break;
   }

   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit(
            "    <text>Thread #%d's call to %pS failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d's call to %pS failed\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index,
               xe->XE.PthAPIerror.fnname );
         emit( " with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }
      break;
   }

   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
               "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }
      break;
   }

   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
               "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      }
      break;
   }

   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
               "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock );

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock );

      }
      break;
   }

   case XE_Race: {
      Addr err_ga;
      const HChar* what;
      Int szB;
      what   = xe->XE.Race.isWrite ? "write" : "read";
      szB    = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
               "at %p by thread #%d</text>\n",
               what, szB, (void*)err_ga,
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                  "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                       xe->XE.Race.h2_ct_locksHeldW );

         emit( "Possible data race during %s of size %d "
               "at %p by thread #%d\n",
               what, szB, (void*)err_ga,
               (Int)xe->XE.Race.thr->errmsg_index );

         tl_assert(xe->XE.Race.locksHeldW);
         show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            tl_assert(xe->XE.Race.h2_ct_locksHeldW);
            emit( "\n" );
            emit( "This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( " (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( " (the end of the thread)\n" );
            }
         }

      }
      VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}
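
/* Putting the pieces together: a text-mode XE_Race report assembled
   from the format strings above looks roughly like this (addresses,
   symbols and frames illustrative):

      Possible data race during write of size 4 at 0x60103C by thread #2
      Locks held: none
         at 0x........: child_fn (race.c:12)
      This conflicts with a previous read of size 4 by thread #1
      Locks held: none
         at 0x........: main (race.c:20)
      Address 0x60103c is 0 bytes inside data symbol "g"
*/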

void HG_(print_access) (StackTrace ips, UInt n_ips,
                        Thr* thr_a,
                        Addr ga,
                        SizeT SzB,
                        Bool isW,
                        WordSetID locksHeldW )
{
   Thread* threadp;

   threadp = libhb_get_Thr_hgthread( thr_a );
   tl_assert(threadp);
   if (!threadp->announced) {
      /* This is for interactive use. We announce the thread if needed,
         but reset it to not announced afterwards, because we want
         the thread to be announced on the error output/log if needed. */
      announce_one_thread (threadp);
      threadp->announced = False;
   }

   VG_(printf) ("%s of size %d at %p by thread #%d",
                isW ? "write" : "read",
                (int)SzB, (void*)ga, threadp->errmsg_index);
   if (threadp->coretid == VG_INVALID_THREADID)
      VG_(printf)(" tid (exited)\n");
   else
      VG_(printf)(" tid %u\n", threadp->coretid);
   {
      Lock** locksHeldW_P;
      locksHeldW_P = enumerate_WordSet_into_LockP_vector(
                        HG_(get_univ_lsets)(),
                        locksHeldW,
                        True/*allowed_to_be_invalid*/
                     );
      show_LockP_summary_textmode( locksHeldW_P, "" );
      HG_(free) (locksHeldW_P);
   }

   // FIXME PW EPOCH : need the real ips epoch.
   VG_(pp_StackTrace)( VG_(current_DiEpoch)(), ips, n_ips );
   VG_(printf) ("\n");
}

const HChar* HG_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      default: tl_assert(0); /* fill in missing case */
   }
}

Bool HG_(recognised_suppression) ( const HChar* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   return False;
#  undef TRY
}
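
/* For reference, a suppressions-file entry using one of the names
   recognised above looks like this (entry name and frames
   illustrative):

      {
         benign-race-in-logger
         Helgrind:Race
         fun:log_write
         ...
      }
*/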

Bool HG_(read_extra_suppression_info) ( Int fd, HChar** bufpp, SizeT* nBufp,
                                        Int* lineno, Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}

Bool HG_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
   case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
   case XS_UnlockUnlocked: return VG_(get_error_kind)(err)
                                  == XE_UnlockUnlocked;
   case XS_UnlockForeign:  return VG_(get_error_kind)(err)
                                  == XE_UnlockForeign;
   case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
   case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
   case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
   case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
   //case XS_:             return VG_(get_error_kind)(err) == XE_;
   default: tl_assert(0); /* fill in missing cases */
   }
}

SizeT HG_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   /* Do nothing */
   tl_assert(nBuf >= 1);
   buf[0] = 0;
   return 0;
}
HG_(print_extra_suppression_use
) ( const Supp
* su
,
1384 /*OUT*/HChar
* buf
, Int nBuf
)
1386 tl_assert(nBuf
>= 1);
1392 void HG_(update_extra_suppression_use
) ( const Error
* err
, const Supp
* su
)


/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/