/*--------------------------------------------------------------------*/
/*--- Error management for Helgrind.                               ---*/
/*---                                                  hg_errors.c ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks Ltd

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_execontext.h"
#include "pub_tool_errormgr.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_xarray.h"
#include "pub_tool_debuginfo.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_options.h"     // VG_(clo_xml)
#include "pub_tool_aspacemgr.h"
#include "pub_tool_addrinfo.h"
#include "hg_basics.h"
#include "hg_addrdescr.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"

#include "hg_errors.h"            /* self */
/*----------------------------------------------------------------*/
/*--- Error management -- storage                              ---*/
/*----------------------------------------------------------------*/
/* maps (by value) strings to a copy of them in ARENA_TOOL */

static WordFM* string_table = NULL;

ULong HG_(stats__string_table_queries) = 0;
ULong HG_(stats__string_table_get_map_size) ( void ) {
   return string_table ? (ULong)VG_(sizeFM)(string_table) : 0;
}
static Word string_table_cmp ( UWord s1, UWord s2 ) {
   return (Word)VG_(strcmp)( (HChar*)s1, (HChar*)s2 );
}
static HChar* string_table_strdup ( const HChar* str ) {
   HChar* copy = NULL;
   HG_(stats__string_table_queries)++;
   if (!str)
      str = "(null)";
   if (!string_table) {
      string_table = VG_(newFM)( HG_(zalloc), "hg.sts.1",
                                 HG_(free), string_table_cmp );
   }
   if (VG_(lookupFM)( string_table,
                      NULL, (UWord*)&copy, (UWord)str )) {
      tl_assert(copy);
      if (0) VG_(printf)("string_table_strdup: %p -> %p\n", str, copy );
      return copy;
   } else {
      copy = HG_(strdup)("hg.sts.2", str);
      VG_(addToFM)( string_table, (UWord)copy, (UWord)copy );
      return copy;
   }
}
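
/* Illustrative sketch (not part of the upstream file): because
   strings are interned by value in ARENA_TOOL, two strdup calls with
   equal contents return the same pointer, so later code can compare
   interned strings with '==' rather than VG_(strcmp):

      const HChar* a = string_table_strdup("pthread_mutex_lock");
      const HChar* b = string_table_strdup("pthread_mutex_lock");
      tl_assert(a == b);   // same ARENA_TOOL copy
*/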
/* maps from Lock .unique fields to LockP*s */

static WordFM* map_LockN_to_P = NULL;

ULong HG_(stats__LockN_to_P_queries) = 0;
ULong HG_(stats__LockN_to_P_get_map_size) ( void ) {
   return map_LockN_to_P ? (ULong)VG_(sizeFM)(map_LockN_to_P) : 0;
}
static Word lock_unique_cmp ( UWord lk1W, UWord lk2W )
{
   Lock* lk1 = (Lock*)lk1W;
   Lock* lk2 = (Lock*)lk2W;
   tl_assert( HG_(is_sane_LockNorP)(lk1) );
   tl_assert( HG_(is_sane_LockNorP)(lk2) );
   if (lk1->unique < lk2->unique) return -1;
   if (lk1->unique > lk2->unique) return 1;
   return 0;
}
/* Given a normal Lock (LockN), convert it to a persistent Lock
   (LockP).  In some cases the LockN could be invalid (if it's been
   freed), so we enquire, in hg_main.c's admin_locks list, whether it
   is in fact valid.  If allowed_to_be_invalid is True, then it's OK
   for the LockN to be invalid, in which case Lock_INVALID is
   returned.  In all other cases, we insist that the LockN is a valid
   lock, and return its corresponding LockP.

   Why can LockNs sometimes be invalid?  Because they are harvested
   from locksets that are attached to the OldRef info for conflicting
   threads.  By the time we detect a race, some of the elements of
   the lockset may have been destroyed by the client, in which case
   the corresponding Lock structures we maintain will have been freed.

   So we check that each LockN is a member of the admin_locks double
   linked list of all Lock structures.  That stops us prodding around
   in potentially freed-up Lock structures.  However, it's not quite a
   proper check: if a new Lock has been reallocated at the same
   address as one which was previously freed, we'll wind up copying
   the new one as the basis for the LockP, which is completely bogus
   because it is unrelated to the previous Lock that lived there.
   Let's hope that doesn't happen too often. */
static Lock* mk_LockP_from_LockN ( Lock* lkn,
                                   Bool allowed_to_be_invalid )
{
   Lock* lkp = NULL;
   HG_(stats__LockN_to_P_queries)++;

   /* First off, let's do some sanity checks.  If
      allowed_to_be_invalid is False, we _must_ be able to find 'lkn'
      in admin_locks; else we must assert.  If it is True, it's OK for
      it not to be findable, but in that case we must return
      Lock_INVALID right away. */
   Lock* lock_list = HG_(get_admin_locks)();
   while (lock_list) {
      if (lock_list == lkn)
         break;
      lock_list = lock_list->admin_next;
   }
   if (lock_list == NULL) {
      /* We didn't find it.  That possibility has to be OK'd by the
         caller. */
      tl_assert(allowed_to_be_invalid);
      return Lock_INVALID;
   }

   /* So we must be looking at a valid LockN. */
   tl_assert( HG_(is_sane_LockN)(lkn) );

   if (!map_LockN_to_P) {
      map_LockN_to_P = VG_(newFM)( HG_(zalloc), "hg.mLPfLN.1",
                                   HG_(free), lock_unique_cmp );
   }
   if (!VG_(lookupFM)( map_LockN_to_P, NULL, (UWord*)&lkp, (UWord)lkn)) {
      lkp = HG_(zalloc)( "hg.mLPfLN.2", sizeof(Lock) );
      *lkp = *lkn;
      lkp->admin_next = NULL;
      lkp->admin_prev = NULL;
      lkp->magic = LockP_MAGIC;
      /* Forget about the bag of lock holders - don't copy that.
         Also, acquired_at should be NULL whenever heldBy is, and vice
         versa.  Also forget about the associated libhb synch object. */
      lkp->heldW  = False;
      lkp->heldBy = NULL;
      lkp->acquired_at = NULL;
      lkp->hbso = NULL;
      VG_(addToFM)( map_LockN_to_P, (UWord)lkp, (UWord)lkp );
   }
   tl_assert( HG_(is_sane_LockP)(lkp) );
   return lkp;
}
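
/* Illustrative usage sketch (not in the upstream file): callers that
   harvest LockNs from possibly-stale locksets pass
   allowed_to_be_invalid=True and must then treat Lock_INVALID as
   "nothing known about this lock":

      Lock* lkp = mk_LockP_from_LockN( lkn, True/*allowed_to_be_invalid*/ );
      if (lkp != Lock_INVALID) {
         // safe to inspect lkp->guestaddr, lkp->appeared_at, etc.
      }
*/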
static Int sort_by_guestaddr(const void* n1, const void* n2)
{
   const Lock* l1 = *(const Lock *const *)n1;
   const Lock* l2 = *(const Lock *const *)n2;

   Addr a1 = l1 == Lock_INVALID ? 0 : l1->guestaddr;
   Addr a2 = l2 == Lock_INVALID ? 0 : l2->guestaddr;
   if (a1 < a2) return -1;
   if (a1 > a2) return 1;
   return 0;
}
/* Expand a WordSet of LockN*'s into a NULL-terminated vector of
   LockP*'s.  Any LockN's that can't be converted into a LockP
   (because they have been freed, see comment on mk_LockP_from_LockN)
   are converted instead into the value Lock_INVALID.  Hence the
   returned vector is a sequence: zero or more (valid LockP* or
   Lock_INVALID), terminated by a NULL. */
static
Lock** enumerate_WordSet_into_LockP_vector( WordSetU* univ_lsets,
                                            WordSetID lockset,
                                            Bool allowed_to_be_invalid )
{
   tl_assert(univ_lsets);
   tl_assert( HG_(plausibleWS)(univ_lsets, lockset) );
   UWord  nLocks = HG_(cardinalityWS)(univ_lsets, lockset);
   Lock** lockPs = HG_(zalloc)( "hg.eWSiLPa",
                                (nLocks+1) * sizeof(Lock*) );
   tl_assert(lockPs[nLocks] == NULL); /* pre-NULL terminated */
   UWord* lockNs  = NULL;
   UWord  nLockNs = 0;
   if (nLocks > 0) {
      /* HG_(getPayloadWS) doesn't assign non-NULL to &lockNs if the
         lockset is empty; hence the guarding "if".  Sigh. */
      HG_(getPayloadWS)( &lockNs, &nLockNs, univ_lsets, lockset );
      tl_assert(lockNs);
   }
   tl_assert(nLockNs == nLocks);

   /* Convert to LockPs. */
   UWord i;
   for (i = 0; i < nLockNs; i++) {
      lockPs[i] = mk_LockP_from_LockN( (Lock*)lockNs[i],
                                       allowed_to_be_invalid );
   }
   /* Sort the locks by increasing Lock::guestaddr to avoid jitters
      in the output. */
   VG_(ssort)(lockPs, nLockNs, sizeof lockPs[0], sort_by_guestaddr);

   return lockPs;
}
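
/* Illustrative iteration sketch (not in the upstream file): the
   result is NULL-terminated, so consumers walk it without a separate
   length, skipping Lock_INVALID slots:

      Lock** vec = enumerate_WordSet_into_LockP_vector(
                      HG_(get_univ_lsets)(), lockset,
                      True/*allowed_to_be_invalid*/ );
      for (UWord i = 0; vec[i]; i++) {
         if (vec[i] == Lock_INVALID) continue;
         // ... use vec[i] ...
      }
      HG_(free)(vec);
*/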
/* Get the number of useful elements in a vector created by
   enumerate_WordSet_into_LockP_vector.  Returns both the total number
   of elements (not including the terminating NULL) and the number of
   non-Lock_INVALID elements. */
static void count_LockP_vector ( /*OUT*/UWord* nLocks,
                                 /*OUT*/UWord* nLocksValid,
                                 Lock** vec )
{
   tl_assert(vec);
   *nLocks = *nLocksValid = 0;
   UWord n = 0;
   while (vec[n]) {
      (*nLocks)++;
      if (vec[n] != Lock_INVALID)
         (*nLocksValid)++;
      n++;
   }
}
/* Find out whether 'lk' is in 'vec'. */
static Bool elem_LockP_vector ( Lock** vec, Lock* lk )
{
   tl_assert(vec);
   tl_assert(lk);
   UWord n = 0;
   while (vec[n]) {
      if (vec[n] == lk)
         return True;
      n++;
   }
   return False;
}
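
/* Illustrative sketch of the two helpers together (not in the
   upstream file):

      UWord nLocks, nLocksValid;
      count_LockP_vector(&nLocks, &nLocksValid, vec);
      // nLocks counts every slot before the terminating NULL;
      // nLocksValid excludes Lock_INVALID entries.
      Bool held = elem_LockP_vector(vec, some_lockP);
*/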
/* Errors:

      race: program counter
            read or write
            data size
            previous state
            current state

   FIXME: how does state printing interact with lockset gc?
   Are the locksets in prev/curr state always valid?
   Ditto question for the threadsets
      ThreadSets - probably are always valid if Threads
      are never thrown away.
      LockSets - could at least print the lockset elements that
      correspond to actual locks at the time of printing.  Hmm.
*/
typedef
   enum {
      XE_Race=1101,      // race
      XE_UnlockUnlocked, // unlocking a not-locked lock
      XE_UnlockForeign,  // unlocking a lock held by some other thread
      XE_UnlockBogus,    // unlocking an address not known to be a lock
      XE_PthAPIerror,    // error from the POSIX pthreads API
      XE_LockOrder,      // lock order error
      XE_Misc,           // misc other error (w/ string to describe it)
      XE_Dubious         // a bit like misc for cases where the POSIX
                         // spec is unclear on error conditions
   }
   XErrorTag;
/* Extra contexts for kinds */
typedef
   struct  {
      XErrorTag tag;
      union {
         struct {
            Addr        data_addr;
            Int         szB;
            AddrInfo    data_addrinfo;
            Bool        isWrite;
            Thread*     thr;
            Lock**      locksHeldW;
            /* h1_* and h2_* provide some description of a previously
               observed access with which we are conflicting. */
            Thread*     h1_ct; /* non-NULL means h1 info present */
            ExeContext* h1_ct_mbsegstartEC;
            ExeContext* h1_ct_mbsegendEC;
            Thread*     h2_ct; /* non-NULL means h2 info present */
            ExeContext* h2_ct_accEC;
            Int         h2_ct_accSzB;
            Bool        h2_ct_accIsW;
            Lock**      h2_ct_locksHeldW;
         } Race;
         struct {
            Thread* thr;  /* doing the unlocking */
            Lock*   lock; /* lock (that is already unlocked) */
         } UnlockUnlocked;
         struct {
            Thread* thr;    /* doing the unlocking */
            Thread* owner;  /* thread that actually holds the lock */
            Lock*   lock;   /* lock (that is held by 'owner') */
         } UnlockForeign;
         struct {
            Thread* thr;     /* doing the unlocking */
            Addr    lock_ga; /* purported address of the lock */
         } UnlockBogus;
         struct {
            Thread* thr;
            HChar*  fnname; /* persistent, in tool-arena */
            Word    err;    /* pth error code */
            HChar*  errstr; /* persistent, in tool-arena */
         } PthAPIerror;
         struct {
            Thread*     thr;
            /* The first 4 fields describe the previously observed
               (should-be) ordering. */
            Lock*       shouldbe_earlier_lk;
            Lock*       shouldbe_later_lk;
            ExeContext* shouldbe_earlier_ec;
            ExeContext* shouldbe_later_ec;
            /* In principle we need to record two more stacks, from
               this thread, when acquiring the locks in the "wrong"
               order.  In fact the wallclock-later acquisition by this
               thread is recorded in the main stack for this error.
               So we only need a stack for the earlier acquisition by
               this thread. */
            ExeContext* actual_earlier_ec;
         } LockOrder;
         struct {
            Thread*     thr;
            HChar*      errstr; /* persistent, in tool-arena */
            HChar*      auxstr; /* optional, persistent, in tool-arena */
            ExeContext* auxctx; /* optional */
         } Misc;
      } XE;
   }
   XError;
static void init_XError ( XError* xe ) {
   VG_(memset)(xe, 0, sizeof(*xe) );
   xe->tag = XE_Race-1; /* bogus */
}
/* Extensions of suppressions */
typedef
   enum {
      XS_Race=1201, /* race */
      XS_FreeMemLock,
      XS_UnlockUnlocked,
      XS_UnlockForeign,
      XS_UnlockBogus,
      XS_PthAPIerror,
      XS_LockOrder,
      XS_Misc,
      XS_Dubious
   }
   XSuppTag;
/* Updates the copy with address info if necessary. */
UInt HG_(update_extra) ( const Error* err )
{
   XError* xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);
   //if (extra != NULL && Undescribed == extra->addrinfo.akind) {
   //   describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
   //}
   if (xe->tag == XE_Race) {

      /* Note the set of locks that the thread is (w-)holding.
         Convert the WordSetID of LockN*'s into a NULL-terminated
         vector of LockP*'s.  We don't expect to encounter any invalid
         LockNs in this conversion. */
      tl_assert(xe->XE.Race.thr);
      xe->XE.Race.locksHeldW
         = enumerate_WordSet_into_LockP_vector(
              HG_(get_univ_lsets)(),
              xe->XE.Race.thr->locksetW,
              False/*!allowed_to_be_invalid*/
           );

      /* See if we can come up with a source level description of the
         raced-upon address.  This is potentially expensive, which is
         why it's only done at the update_extra point, not when the
         error is initially created. */
      static Int xxx = 0;
      xxx++;
      if (0)
         VG_(printf)("HG_(update_extra): "
                     "%d conflicting-event queries\n", xxx);

      HG_(describe_addr) (VG_(get_ExeContext_epoch)(VG_(get_error_where)(err)),
                          xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);
      /* And poke around in the conflicting-event map, to see if we
         can rustle up a plausible-looking conflicting memory access
         to show. */
      if (HG_(clo_history_level) >= 2) {
         Thr*        thrp            = NULL;
         ExeContext* wherep          = NULL;
         Addr        acc_addr        = xe->XE.Race.data_addr;
         Int         acc_szB         = xe->XE.Race.szB;
         Thr*        acc_thr         = xe->XE.Race.thr->hbthr;
         Bool        acc_isW         = xe->XE.Race.isWrite;
         SizeT       conf_szB        = 0;
         Bool        conf_isW        = False;
         WordSetID   conf_locksHeldW = 0;
         tl_assert(!xe->XE.Race.h2_ct_accEC);
         tl_assert(!xe->XE.Race.h2_ct);
         if (libhb_event_map_lookup(
                &wherep, &thrp, &conf_szB, &conf_isW, &conf_locksHeldW,
                acc_thr, acc_addr, acc_szB, acc_isW )) {
            Thread* threadp;
            tl_assert(wherep);
            tl_assert(thrp);
            threadp = libhb_get_Thr_hgthread( thrp );
            tl_assert(threadp);
            xe->XE.Race.h2_ct_accEC  = wherep;
            xe->XE.Race.h2_ct        = threadp;
            xe->XE.Race.h2_ct_accSzB = (Int)conf_szB;
            xe->XE.Race.h2_ct_accIsW = conf_isW;
            xe->XE.Race.h2_ct_locksHeldW
               = enumerate_WordSet_into_LockP_vector(
                    HG_(get_univ_lsets)(),
                    conf_locksHeldW,
                    True/*allowed_to_be_invalid*/
                 );
         }
      }

      // both NULL or both non-NULL
      tl_assert( (!!xe->XE.Race.h2_ct) == (!!xe->XE.Race.h2_ct_accEC) );
   }

   return sizeof(XError);
}
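
/* Illustrative note (not in the upstream file): the h2_* lookup above
   only fires when full access history is kept, i.e. when Helgrind is
   run with its --history-level flag set to "full":

      valgrind --tool=helgrind --history-level=full ./a.out

   which corresponds to HG_(clo_history_level) == 2; at lower history
   levels the "conflicts with a previous access" part of a race report
   is absent. */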
void HG_(record_error_Race) ( Thread* thr,
                              Addr data_addr, Int szB, Bool isWrite,
                              Thread* h1_ct,
                              ExeContext* h1_ct_segstart,
                              ExeContext* h1_ct_mbsegendEC )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );

#  if defined(VGO_linux) || defined(VGO_freebsd)
   /* Skip any races on locations apparently in GOTPLT sections.  This
      is said to be caused by ld.so poking PLT table entries (or
      whatever) when it writes the resolved address of a dynamically
      linked routine, into the table (or whatever) when it is called
      for the first time. */
   VgSectKind sect = VG_(DebugInfo_sect_kind)( NULL, data_addr );
   if (0) VG_(printf)("XXXXXXXXX RACE on %#lx %s\n",
                      data_addr, VG_(pp_SectKind)(sect));
   /* SectPLT is required on ???-linux */
   if (sect == Vg_SectGOTPLT) return;
   /* SectPLT is required on ppc32/64-linux */
   if (sect == Vg_SectPLT) return;
   /* SectGOT is required on arm-linux */
   if (sect == Vg_SectGOT) return;
#  endif
   init_XError(&xe);
   xe.tag = XE_Race;
   xe.XE.Race.data_addr = data_addr;
   xe.XE.Race.szB       = szB;
   xe.XE.Race.isWrite   = isWrite;
   xe.XE.Race.thr       = thr;
   tl_assert(isWrite == False || isWrite == True);
   tl_assert(szB == 8 || szB == 4 || szB == 2 || szB == 1);
   /* Skip on the detailed description of the raced-on address at this
      point; it's expensive.  Leave it for the update_extra function
      if we ever make it that far. */
   xe.XE.Race.data_addrinfo.tag = Addr_Undescribed;

   // Skip on any of the conflicting-access info at this point.
   // It's expensive to obtain, and this error is more likely than
   // not to be discarded.  We'll fill these fields in
   // HG_(update_extra) just above, assuming the error ever makes
   // it that far (unlikely).
   xe.XE.Race.h2_ct_accSzB = 0;
   xe.XE.Race.h2_ct_accIsW = False;
   xe.XE.Race.h2_ct_accEC  = NULL;
   xe.XE.Race.h2_ct        = NULL;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );

   xe.XE.Race.h1_ct              = h1_ct;
   xe.XE.Race.h1_ct_mbsegstartEC = h1_ct_segstart;
   xe.XE.Race.h1_ct_mbsegendEC   = h1_ct_mbsegendEC;

   VG_(maybe_record_error)( thr->coretid,
                            XE_Race, data_addr, NULL, &xe );
}
void HG_(record_error_UnlockUnlocked) ( Thread* thr, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockUnlocked;
   xe.XE.UnlockUnlocked.thr
      = thr;
   xe.XE.UnlockUnlocked.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockUnlocked, 0, NULL, &xe );
}
void HG_(record_error_UnlockForeign) ( Thread* thr,
                                       Thread* owner, Lock* lk )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert( HG_(is_sane_Thread)(owner) );
   tl_assert( HG_(is_sane_LockN)(lk) );
   init_XError(&xe);
   xe.tag = XE_UnlockForeign;
   xe.XE.UnlockForeign.thr   = thr;
   xe.XE.UnlockForeign.owner = owner;
   xe.XE.UnlockForeign.lock
      = mk_LockP_from_LockN(lk, False/*!allowed_to_be_invalid*/);
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockForeign, 0, NULL, &xe );
}
void HG_(record_error_UnlockBogus) ( Thread* thr, Addr lock_ga )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   init_XError(&xe);
   xe.tag = XE_UnlockBogus;
   xe.XE.UnlockBogus.thr     = thr;
   xe.XE.UnlockBogus.lock_ga = lock_ga;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_UnlockBogus, 0, NULL, &xe );
}
void HG_(record_error_LockOrder)(
        Thread*     thr,
        Lock*       shouldbe_earlier_lk,
        Lock*       shouldbe_later_lk,
        ExeContext* shouldbe_earlier_ec,
        ExeContext* shouldbe_later_ec,
        ExeContext* actual_earlier_ec
     )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(HG_(clo_track_lockorders));
   init_XError(&xe);
   xe.tag = XE_LockOrder;
   xe.XE.LockOrder.thr = thr;
   xe.XE.LockOrder.shouldbe_earlier_lk
      = mk_LockP_from_LockN(shouldbe_earlier_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_earlier_ec = shouldbe_earlier_ec;
   xe.XE.LockOrder.shouldbe_later_lk
      = mk_LockP_from_LockN(shouldbe_later_lk,
                            False/*!allowed_to_be_invalid*/);
   xe.XE.LockOrder.shouldbe_later_ec   = shouldbe_later_ec;
   xe.XE.LockOrder.actual_earlier_ec   = actual_earlier_ec;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_LockOrder, 0, NULL, &xe );
}
void HG_(record_error_PthAPIerror) ( Thread* thr, const HChar* fnname,
                                     Word err, const HChar* errstr )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(fnname);
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_PthAPIerror;
   xe.XE.PthAPIerror.thr    = thr;
   xe.XE.PthAPIerror.fnname = string_table_strdup(fnname);
   xe.XE.PthAPIerror.err    = err;
   xe.XE.PthAPIerror.errstr = string_table_strdup(errstr);
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_PthAPIerror, 0, NULL, &xe );
}
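
/* Illustrative call sketch (hypothetical arguments, not from the
   upstream file): a failing pthreads call is reported by passing the
   function name and the (positive) error code plus its symbolic name,
   both strings being interned via string_table_strdup:

      HG_(record_error_PthAPIerror)( thr, "pthread_mutex_destroy",
                                     16, "EBUSY" );
*/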
void HG_(record_error_Misc_w_aux) ( Thread* thr, const HChar* errstr,
                                    const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Misc;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Misc, 0, NULL, &xe );
}
void HG_(record_error_Misc) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Misc_w_aux)(thr, errstr, NULL, NULL);
}
void HG_(record_error_Dubious_w_aux) ( Thread* thr, const HChar* errstr,
                                       const HChar* auxstr, ExeContext* auxctx )
{
   XError xe;
   tl_assert( HG_(is_sane_Thread)(thr) );
   tl_assert(errstr);
   init_XError(&xe);
   xe.tag = XE_Dubious;
   xe.XE.Misc.thr    = thr;
   xe.XE.Misc.errstr = string_table_strdup(errstr);
   xe.XE.Misc.auxstr = auxstr ? string_table_strdup(auxstr) : NULL;
   xe.XE.Misc.auxctx = auxctx;
   tl_assert( HG_(is_sane_ThreadId)(thr->coretid) );
   tl_assert( thr->coretid != VG_INVALID_THREADID );
   VG_(maybe_record_error)( thr->coretid,
                            XE_Dubious, 0, NULL, &xe );
}
void HG_(record_error_Dubious) ( Thread* thr, const HChar* errstr )
{
   HG_(record_error_Dubious_w_aux)(thr, errstr, NULL, NULL);
}
Bool HG_(eq_Error) ( VgRes not_used, const Error* e1, const Error* e2 )
{
   XError *xe1, *xe2;

   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));

   xe1 = (XError*)VG_(get_error_extra)(e1);
   xe2 = (XError*)VG_(get_error_extra)(e2);
   tl_assert(xe1);
   tl_assert(xe2);

   switch (VG_(get_error_kind)(e1)) {
      case XE_Race:
         return xe1->XE.Race.szB == xe2->XE.Race.szB
                && xe1->XE.Race.isWrite == xe2->XE.Race.isWrite
                && (HG_(clo_cmp_race_err_addrs)
                       ? xe1->XE.Race.data_addr == xe2->XE.Race.data_addr
                       : True);
      case XE_UnlockUnlocked:
         return xe1->XE.UnlockUnlocked.thr == xe2->XE.UnlockUnlocked.thr
                && xe1->XE.UnlockUnlocked.lock == xe2->XE.UnlockUnlocked.lock;
      case XE_UnlockForeign:
         return xe1->XE.UnlockForeign.thr == xe2->XE.UnlockForeign.thr
                && xe1->XE.UnlockForeign.owner == xe2->XE.UnlockForeign.owner
                && xe1->XE.UnlockForeign.lock == xe2->XE.UnlockForeign.lock;
      case XE_UnlockBogus:
         return xe1->XE.UnlockBogus.thr == xe2->XE.UnlockBogus.thr
                && xe1->XE.UnlockBogus.lock_ga == xe2->XE.UnlockBogus.lock_ga;
      case XE_PthAPIerror:
         return xe1->XE.PthAPIerror.thr == xe2->XE.PthAPIerror.thr
                && 0==VG_(strcmp)(xe1->XE.PthAPIerror.fnname,
                                  xe2->XE.PthAPIerror.fnname)
                && xe1->XE.PthAPIerror.err == xe2->XE.PthAPIerror.err;
      case XE_LockOrder:
         return xe1->XE.LockOrder.thr == xe2->XE.LockOrder.thr;
      case XE_Misc:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      case XE_Dubious:
         return xe1->XE.Misc.thr == xe2->XE.Misc.thr
                && 0==VG_(strcmp)(xe1->XE.Misc.errstr, xe2->XE.Misc.errstr);
      default:
         tl_assert(0);
   }
}
/*----------------------------------------------------------------*/
/*--- Error management -- printing                             ---*/
/*----------------------------------------------------------------*/
/* Do a printf-style operation on either the XML or normal output
   channel, depending on the setting of VG_(clo_xml).
*/
static void emit_WRK ( const HChar* format, va_list vargs )
{
   if (VG_(clo_xml)) {
      VG_(vprintf_xml)(format, vargs);
   } else {
      VG_(vmessage)(Vg_UserMsg, format, vargs);
   }
}
static void emit ( const HChar* format, ... ) PRINTF_CHECK(1, 2);
static void emit ( const HChar* format, ... )
{
   va_list vargs;
   va_start(vargs, format);
   emit_WRK(format, vargs);
   va_end(vargs);
}
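
/* Illustrative usage sketch (not in the upstream file): emit() only
   selects the output channel; callers still choose channel-specific
   content themselves, e.g.

      if (xml)
         emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err) );
      else
         emit( "%s\n", HG_(get_error_name)(err) );
*/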
/* Announce (that is, print the point-of-creation) of 'thr'.  Only do
   this once, as we only want to see these announcements once per
   thread.  Returned Bool indicates whether or not an announcement was
   made.
*/
static Bool announce_one_thread ( Thread* thr )
{
   tl_assert(HG_(is_sane_Thread)(thr));
   tl_assert(thr->errmsg_index >= 1);
   if (thr->announced)
      return False;

   if (VG_(clo_xml)) {

      VG_(printf_xml)("<announcethread>\n");
      VG_(printf_xml)("  <hthreadid>%d</hthreadid>\n", thr->errmsg_index);
      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(printf_xml)("  <isrootthread></isrootthread>\n");
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(printf_xml)("</announcethread>\n\n");

   } else {

      VG_(umsg)("---Thread-Announcement----------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");

      if (thr->errmsg_index == 1) {
         tl_assert(thr->created_at == NULL);
         VG_(message)(Vg_UserMsg,
                      "Thread #%d is the program's root thread\n",
                      thr->errmsg_index);
      } else {
         tl_assert(thr->created_at != NULL);
         VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
                                  thr->errmsg_index);
         VG_(pp_ExeContext)( thr->created_at );
      }
      VG_(message)(Vg_UserMsg, "\n");

   }

   thr->announced = True;
   return True;
}
static void announce_LockP ( Lock* lk )
{
   tl_assert(lk);
   if (lk == Lock_INVALID)
      return; /* Can't be announced -- we know nothing about it. */
   tl_assert(lk->magic == LockP_MAGIC);

   if (VG_(clo_xml)) {
      if (lk->appeared_at) {
         emit( "  <auxwhat>Lock at %p was first observed</auxwhat>\n",
               (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      }
   } else {
      if (lk->appeared_at) {
         VG_(umsg)( " Lock at %p was first observed\n",
                    (void*)lk->guestaddr );
         VG_(pp_ExeContext)( lk->appeared_at );
      } else {
         VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
                    (void*)lk->guestaddr );
      }
      HG_(get_and_pp_addrdescr)
         (lk->appeared_at
             ? VG_(get_ExeContext_epoch)(lk->appeared_at)
             : VG_(current_DiEpoch)(),
          lk->guestaddr);
      VG_(umsg)("\n");
   }
}
/* Announce (that is, print point-of-first-observation) for the
   locks in 'lockvec' and, if non-NULL, 'lockvec2'. */
static void announce_combined_LockP_vecs ( Lock** lockvec,
                                           Lock** lockvec2 )
{
   UWord i;
   tl_assert(lockvec);
   for (i = 0; lockvec[i]; i++) {
      announce_LockP(lockvec[i]);
   }
   if (lockvec2) {
      for (i = 0; lockvec2[i]; i++) {
         Lock* lk = lockvec2[i];
         if (!elem_LockP_vector(lockvec, lk))
            announce_LockP(lk);
      }
   }
}
static void show_LockP_summary_textmode ( Lock** locks, const HChar* pre )
{
   tl_assert(locks);
   UWord i;
   UWord nLocks = 0, nLocksValid = 0;
   count_LockP_vector(&nLocks, &nLocksValid, locks);
   tl_assert(nLocksValid <= nLocks);

   if (nLocks == 0)
      VG_(umsg)( "%sLocks held: none", pre );
   else
      VG_(umsg)( "%sLocks held: %lu, at address%s ",
                 pre, nLocks, nLocksValid == 1 ? "" : "es" );

   if (nLocks > 0) {
      for (i = 0; i < nLocks; i++) {
         if (locks[i] == Lock_INVALID)
            continue;
         VG_(umsg)( "%p", (void*)locks[i]->guestaddr);
         if (locks[i+1] != NULL)
            VG_(umsg)(" ");
      }
      if (nLocksValid < nLocks)
         VG_(umsg)(" (and %lu that can't be shown)", nLocks - nLocksValid);
   }
   VG_(umsg)("\n");
}
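
/* Illustrative rendering (derived from the format strings above;
   addresses are made up): with three locks of which one could not be
   converted to a LockP, the summary line looks roughly like

      Locks held: 3, at addresses 0x4C32A20 0x4C32A80 (and 1 that can't be shown)
*/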
/* This is the "this error is due to be printed shortly; so have a
   look at it and print any preamble you want" function.  We use it to
   announce any previously un-announced threads in the upcoming error
   message. */
void HG_(before_pp_Error) ( const Error* err )
{
   XError* xe;
   tl_assert(err);
   xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   switch (VG_(get_error_kind)(err)) {
      case XE_Misc:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_Dubious:
         announce_one_thread( xe->XE.Misc.thr );
         break;
      case XE_LockOrder:
         announce_one_thread( xe->XE.LockOrder.thr );
         break;
      case XE_PthAPIerror:
         announce_one_thread( xe->XE.PthAPIerror.thr );
         break;
      case XE_UnlockBogus:
         announce_one_thread( xe->XE.UnlockBogus.thr );
         break;
      case XE_UnlockForeign:
         announce_one_thread( xe->XE.UnlockForeign.thr );
         announce_one_thread( xe->XE.UnlockForeign.owner );
         break;
      case XE_UnlockUnlocked:
         announce_one_thread( xe->XE.UnlockUnlocked.thr );
         break;
      case XE_Race:
         announce_one_thread( xe->XE.Race.thr );
         if (xe->XE.Race.h2_ct)
            announce_one_thread( xe->XE.Race.h2_ct );
         if (xe->XE.Race.h1_ct)
            announce_one_thread( xe->XE.Race.h1_ct );
         if (xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
            Thread* thr = get_admin_threads();
            while (thr) {
               if (thr->errmsg_index
                   == xe->XE.Race.data_addrinfo.Addr.Block.alloc_tinfo.tnr) {
                  announce_one_thread (thr);
                  break;
               }
               thr = thr->admin;
            }
         }
         break;
      default:
         tl_assert(0);
   }
}
void HG_(pp_Error) ( const Error* err )
{
   const Bool xml = VG_(clo_xml); /* a shorthand, that's all */

   if (!xml) {
      VG_(umsg)("--------------------------------"
                "--------------------------------" "\n");
      VG_(umsg)("\n");
   }

   XError *xe = (XError*)VG_(get_error_extra)(err);
   tl_assert(xe);

   if (xml)
      emit( "  <kind>%s</kind>\n", HG_(get_error_name)(err));

   switch (VG_(get_error_kind)(err)) {
)( xe
->XE
.Misc
.thr
) );
972 emit( " <xwhat>\n" );
973 emit( " <text>Thread #%d: %s</text>\n",
974 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
,
975 xe
->XE
.Misc
.errstr
);
976 emit( " <hthreadid>%d</hthreadid>\n",
977 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
);
978 emit( " </xwhat>\n" );
979 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
980 if (xe
->XE
.Misc
.auxstr
) {
981 emit(" <auxwhat>%s</auxwhat>\n", xe
->XE
.Misc
.auxstr
);
982 if (xe
->XE
.Misc
.auxctx
)
983 VG_(pp_ExeContext
)( xe
->XE
.Misc
.auxctx
);
988 emit( "Thread #%d: %s\n",
989 (Int
)xe
->XE
.Misc
.thr
->errmsg_index
,
990 xe
->XE
.Misc
.errstr
);
991 VG_(pp_ExeContext
)( VG_(get_error_where
)(err
) );
992 if (xe
->XE
.Misc
.auxstr
) {
993 emit(" %s\n", xe
->XE
.Misc
.auxstr
);
994 if (xe
->XE
.Misc
.auxctx
)
995 VG_(pp_ExeContext
)( xe
->XE
.Misc
.auxctx
);
   case XE_Misc: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.Misc.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: %s</text>\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Misc.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit("  <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      } else {

         emit( "Thread #%d: %s\n",
               (Int)xe->XE.Misc.thr->errmsg_index,
               xe->XE.Misc.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.Misc.auxstr) {
            emit(" %s\n", xe->XE.Misc.auxstr);
            if (xe->XE.Misc.auxctx)
               VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
         }

      }

      break;
   }
   case XE_LockOrder: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.LockOrder.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d: lock order \"%p before %p\" "
               "violated</text>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit( "  <auxwhat>Required order was established by "
                  "acquisition of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( "  <auxwhat>followed by a later acquisition "
                  "of lock at %p</auxwhat>\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      } else {

         emit( "Thread #%d: lock order \"%p before %p\" violated\n",
               (Int)xe->XE.LockOrder.thr->errmsg_index,
               (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr,
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
         emit( "\n" );
         emit( "Observed (incorrect) order is: "
               "acquisition of lock at %p\n",
               (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr);
         if (xe->XE.LockOrder.actual_earlier_ec) {
            VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
         } else {
            emit(" (stack unavailable)\n");
         }
         emit( "\n" );
         emit(" followed by a later acquisition of lock at %p\n",
              (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr);
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         if (xe->XE.LockOrder.shouldbe_earlier_ec
             && xe->XE.LockOrder.shouldbe_later_ec) {
            emit("\n");
            emit( "Required order was established by "
                  "acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
            emit( " followed by a later acquisition of lock at %p\n",
                  (void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
            VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
         }
         announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
         announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );

      }

      break;
   }
   case XE_PthAPIerror: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.PthAPIerror.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit(
            "    <text>Thread #%d's call to %pS failed</text>\n",
            (Int)xe->XE.PthAPIerror.thr->errmsg_index,
            xe->XE.PthAPIerror.fnname );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         emit( "  <what>with error code %ld (%s)</what>\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d's call to %pS failed\n",
               (Int)xe->XE.PthAPIerror.thr->errmsg_index,
               xe->XE.PthAPIerror.fnname );
         emit( "   with error code %ld (%s)\n",
               xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }
   case XE_UnlockBogus: {
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockBogus.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked an invalid "
               "lock at %p</text>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      } else {

         emit( "Thread #%d unlocked an invalid lock at %p\n",
               (Int)xe->XE.UnlockBogus.thr->errmsg_index,
               (void*)xe->XE.UnlockBogus.lock_ga );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

      }

      break;
   }
   case XE_UnlockForeign: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockForeign.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.owner ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockForeign.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked lock at %p "
               "currently held by thread #%d</text>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      } else {

         emit( "Thread #%d unlocked lock at %p "
               "currently held by thread #%d\n",
               (Int)xe->XE.UnlockForeign.thr->errmsg_index,
               (void*)xe->XE.UnlockForeign.lock->guestaddr,
               (Int)xe->XE.UnlockForeign.owner->errmsg_index );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockForeign.lock );

      }

      break;
   }
   case XE_UnlockUnlocked: {
      tl_assert( HG_(is_sane_LockP)( xe->XE.UnlockUnlocked.lock ) );
      tl_assert( HG_(is_sane_Thread)( xe->XE.UnlockUnlocked.thr ) );

      if (xml) {

         emit( "  <xwhat>\n" );
         emit( "    <text>Thread #%d unlocked a "
               "not-locked lock at %p</text>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock );

      } else {

         emit( "Thread #%d unlocked a not-locked lock at %p\n",
               (Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
               (void*)xe->XE.UnlockUnlocked.lock->guestaddr );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );
         announce_LockP ( xe->XE.UnlockUnlocked.lock );

      }

      break;
   }
   case XE_Race: {
      Addr         err_ga;
      const HChar* what;
      Int          szB;
      what   = xe->XE.Race.isWrite ? "write" : "read";
      szB    = xe->XE.Race.szB;
      err_ga = VG_(get_error_address)(err);

      tl_assert( HG_(is_sane_Thread)( xe->XE.Race.thr ));
      if (xe->XE.Race.h2_ct)
         tl_assert( HG_(is_sane_Thread)( xe->XE.Race.h2_ct ));

      if (xml) {

         /* ------ XML ------ */
         emit( "  <xwhat>\n" );
         emit( "    <text>Possible data race during %s of size %d "
               "at %p by thread #%d</text>\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );
         emit( "    <hthreadid>%d</hthreadid>\n",
               (Int)xe->XE.Race.thr->errmsg_index );
         emit( "  </xwhat>\n" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous %s of size %d "
                  "by thread #%d</text>\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h2_ct->errmsg_index);
            emit("  </xauxwhat>\n");
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( "  <xauxwhat>\n");
            emit( "    <text>This conflicts with a previous access "
                  "by thread #%d, after</text>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit( "    <hthreadid>%d</hthreadid>\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            emit("  </xauxwhat>\n");
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( "  <auxwhat>(the start of the thread)</auxwhat>\n" );
            }
            emit( "  <auxwhat>but before</auxwhat>\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( "  <auxwhat>(the end of the thread)</auxwhat>\n" );
            }
         }

      } else {

         /* ------ Text ------ */
         announce_combined_LockP_vecs( xe->XE.Race.locksHeldW,
                                       xe->XE.Race.h2_ct_locksHeldW );

         emit( "Possible data race during %s of size %d "
               "at %p by thread #%d\n",
               what, szB, (void*)err_ga, (Int)xe->XE.Race.thr->errmsg_index );

         tl_assert(xe->XE.Race.locksHeldW);
         show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
         VG_(pp_ExeContext)( VG_(get_error_where)(err) );

         if (xe->XE.Race.h2_ct) {
            tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
            tl_assert(xe->XE.Race.h2_ct_locksHeldW);
            emit( "\n" );
            emit( "This conflicts with a previous %s of size %d "
                  "by thread #%d\n",
                  xe->XE.Race.h2_ct_accIsW ? "write" : "read",
                  xe->XE.Race.h2_ct_accSzB,
                  xe->XE.Race.h2_ct->errmsg_index );
            show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
            VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
         }

         if (xe->XE.Race.h1_ct) {
            emit( " This conflicts with a previous access by thread #%d, "
                  "after\n",
                  xe->XE.Race.h1_ct->errmsg_index );
            if (xe->XE.Race.h1_ct_mbsegstartEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
            } else {
               emit( " (the start of the thread)\n" );
            }
            emit( " but before\n" );
            if (xe->XE.Race.h1_ct_mbsegendEC) {
               VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
            } else {
               emit( " (the end of the thread)\n" );
            }
         }

      }
      VG_(pp_addrinfo) (err_ga, &xe->XE.Race.data_addrinfo);
      break; /* case XE_Race */
   } /* case XE_Race */

   default:
      tl_assert(0);
   } /* switch (VG_(get_error_kind)(err)) */
}
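
/* Illustrative text-mode rendering of the XE_Race branch above
   (assembled from its format strings; thread indices and the address
   are made up):

      Possible data race during write of size 4 at 0x60B4A70 by thread #2
      Locks held: none
         ... stack trace of the racing access ...
      This conflicts with a previous read of size 4 by thread #3
      Locks held: none
         ... stack trace of the earlier access ...
*/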
void HG_(print_access) (StackTrace ips, UInt n_ips,
                        Thr* thr_a,
                        Addr  ga,
                        SizeT SzB,
                        Bool  isW,
                        WordSetID locksHeldW )
{
   Thread* threadp;

   threadp = libhb_get_Thr_hgthread( thr_a );
   tl_assert(threadp);
   if (!threadp->announced) {
      /* This is for interactive use. We announce the thread if needed,
         but reset it to not announced afterwards, because we want
         the thread to be announced on the error output/log if needed. */
      announce_one_thread (threadp);
      threadp->announced = False;
   }

   announce_one_thread (threadp);
   VG_(printf) ("%s of size %d at %p by thread #%d",
                isW ? "write" : "read",
                (int)SzB, (void*)ga, threadp->errmsg_index);
   if (threadp->coretid == VG_INVALID_THREADID)
      VG_(printf)(" tid (exited)\n");
   else
      VG_(printf)(" tid %u\n", threadp->coretid);
   {
      Lock** locksHeldW_P;
      locksHeldW_P = enumerate_WordSet_into_LockP_vector(
                        HG_(get_univ_lsets)(),
                        locksHeldW,
                        True/*allowed_to_be_invalid*/
                     );
      show_LockP_summary_textmode( locksHeldW_P, "" );
      HG_(free) (locksHeldW_P);
   }

   // FIXME PW EPOCH : need the real ips epoch.
   VG_(pp_StackTrace)( VG_(current_DiEpoch)(), ips, n_ips );
   VG_(printf) ("\n");
}
const HChar* HG_(get_error_name) ( const Error* err )
{
   switch (VG_(get_error_kind)(err)) {
      case XE_Race:           return "Race";
      case XE_UnlockUnlocked: return "UnlockUnlocked";
      case XE_UnlockForeign:  return "UnlockForeign";
      case XE_UnlockBogus:    return "UnlockBogus";
      case XE_PthAPIerror:    return "PthAPIerror";
      case XE_LockOrder:      return "LockOrder";
      case XE_Misc:           return "Misc";
      case XE_Dubious:        return "Dubious";
      default: tl_assert(0); /* fill in missing case */
   }
}
Bool HG_(recognised_suppression) ( const HChar* name, Supp *su )
{
#  define TRY(_name,_xskind)                   \
      if (0 == VG_(strcmp)(name, (_name))) {   \
         VG_(set_supp_kind)(su, (_xskind));    \
         return True;                          \
      }
   TRY("Race",           XS_Race);
   TRY("FreeMemLock",    XS_FreeMemLock);
   TRY("UnlockUnlocked", XS_UnlockUnlocked);
   TRY("UnlockForeign",  XS_UnlockForeign);
   TRY("UnlockBogus",    XS_UnlockBogus);
   TRY("PthAPIerror",    XS_PthAPIerror);
   TRY("LockOrder",      XS_LockOrder);
   TRY("Misc",           XS_Misc);
   TRY("Dubious",        XS_Dubious);
   return False;
#  undef TRY
}
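
/* Illustrative suppression entry (standard Valgrind suppression-file
   syntax; the suppression name and frame names are made up).  The
   second line is "Helgrind:" followed by one of the kind names
   recognised above:

      {
         ignore-known-race-in-libfoo
         Helgrind:Race
         fun:foo_worker
         fun:start_thread
      }
*/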
Bool HG_(read_extra_suppression_info) ( Int fd, HChar** bufpp, SizeT* nBufp,
                                        Int* lineno, Supp* su )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}
Bool HG_(error_matches_suppression) ( const Error* err, const Supp* su )
{
   switch (VG_(get_supp_kind)(su)) {
      case XS_Race:           return VG_(get_error_kind)(err) == XE_Race;
      case XS_UnlockUnlocked: return VG_(get_error_kind)(err) == XE_UnlockUnlocked;
      case XS_UnlockForeign:  return VG_(get_error_kind)(err) == XE_UnlockForeign;
      case XS_UnlockBogus:    return VG_(get_error_kind)(err) == XE_UnlockBogus;
      case XS_PthAPIerror:    return VG_(get_error_kind)(err) == XE_PthAPIerror;
      case XS_LockOrder:      return VG_(get_error_kind)(err) == XE_LockOrder;
      case XS_Misc:           return VG_(get_error_kind)(err) == XE_Misc;
      case XS_Dubious:        return VG_(get_error_kind)(err) == XE_Dubious;
      //case XS_: return VG_(get_error_kind)(err) == XE_;
      default: tl_assert(0); /* fill in missing cases */
   }
}
SizeT HG_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf )
{
   /* Do nothing */
   tl_assert(nBuf >= 1);
   buf[0] = 0;
   return 0;
}
SizeT HG_(print_extra_suppression_use) ( const Supp* su,
                                         /*OUT*/HChar* buf, Int nBuf )
{
   /* Do nothing */
   tl_assert(nBuf >= 1);
   buf[0] = 0;
   return 0;
}
void HG_(update_extra_suppression_use) ( const Error* err, const Supp* su )
{
   /* Do nothing */
   return;
}
/*--------------------------------------------------------------------*/
/*--- end                                              hg_errors.c ---*/
/*--------------------------------------------------------------------*/