/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks LLP

   Copyright (C) 2007-2017 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"
#include "pub_tool_addrinfo.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"   // libhb_* entry points used throughout this file
// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up
/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/
/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
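
/* For reference, the cast pattern in question is the one used by the
   map_locks helpers further down in this file; a minimal sketch of its
   shape (same VG_(lookupFM) call as map_locks_maybe_lookup; 'ga' stands
   for some guest address):

      Lock* lk    = NULL;
      Bool  found = VG_(lookupFM)( map_locks,
                                   NULL, (UWord*)&lk, (UWord)ga );

   It is the Lock** -> UWord* cast that trips strict-aliasing analysis,
   hence the -fno-strict-aliasing requirement above. */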
// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.
/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0

static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0
/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.
/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/
/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle memory pools. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */
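
/* Illustrative sketch only: the collection itself lives with the LAOG
   code later in this file, and the helper name used here is assumed
   purely for illustration.  The intended trigger has the form

      if (HG_(cardinalityWSU)(univ_laog) >= next_gc_univ_laog)
         univ_laog__do_GC();   // hypothetical helper name

   with next_gc_univ_laog raised after each collection so that GCs stay
   infrequent as the universe grows. */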
/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/
static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

#if defined(VGO_solaris)
Bool HG_(clo_ignore_thread_creation) = True;
#else
Bool HG_(clo_ignore_thread_creation) = False;
#endif /* VGO_solaris */
static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;   /* index used for errmsg_index below */
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->first_sp_delta = 0;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   thread->synchr_nesting = 0;
   thread->pthread_create_nesting_level = 0;
#if defined(VGO_solaris)
   thread->bind_guard_flag = 0;
#endif /* VGO_solaris */

   admin_threads        = thread;
   return thread;
}
// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock          = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next  = admin_locks;
   lock->admin_prev  = NULL;
   admin_locks       = lock;
   /* end: add to double linked list */
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}
/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del lock from double linked list */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)
static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int   i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}
/* Pretty Print lock lk.
   if show_lock_addrdescr, describes the (guest) lock address.
     (this description will be more complete with --read-var-info=yes).
   if show_internal_data, shows also helgrind internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data )
{
   // FIXME PW EPOCH should use the epoch of the allocated_at ec.
   const DiEpoch cur_ep = VG_(current_DiEpoch)();
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) (cur_ep, (Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %u ", thr->coretid);
         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }
   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}
/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/
/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}
static void HG_(thread_enter_synchr)(Thread *thr) {
   tl_assert(thr->synchr_nesting >= 0);
#if defined(VGO_solaris) || defined(VGO_freebsd)
   thr->synchr_nesting += 1;
#endif /* VGO_solaris */
}

static void HG_(thread_leave_synchr)(Thread *thr) {
#if defined(VGO_solaris) || defined(VGO_freebsd)
   thr->synchr_nesting -= 1;
#endif /* VGO_solaris */
   tl_assert(thr->synchr_nesting >= 0);
}

#if defined(VGO_freebsd)
static Int HG_(get_pthread_synchr_nesting_level)(ThreadId tid) {
   Thread *thr = map_threads_maybe_lookup(tid);
   return thr->synchr_nesting;
}
#endif

static void HG_(thread_enter_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level >= 0);
   thr->pthread_create_nesting_level += 1;
}

static void HG_(thread_leave_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level > 0);
   thr->pthread_create_nesting_level -= 1;
}

static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
   Thread *thr = map_threads_maybe_lookup(tid);
   return thr->pthread_create_nesting_level;
}
/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/
/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/
static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t
      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
         owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True
      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
      mentioned in the Threads

      for seg in Segments {
         seg->thr is a sane Thread
      }

   SecMaps vs Segment/Thread/Lock

      if any shadow word is ShR or ShM then .mbHasShared == True

      for each Excl(segid) state
         map_segments_lookup maps to a sane Segment(seg)
      for each ShM/ShR(tsetid,lsetid) state
         each lk in lset is a valid Lock
         each thr in tset is a valid thread, which is non-dead
*/
/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}
/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }
   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/
static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
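
/* Illustrative sketch of the intended use: a realloc-style wrapper (the
   real one appears later in this file) would copy the shadow state for
   the overlapping part of the old and new blocks.  The names 'old_ga',
   'new_ga', 'oldsz' and 'newsz' below are placeholders, not real
   identifiers from this file:

      SizeT n = oldsz < newsz ? oldsz : newsz;
      shadow_mem_scopy_range( thr, old_ga, new_ga, n );
*/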
static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

inline static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

inline static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN,
                                                   SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

inline static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN,
                                                   SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

inline static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN,
                                               SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}
/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/
/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */
/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/
static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;
static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}

static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}

static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (thr)
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
static
void evh__new_mem ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}

#define DCL_evh__new_mem_stack(syze)                                     \
static void VG_REGPARM(1) evh__new_mem_stack_##syze(Addr new_SP)         \
{                                                                        \
   Thread *thr = get_current_Thread();                                   \
   if (SHOW_EVENTS >= 2)                                                 \
      VG_(printf)("evh__new_mem_stack_" #syze "(%p, %lu)\n",             \
                  (void*)new_SP, (SizeT)syze );                          \
   shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + new_SP, syze );     \
   if (syze >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE)) \
      all__sanity_check("evh__new_mem_stack_" #syze "-post");            \
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))                  \
      shadow_mem_make_Untracked( thr, new_SP, syze );                    \
}

DCL_evh__new_mem_stack(4);
DCL_evh__new_mem_stack(8);
DCL_evh__new_mem_stack(12);
DCL_evh__new_mem_stack(16);
DCL_evh__new_mem_stack(32);
DCL_evh__new_mem_stack(112);
DCL_evh__new_mem_stack(128);
DCL_evh__new_mem_stack(144);
DCL_evh__new_mem_stack(160);
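
/* These fixed-size variants get registered with the core during tool
   initialisation.  A minimal sketch of what that registration looks
   like (the actual call site is this file's pre_clo_init, which is not
   part of this excerpt):

      VG_(track_new_mem_stack_4)  ( evh__new_mem_stack_4 );
      VG_(track_new_mem_stack_8)  ( evh__new_mem_stack_8 );
      VG_(track_new_mem_stack)    ( evh__new_mem_stack   );
*/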
static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx) {
      shadow_mem_make_New( thr, a, len );
      if (UNLIKELY(thr->pthread_create_nesting_level > 0))
         shadow_mem_make_Untracked( thr, a, len );
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}
static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests.  If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!rr && !ww)
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}
static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}
static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this.  If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs.  If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb.  So force them to NoAccess, so that all
   // VTS references in the affected area are dropped.  Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}
static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}
static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   Thread *thr = get_current_Thread();
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_scopy_range( thr, src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On x86/amd64-linux, this entails a nasty glibc specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
        first_ip_delta = -3;
#       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
        first_ip_delta = -1;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }

      if (HG_(clo_ignore_thread_creation)) {
         HG_(thread_enter_pthread_create)(thr_c);
         tl_assert(thr_c->synchr_nesting == 0);
         HG_(thread_enter_synchr)(thr_c);
         /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   Bool lock_at_exit = False;
#if defined(VGO_freebsd)
   /*
    * temporary (?): turn off this check on FreeBSD 14.2+
    * there is a lock during exit() to make it thread safe
    * but that lock gets leaked.
    */
   if (VG_(getosreldate)() > 1401500) {
      lock_at_exit = True;
   }
#endif
   if (nHeld > 0 && (lock_at_exit == False)) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
static
void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
{
   SO* so;
   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Send last arg of _so_send as False, since the sending thread
      doesn't actually exist any more, so we don't want _so_send to
      try taking stack snapshots of it. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped.  Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls.  (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.)  See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);
}
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread* thr_s;
   Thread* thr_q;
   Thr*    hbthr_s;
   Thr*    hbthr_q;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   generate_quitter_stayer_dependence (hbthr_q, hbthr_s);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
static
void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}
static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                const HChar* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   Thread *thr = map_threads_lookup(tid);
   len = VG_(strlen)( (HChar*) a );
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range( thr, a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}
static
void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cwrite_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}
static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // We ignore the initialisation state (is_inited); that's ok.
   shadow_mem_make_New(get_current_Thread(), a, len);
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}
static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free.  This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      if (LIKELY(thr->synchr_nesting == 0))
         shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_AHAE( thr, a, len );
   /* We used to call instead
         shadow_mem_make_NoAccess_NoFX( thr, a, len );
      A non-buggy application will not access anymore
      the freed memory, and so marking no access is in theory useless.
      Not marking freed memory would avoid the overhead for applications
      doing mostly malloc/free, as the freed memory should then be recycled
      very quickly after marking.
      We rather mark it noaccess for the following reasons:
        * accessibility bits then always correctly represent the memory
          status (e.g. for the client request VALGRIND_HG_GET_ABITS).
        * the overhead is reasonable (about 5 seconds per Gb in 1000 bytes
          blocks, on a ppc64le, for an unrealistic workload of an application
          doing only malloc/free).
        * marking no access allows to GC the SecMap, which might improve
          performance and/or memory usage.
        * we might detect more application bugs when memory is marked
          noaccess.
      If needed, we could support here an option --free-is-noaccess=yes|no
      to avoid marking freed memory as no access if some applications
      would need to avoid the marking noaccess overhead. */
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}
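
/* Editor's illustration (hypothetical client code, never compiled here,
   hence '#if 0'): the kind of bug that treating a free as a write is meant
   to shake out -- one thread reads a heap block while another frees it with
   no intervening synchronisation. */
#if 0
#include <pthread.h>
#include <stdlib.h>

static int* shared;

static void* reader(void* arg)
{
   int v = shared[0];        // racy read ...
   return (void*)(long)v;
}

int main(void)
{
   pthread_t t;
   shared = malloc(4 * sizeof(int));
   shared[0] = 42;
   pthread_create(&t, 0, reader, 0);
   free(shared);             // ... races with this free
   pthread_join(t, 0);
   return 0;
}
#endif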
/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_4(hbthr, a);
}

/* Same as evh__mem_help_cwrite_4 but unwind will use a first_sp_delta of
   one word. */
static VG_REGPARM(1)
void evh__mem_help_cwrite_4_fixupSP(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;

   thr->first_sp_delta = sizeof(Word);
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_4(hbthr, a);
   thr->first_sp_delta = 0;
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_8(hbthr, a);
}

/* Same as evh__mem_help_cwrite_8 but unwind will use a first_sp_delta of
   one word. */
static VG_REGPARM(1)
void evh__mem_help_cwrite_8_fixupSP(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;

   thr->first_sp_delta = sizeof(Word);
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_8(hbthr, a);
   thr->first_sp_delta = 0;
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_N(hbthr, a, size);
}
/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */;
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                      False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}
/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;

/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;
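
/* Editor's illustration (hypothetical client code, never compiled here,
   hence '#if 0'): the protocol the CV -> SO mapping models.  The signaller
   'sends' on the CV's SO and the woken waiter 'recvs', so everything the
   producer did before pthread_cond_signal happens-before the consumer's
   return from pthread_cond_wait. */
#if 0
#include <pthread.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static int ready = 0;
static int payload;

static void* producer(void* arg)
{
   payload = 123;               // happens-before the consumer's read
   pthread_mutex_lock(&mu);
   ready = 1;
   pthread_cond_signal(&cv);    // 'send' on the CV's SO
   pthread_mutex_unlock(&mu);
   return 0;
}

static void* consumer(void* arg)
{
   pthread_mutex_lock(&mu);
   while (!ready)
      pthread_cond_wait(&cv, &mu);   // successful wait: 'recv'
   pthread_mutex_unlock(&mu);
   return (void*)(long)payload;      // ordered after the producer's write
}

int main(void)
{
   pthread_t p, c;
   pthread_create(&c, 0, consumer, 0);
   pthread_create(&p, 0, producer, 0);
   pthread_join(p, 0);
   pthread_join(c, 0);
   return 0;
}
#endif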
static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so     = so;
      cvi->mx_ga  = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      return NULL;
   }
}
static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread* thr;
   UWord   keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
                 " destruction of condition variable being waited upon");
         /* Destroying a cond var being waited upon outcome is EBUSY and
            variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV.  So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   Lock*   lk = NULL;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (cvi->mx_ga != 0) {
      lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
   }
   /* note: lk could be NULL.  Be careful. */
   if (lk) {
      if (lk->kind == LK_rdwr) {
         HG_(record_error_Misc)(thr,
            "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
      }
      if (lk->heldBy == NULL) {
         HG_(record_error_Dubious)(thr,
            "pthread_cond_{signal,broadcast}: dubious: "
            "associated lock is not held by any thread");
      }
      if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
         HG_(record_error_Misc)(thr,
            "pthread_cond_{signal,broadcast}: "
            "associated lock is not held by calling thread");
      }
   } else {
      /* Couldn't even find the damn thing. */
      // But actually .. that's not necessarily an error.  We don't
      // know the (CV,MX) binding until a pthread_cond_wait or bcast
      // shows us what it is, and if that may not have happened yet.
      // So just keep quiet in this circumstance.
      //HG_(record_error_Misc)( thr,
      //   "pthread_cond_{signal,broadcast}: "
      //   "no or invalid mutex associated with cond");
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}
static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex,
                                             Bool timeout )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
                  (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
   if (!cvi) {
      /* This could be either a bug in helgrind or the guest application
         that did an error (e.g. cond var was destroyed by another thread).
         Let's assume helgrind is perfect ...
         Note that this is similar to drd behaviour. */
      HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
                                  " being waited upon");
      return;
   }

   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!timeout && !libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded"
                                   " without prior pthread_cond_post");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}
static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
                                             void* cond, void* cond_attr )
{
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
                  "(ctid=%d, cond=%p, cond_attr=%p)\n",
                  (Int)tid, (void*)cond, (void*) cond_attr );

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert (cvi);
   tl_assert (cvi->so);
}


static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond, Bool cond_is_init )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p, cond_is_init=%d)\n",
                  (Int)tid, (void*)cond, (Int)cond_is_init );

   map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
}
/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh.  */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about. */

/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;
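
/* Editor's illustration (hypothetical client code, never compiled here,
   hence '#if 0'): the T1/T2/T3 scenario described in the comment above.
   Two threads each post once; a third waits twice and therefore acquires
   dependencies on both posters, so its reads of a[0] and a[1] are ordered
   after the corresponding writes. */
#if 0
#include <pthread.h>
#include <semaphore.h>

static sem_t s;
static int a[2];

static void* post0(void* p) { a[0] = 10; sem_post(&s); return 0; }
static void* post1(void* p) { a[1] = 20; sem_post(&s); return 0; }

static void* waiter(void* p)
{
   sem_wait(&s);
   sem_wait(&s);
   return (void*)(long)(a[0] + a[1]);   // ordered after both posts
}

int main(void)
{
   pthread_t t1, t2, t3;
   sem_init(&s, 0, 0);
   pthread_create(&t1, 0, post0, 0);
   pthread_create(&t2, 0, post1, 0);
   pthread_create(&t3, 0, waiter, 0);
   pthread_join(t1, 0); pthread_join(t2, 0); pthread_join(t3, 0);
   return 0;
}
#endif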
static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped off the
      stack, they acquire dependencies on the posting thread. */
   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}

static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */
   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}
/* -------------------------------------------------------- */
/* -------------- events to do with barriers -------------- */
/* -------------------------------------------------------- */

typedef
   struct {
      Bool    initted;   /* has it yet been initted by guest? */
      Bool    resizable; /* is resizing allowed? */
      UWord   size;      /* declared size */
      XArray* waiting;   /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;

static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}

static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}

/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;

static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
   }
}
static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
   UWord key, val;
   map_barrier_to_Bar_INIT();
   if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
      tl_assert(key == (UWord)barrier);
      return (Bar*)val;
   } else {
      Bar* bar = new_Bar();
      VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
      return bar;
   }
}

static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      delete_Bar(bar);
   }
}
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
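
/* Editor's illustration (hypothetical client code, never compiled here,
   hence '#if 0'): a client-side view of the weak-send / strong-recv scheme
   above.  After the barrier, every participant's vector clock is the join
   of all participants' clocks, so each thread may read what the others
   wrote before the barrier without a race being reported. */
#if 0
#include <pthread.h>

#define N 4
static pthread_barrier_t bar;
static int slot[N];

static void* worker(void* p)
{
   long me = (long)p;
   slot[me] = (int)me;            // write own slot before the barrier
   pthread_barrier_wait(&bar);    // join + redistribute vector clocks
   long sum = 0;
   for (int i = 0; i < N; i++)    // read everyone's slot afterwards
      sum += slot[i];
   return (void*)sum;
}

int main(void)
{
   pthread_t t[N];
   pthread_barrier_init(&bar, 0, N);
   for (long i = 0; i < N; i++) pthread_create(&t[i], 0, worker, (void*)i);
   for (int  i = 0; i < N; i++) pthread_join(t[i], 0);
   pthread_barrier_destroy(&bar);
   return 0;
}
#endif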
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}
static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity.  There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity.  If we decrease it to be equal or
         below the number of waiting threads, they will now move past
         the barrier, so need to mess with dep edges in the same way
         as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      }
      bar->size = newcount;
   }
}
/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */

/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

static void map_usertag_to_SO_delete ( UWord usertag ) {
   UWord keyW, valW;
   map_usertag_to_SO_INIT();
   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == usertag);
      tl_assert(so);
      libhb_so_dealloc(so);
   }
}
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}
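
/* Editor's illustration (hypothetical client code, never compiled here,
   hence '#if 0'): user-specified HB edges as seen from the client side,
   using the ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER macros from
   valgrind/helgrind.h, which reach evh__HG_USERSO_SEND_PRE and
   evh__HG_USERSO_RECV_POST with the address of 'flag' as the usertag.
   The __atomic_* builtins are a GCC/Clang assumption of this sketch. */
#if 0
#include <pthread.h>
#include <valgrind/helgrind.h>

static volatile int flag = 0;
static int data;

static void* producer(void* p)
{
   data = 99;
   ANNOTATE_HAPPENS_BEFORE(&flag);   // weak send on the SO tagged by &flag
   __atomic_store_n(&flag, 1, __ATOMIC_RELEASE);
   return 0;
}

static void* consumer(void* p)
{
   while (__atomic_load_n(&flag, __ATOMIC_ACQUIRE) == 0)
      ;
   ANNOTATE_HAPPENS_AFTER(&flag);    // strong recv: acquire the dependency
   return (void*)(long)data;         // not reported as a race
}

int main(void)
{
   pthread_t p, c;
   pthread_create(&c, 0, consumer, 0);
   pthread_create(&p, 0, producer, 0);
   pthread_join(p, 0);
   pthread_join(c, 0);
   return 0;
}
#endif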
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}

static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}
#if defined(VGO_solaris)
/* ----------------------------------------------------- */
/* --- events to do with bind guard/clear intercepts --- */
/* ----------------------------------------------------- */

static
void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_GUARD"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((bindflag & thr->bind_guard_flag) == 0) {
      thr->bind_guard_flag |= bindflag;
      HG_(thread_enter_synchr)(thr);
      /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
      HG_(thread_enter_pthread_create)(thr);
   }
}

static
void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((thr->bind_guard_flag & bindflag) != 0) {
      thr->bind_guard_flag &= ~bindflag;
      HG_(thread_leave_synchr)(thr);
      HG_(thread_leave_pthread_create)(thr);
   }
}
#endif /* VGO_solaris */
/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
          laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/
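
/* Editor's illustration (hypothetical client code, never compiled here,
   hence '#if 0'): the kind of inconsistency the lock acquisition graph is
   built to catch.  Thread A acquires L1 then L2, establishing the edge
   L1 -> L2; thread B later acquires L2 then L1, contradicting that order,
   and Helgrind reports a lock order violation even though this particular
   run never deadlocks. */
#if 0
#include <pthread.h>

static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

static void* ta(void* p)
{
   pthread_mutex_lock(&L1);
   pthread_mutex_lock(&L2);   // records edge L1 -> L2
   pthread_mutex_unlock(&L2);
   pthread_mutex_unlock(&L1);
   return 0;
}

static void* tb(void* p)
{
   pthread_mutex_lock(&L2);
   pthread_mutex_lock(&L1);   // L2 -> L1 contradicts L1 -> L2
   pthread_mutex_unlock(&L1);
   pthread_mutex_unlock(&L2);
   return 0;
}

int main(void)
{
   pthread_t a, b;
   pthread_create(&a, 0, ta, 0);
   pthread_join(a, 0);        // serialise so no real deadlock occurs
   pthread_create(&b, 0, tb, 0);
   pthread_join(b, 0);
   return 0;
}
#endif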
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
}
static void laog__show ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;

   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
static void univ_laog_do_GC ( void ) {
   Word i;
   LAOGLinks* links;
   Word seen = 0;
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog );

   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                        * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   VG_(initIterFM)( laog );
   links = NULL;
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
      links = NULL;
   }
   VG_(doneIterFM)( laog );

   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next_gc.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep long enough laog WV that will become
   //   useful again very soon.
   // Sol 2: garbage collect at a percentage increase of the current cardinality
   //   (with a min increase of 1)
   //   Trials on a small test program with 1%, 5% and 10% increase was done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each gc becomes a lot, when many gc
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to cost introduced by gc: on the t2t perf test (doing only
   // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection. With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // difference performance is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.
   //
   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or fixed increased),
   // we should do it here, eg.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code the solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
          (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
}
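/* Illustrative sketch (not part of the tool): the "solution 3" trigger
   policy described above, reduced to a tiny standalone program.  The names
   next_gc, cardinality and maybe_gc are hypothetical stand-ins for
   next_gc_univ_laog, HG_(cardinalityWSU)(univ_laog) and univ_laog_do_GC.

      #include <stdio.h>

      static int next_gc = 1000;               // assumed initial threshold

      static void maybe_gc ( int cardinality )
      {
         if (cardinality >= next_gc) {
            // ... free the word-sets not referenced by any laog link ...
            next_gc = next_gc + 1;             // solution 3: advance by one
            printf("gc at %d, next gc at %d\n", cardinality, next_gc);
         }
      }

      int main ( void ) { maybe_gc(999); maybe_gc(1000); return 0; }

   The threshold therefore tracks the cardinality observed at the previous
   collection plus one, rather than growing multiplicatively (solution 1)
   or by a percentage (solution 2). */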
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      WordSetID outs_new;
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      WordSetID inns_new;
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}
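/* Illustrative sketch (not part of the tool): the client-side acquisition
   pattern that leads to laog__add_edge being called.  When the thread below
   locks m2 while already holding m1, laog__pre_thread_acquires_lock (later
   in this file) adds the edge m1 -> m2 and, since both locks carry
   acquired_at contexts, a LAOGLinkExposition entry recording where the
   ordering was established.

      #include <pthread.h>

      static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
      static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

      static void* worker ( void* arg )
      {
         pthread_mutex_lock(&m1);
         pthread_mutex_lock(&m2);     // edge m1 -> m2 is recorded here
         pthread_mutex_unlock(&m2);
         pthread_mutex_unlock(&m1);
         return arg;
      }
*/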
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase the nr of WS so check for gc. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}
__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   UWord      keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(keyW == (UWord)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   UWord      keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(keyW == (UWord)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exist from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   UWord     succs_size, i;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
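/* Worked example (illustrative only): suppose laog currently holds the
   edges A -> B and B -> C, i.e. A was seen acquired before B, and B before
   C.  Then

      laog__do_dfs_from_to(A, {C})   returns C     (path A -> B -> C)
      laog__do_dfs_from_to(C, {A})   returns NULL  (no path)
      laog__do_dfs_from_to(A, {})    returns NULL  (empty destination set)

   The walk is an explicit-stack depth-first search over laog__succs, and
   the 'visited' map stops any node from being expanded twice, so the loop
   terminates even if the graph should ever contain a cycle. */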
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord*   ls_words;
   UWord    ls_size, i;
   Lock*    other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk, other,
            found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks
            between them: fAB between A and B, fBC between B and C,
            and fCA between C and A.

            Let's have the following actions:
                   A takes    fCA,fAB
                   A releases fCA,fAB
                   B takes    fAB,fBC
                   B releases fAB,fBC
                   C takes    fBC,fCA

            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            sequence is done:
                   A takes fCA
                   B takes fAB
                   C takes fBC

            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            are not given.

            This is because there is no pair (fCA, fBC) in laog exposition :
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */
         HG_(record_error_LockOrder)(
            thr, lk, other,
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
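/* Illustrative sketch (not part of the tool): a minimal client program that
   triggers the check above.  Thread t1 establishes the order ma -> mb;
   thread t2 then holds mb and asks for ma, at which point
   laog__do_dfs_from_to(ma, {mb}) finds the path ma -> mb and a LockOrder
   error is reported, using the acquisition points stored in laog_exposition
   for the (ma, mb) edge when that entry exists.

      #include <pthread.h>

      static pthread_mutex_t ma = PTHREAD_MUTEX_INITIALIZER;
      static pthread_mutex_t mb = PTHREAD_MUTEX_INITIALIZER;

      static void* t1 ( void* x )
      {
         pthread_mutex_lock(&ma); pthread_mutex_lock(&mb);   // order: ma, mb
         pthread_mutex_unlock(&mb); pthread_mutex_unlock(&ma);
         return x;
      }
      static void* t2 ( void* x )
      {
         pthread_mutex_lock(&mb); pthread_mutex_lock(&ma);   // order: mb, ma
         pthread_mutex_unlock(&ma); pthread_mutex_unlock(&mb);
         return x;
      }

      int main ( void )
      {
         pthread_t a, b;
         pthread_create(&a, NULL, t1, NULL); pthread_join(a, NULL);
         pthread_create(&b, NULL, t2, NULL); pthread_join(b, NULL);
         return 0;
      }
*/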
/* Allocates a duplicate of words. Caller must HG_(free) the result. */
static UWord* UWordV_dup(UWord* words, Word words_size)
{
   Word i;

   if (words_size == 0)
      return NULL;

   UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));

   for (i = 0; i < words_size; i++)
      dup[i] = words[i];

   return dup;
}
/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   UWordV_dup call needed here ...
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}
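/* Worked example (illustrative only): if the graph holds P1 -> L, P2 -> L
   and L -> S1, so that preds(L) = {P1, P2} and succs(L) = {S1}, then
   deleting L removes those three edges and adds P1 -> S1 and P2 -> S1.
   The ordering information that flowed through L is thus preserved rather
   than lost.  The pred/succ payloads are duplicated first because the
   del/add calls themselves may trigger a word-set GC that invalidates the
   payload pointers. */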
/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable *hg_mallocmeta_table = NULL;

/* MallocMeta are small elements. We use a pool to avoid
   the overhead of malloc for each MallocMeta. */
static PoolAlloc *MallocMeta_poolalloc = NULL;

static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
   VG_(memset)(md, 0, sizeof(MallocMeta));
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   VG_(freeEltPA)(MallocMeta_poolalloc, md);
}
/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(md->szB, md->where);

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new_aligned ( ThreadId tid, SizeT n,
                                             SizeT align, SizeT orig_align ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new_aligned ( ThreadId tid, SizeT n,
                                                 SizeT align, SizeT orig_align ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT orig_alignT,
                                SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}
/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT       szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
      ExeContext* ec_free = VG_(record_ExeContext)( tid, 0 );
      VG_(XTMemory_Full_free)(md->szB, md->where, ec_free);
   }

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete_aligned ( ThreadId tid, void* p,
                                               SizeT align ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete_aligned ( ThreadId tid, void* p,
                                                   SizeT align ) {
   handle_free(tid, p);
}
static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT       i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   if (payloadV == NULL) {
      return handle_alloc ( tid, new_size, VG_(clo_alignment),
                            /*is_zeroed*/False );
   }

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (new_size == 0U ) {
      if (VG_(clo_realloc_zero_bytes_frees) == True) {
         md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
         tl_assert(md_tmp);
         tl_assert(md_tmp == md);

         VG_(cli_free)((void*)md->payload);
         delete_MallocMeta(md);

         return NULL;
      } else {
         new_size = 1U;
      }
   }

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller */
      md->where = VG_(record_ExeContext)(tid, 0);
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB = new_size;
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      // Nb: if realloc fails, NULL is returned but the old block is not
      // touched.  What an awful function.
      if (!p_new)
         return NULL;

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );

      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}
/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      exactly at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/UInt*        tnr,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* Before searching the list of allocated blocks in hg_mallocmeta_table,
      first verify that data_addr is in a heap client segment. */
   const NSegment *s = VG_(am_find_nsegment) (data_addr);
   if (s == NULL || !s->isCH)
      return False;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (tnr)     *tnr     = mm->thr->errmsg_index;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
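/* Worked example (illustrative only): suppose a block was allocated with
   payload 0x5000000 and size 256, and data_addr is 0x5000000 plus
   3*sizeof(UWord).  The fast loop above probes the hash table with keys
   data_addr - 0*sizeof(UWord), data_addr - 1*sizeof(UWord), ...; on the
   i == 3 probe the key equals the block's payload (which is what the table
   is keyed on), so VG_(HT_lookup) returns the MallocMeta and
   addr_is_in_MM_Chunk confirms the hit without scanning the whole table.
   Interior pointers more than 15 words past the start of a block fall
   through to the full-table scan. */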
/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

#define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))

/* This takes and returns atoms, of course.  Not full IRExprs. */
static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
{
   tl_assert(arg1 && arg2);
   tl_assert(isIRAtom(arg1));
   tl_assert(isIRAtom(arg2));
   /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
      code, I know. */
   IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
   addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
   addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
   addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
                                                       mkexpr(wide2))));
   addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
   return mkexpr(res);
}
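/* What the helper above emits, written out as IR pseudo-assembly for a call
   mk_And1(sbOut, a, b) on two Ity_I1 atoms (illustrative only):

      t_wide1 = 1Uto32(a)
      t_wide2 = 1Uto32(b)
      t_anded = And32(t_wide1, t_wide2)
      t_res   = 32to1(t_anded)

   and the returned atom is t_res, usable as a guard expression by the
   caller. */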
4544 static void instrument_mem_access ( IRSB
* sbOut
,
4548 Bool fixupSP_needed
,
4552 /* goff_sp_s1 is the offset in guest
4553 state where the cachedstack validity
4555 IRExpr
* guard
) /* NULL => True */
4557 IRType tyAddr
= Ity_INVALID
;
4558 const HChar
* hName
= NULL
;
4561 IRExpr
** argv
= NULL
;
4564 // THRESH is the size of the window above SP (well,
4565 // mostly above) that we assume implies a stack reference.
4566 const Int THRESH
= 4096 * 4; // somewhat arbitrary
4567 const Int rz_szB
= VG_STACK_REDZONE_SZB
;
4569 tl_assert(isIRAtom(addr
));
4570 tl_assert(hWordTy_szB
== 4 || hWordTy_szB
== 8);
4572 tyAddr
= typeOfIRExpr( sbOut
->tyenv
, addr
);
4573 tl_assert(tyAddr
== Ity_I32
|| tyAddr
== Ity_I64
);
4575 /* So the effective address is in 'addr' now. */
4576 regparms
= 1; // unless stated otherwise
4580 hName
= "evh__mem_help_cwrite_1";
4581 hAddr
= &evh__mem_help_cwrite_1
;
4582 argv
= mkIRExprVec_1( addr
);
4585 hName
= "evh__mem_help_cwrite_2";
4586 hAddr
= &evh__mem_help_cwrite_2
;
4587 argv
= mkIRExprVec_1( addr
);
4590 if (fixupSP_needed
) {
4591 /* Unwind has to be done with a SP fixed up with one word.
4592 See Ist_Put heuristic in hg_instrument. */
4593 hName
= "evh__mem_help_cwrite_4_fixupSP";
4594 hAddr
= &evh__mem_help_cwrite_4_fixupSP
;
4596 hName
= "evh__mem_help_cwrite_4";
4597 hAddr
= &evh__mem_help_cwrite_4
;
4599 argv
= mkIRExprVec_1( addr
);
4602 if (fixupSP_needed
) {
4603 /* Unwind has to be done with a SP fixed up with one word.
4604 See Ist_Put heuristic in hg_instrument. */
4605 hName
= "evh__mem_help_cwrite_8_fixupSP";
4606 hAddr
= &evh__mem_help_cwrite_8_fixupSP
;
4608 hName
= "evh__mem_help_cwrite_8";
4609 hAddr
= &evh__mem_help_cwrite_8
;
4611 argv
= mkIRExprVec_1( addr
);
4614 tl_assert(szB
> 8 && szB
<= 512); /* stay sane */
4616 hName
= "evh__mem_help_cwrite_N";
4617 hAddr
= &evh__mem_help_cwrite_N
;
4618 argv
= mkIRExprVec_2( addr
, mkIRExpr_HWord( szB
));
4624 hName
= "evh__mem_help_cread_1";
4625 hAddr
= &evh__mem_help_cread_1
;
4626 argv
= mkIRExprVec_1( addr
);
4629 hName
= "evh__mem_help_cread_2";
4630 hAddr
= &evh__mem_help_cread_2
;
4631 argv
= mkIRExprVec_1( addr
);
4634 hName
= "evh__mem_help_cread_4";
4635 hAddr
= &evh__mem_help_cread_4
;
4636 argv
= mkIRExprVec_1( addr
);
4639 hName
= "evh__mem_help_cread_8";
4640 hAddr
= &evh__mem_help_cread_8
;
4641 argv
= mkIRExprVec_1( addr
);
4644 tl_assert(szB
> 8 && szB
<= 512); /* stay sane */
4646 hName
= "evh__mem_help_cread_N";
4647 hAddr
= &evh__mem_help_cread_N
;
4648 argv
= mkIRExprVec_2( addr
, mkIRExpr_HWord( szB
));
4653 /* Create the helper. */
4657 di
= unsafeIRDirty_0_N( regparms
,
4658 hName
, VG_(fnptr_to_fnentry
)( hAddr
),
4661 if (HG_(clo_delta_stacktrace
)) {
4662 /* memory access helper might read the shadow1 SP offset, that
4663 indicates if the cached stacktrace is valid. */
4664 di
->fxState
[0].fx
= Ifx_Read
;
4665 di
->fxState
[0].offset
= goff_sp_s1
;
4666 di
->fxState
[0].size
= hWordTy_szB
;
4667 di
->fxState
[0].nRepeats
= 0;
4668 di
->fxState
[0].repeatLen
= 0;
   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
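      /* Worked instance of the wraparound point above (small, assumed
         values): take SP = 100, RZ = 15, N = 50, and a stack access at
         addr = 90, i.e. just inside the redzone below SP.  Then

            addr - SP + RZ  =  90 - 100 + 15  =  5,  and 5 >u 50 is False,

         so the guard is false and the helper call is skipped, as intended
         for a stack reference.  Folding RZ into the constant instead,
         i.e. computing (addr - SP) >u (N - RZ), gives
         (unsigned)(90 - 100), a huge value, which is >u 35, so the access
         would wrongly be treated as a non-stack reference.  Hence the RZ
         term is added before the unsigned comparison. */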
4693 IRTemp sp
= newIRTemp(sbOut
->tyenv
, tyAddr
);
4694 addStmtToIRSB( sbOut
, assign(sp
, IRExpr_Get(goff_sp
, tyAddr
)));
4697 IRTemp addr_minus_sp
= newIRTemp(sbOut
->tyenv
, tyAddr
);
4700 assign(addr_minus_sp
,
4702 ? binop(Iop_Sub32
, addr
, mkexpr(sp
))
4703 : binop(Iop_Sub64
, addr
, mkexpr(sp
)))
4706 /* "addr - SP + RZ" */
4707 IRTemp diff
= newIRTemp(sbOut
->tyenv
, tyAddr
);
4712 ? binop(Iop_Add32
, mkexpr(addr_minus_sp
), mkU32(rz_szB
))
4713 : binop(Iop_Add64
, mkexpr(addr_minus_sp
), mkU64(rz_szB
)))
4716 /* guardA == "guard on the address" */
4717 IRTemp guardA
= newIRTemp(sbOut
->tyenv
, Ity_I1
);
4722 ? binop(Iop_CmpLT32U
, mkU32(THRESH
), mkexpr(diff
))
4723 : binop(Iop_CmpLT64U
, mkU64(THRESH
), mkexpr(diff
)))
4725 di
->guard
= mkexpr(guardA
);
4728 /* If there's a guard on the access itself (as supplied by the
4729 caller of this routine), we need to AND that in to any guard we
4730 might already have. */
4732 di
->guard
= mk_And1(sbOut
, di
->guard
, guard
);
4735 /* Add the helper. */
4736 addStmtToIRSB( sbOut
, IRStmt_Dirty(di
) );
4740 /* Figure out if GA is a guest code address in the dynamic linker, and
4741 if so return True. Otherwise (and in case of any doubt) return
4742 False. (sidedly safe w/ False as the safe value) */
4743 static Bool
is_in_dynamic_linker_shared_object( Addr ga
)
4746 const HChar
* soname
;
4748 dinfo
= VG_(find_DebugInfo
)( VG_(current_DiEpoch
)(), ga
);
4749 if (!dinfo
) return False
;
4751 soname
= VG_(DebugInfo_get_soname
)(dinfo
);
4753 if (0) VG_(printf
)("%s\n", soname
);
4755 return VG_(is_soname_ld_so
)(soname
);
4759 void addInvalidateCachedStack (IRSB
* bbOut
,
4763 /* Invalidate cached stack: Write 0 in the shadow1 offset 0 */
4764 addStmtToIRSB( bbOut
,
4765 IRStmt_Put(goff_sp_s1
,
4767 mkU32(0) : mkU64(0)));
4768 /// ???? anything more efficient than assign a Word???
4772 IRSB
* hg_instrument ( VgCallbackClosure
* closure
,
4774 const VexGuestLayout
* layout
,
4775 const VexGuestExtents
* vge
,
4776 const VexArchInfo
* archinfo_host
,
4777 IRType gWordTy
, IRType hWordTy
)
4781 Addr cia
; /* address of current insn */
4783 Bool inLDSO
= False
;
4784 Addr inLDSOmask4K
= 1; /* mismatches on first check */
4786 // Set to True when SP must be fixed up when taking a stack trace for the
4787 // mem accesses in the rest of the instruction
4788 Bool fixupSP_needed
= False
;
4790 const Int goff_SP
= layout
->offset_SP
;
4791 /* SP in shadow1 indicates if cached stack is valid.
4792 We have to invalidate the cached stack e.g. when seeing call or ret. */
4793 const Int goff_SP_s1
= layout
->total_sizeB
+ layout
->offset_SP
;
4794 const Int hWordTy_szB
= sizeofIRType(hWordTy
);
4796 if (gWordTy
!= hWordTy
) {
4797 /* We don't currently support this case. */
4798 VG_(tool_panic
)("host/guest word size mismatch");
4801 if (VKI_PAGE_SIZE
< 4096 || VG_(log2
)(VKI_PAGE_SIZE
) == -1) {
4802 VG_(tool_panic
)("implausible or too-small VKI_PAGE_SIZE");
4806 bbOut
= emptyIRSB();
4807 bbOut
->tyenv
= deepCopyIRTypeEnv(bbIn
->tyenv
);
4808 bbOut
->next
= deepCopyIRExpr(bbIn
->next
);
4809 bbOut
->jumpkind
= bbIn
->jumpkind
;
4810 bbOut
->offsIP
= bbIn
->offsIP
;
4812 // Copy verbatim any IR preamble preceding the first IMark
4814 while (i
< bbIn
->stmts_used
&& bbIn
->stmts
[i
]->tag
!= Ist_IMark
) {
4815 addStmtToIRSB( bbOut
, bbIn
->stmts
[i
] );
4819 // Get the first statement, and initial cia from it
4820 tl_assert(bbIn
->stmts_used
> 0);
4821 tl_assert(i
< bbIn
->stmts_used
);
4822 st
= bbIn
->stmts
[i
];
4823 tl_assert(Ist_IMark
== st
->tag
);
4824 cia
= st
->Ist
.IMark
.addr
;
4827 for (/*use current i*/; i
< bbIn
->stmts_used
; i
++) {
4828 st
= bbIn
->stmts
[i
];
4830 tl_assert(isFlatIRStmt(st
));
4833 /* No memory reference, but if we do anything else than
4834 Ijk_Boring, indicate to helgrind that the previously
4835 recorded stack is invalid.
4836 For Ijk_Boring, also invalidate the stack if the exit
4837 instruction has no CF info. This heuristic avoids cached
4838 stack trace mismatch in some cases such as longjmp
4839 implementation. Similar logic below for the bb exit. */
4840 if (HG_(clo_delta_stacktrace
)
4841 && (st
->Ist
.Exit
.jk
!= Ijk_Boring
|| ! VG_(has_CF_info
)(cia
)))
4842 addInvalidateCachedStack(bbOut
, goff_SP_s1
, hWordTy_szB
);
4846 /* None of these can contain any memory references. */
4849 /* This cannot contain any memory references. */
4850 /* If we see a put to SP, from now on in this instruction,
4851 the SP needed to unwind has to be fixed up by one word.
4852 This very simple heuristic ensures correct unwinding in the
4853 typical case of a push instruction. If we need to cover more
4854 cases, then we need to better track how the SP is modified by
4855 the instruction (and calculate a precise sp delta), rather than
4856 assuming that the SP is decremented by a Word size. */
4857 if (HG_(clo_delta_stacktrace
) && st
->Ist
.Put
.offset
== goff_SP
) {
4858 fixupSP_needed
= True
;
4862 /* This cannot contain any memory references. */
4866 fixupSP_needed
= False
;
4868 /* no mem refs, but note the insn address. */
4869 cia
= st
->Ist
.IMark
.addr
;
4871 /* Don't instrument the dynamic linker. It generates a
4872 lot of races which we just expensively suppress, so
4875 Avoid flooding is_in_dynamic_linker_shared_object with
4876 requests by only checking at transitions between 4K
4878 if ((cia
& ~(Addr
)0xFFF) != inLDSOmask4K
) {
4879 if (0) VG_(printf
)("NEW %#lx\n", cia
);
4880 inLDSOmask4K
= cia
& ~(Addr
)0xFFF;
4881 inLDSO
= is_in_dynamic_linker_shared_object(cia
);
4883 if (0) VG_(printf
)("old %#lx\n", cia
);
4888 switch (st
->Ist
.MBE
.event
) {
4890 case Imbe_CancelReservation
:
4891 break; /* not interesting */
4898 /* Atomic read-modify-write cycle. Just pretend it's a
4900 IRCAS
* cas
= st
->Ist
.CAS
.details
;
4901 Bool isDCAS
= cas
->oldHi
!= IRTemp_INVALID
;
4903 tl_assert(cas
->expdHi
);
4904 tl_assert(cas
->dataHi
);
4906 tl_assert(!cas
->expdHi
);
4907 tl_assert(!cas
->dataHi
);
4909 /* Just be boring about it. */
4911 instrument_mem_access(
4915 * sizeofIRType(typeOfIRExpr(bbIn
->tyenv
, cas
->dataLo
)),
4916 False
/*!isStore*/, fixupSP_needed
,
4917 hWordTy_szB
, goff_SP
, goff_SP_s1
,
4925 /* We pretend store-conditionals don't exist, viz, ignore
4926 them. Whereas load-linked's are treated the same as
4929 if (st
->Ist
.LLSC
.storedata
== NULL
) {
4931 dataTy
= typeOfIRTemp(bbIn
->tyenv
, st
->Ist
.LLSC
.result
);
4933 instrument_mem_access(
4936 sizeofIRType(dataTy
),
4937 False
/*!isStore*/, fixupSP_needed
,
4938 hWordTy_szB
, goff_SP
, goff_SP_s1
,
4951 instrument_mem_access(
4954 sizeofIRType(typeOfIRExpr(bbIn
->tyenv
, st
->Ist
.Store
.data
)),
4955 True
/*isStore*/, fixupSP_needed
,
4956 hWordTy_szB
, goff_SP
, goff_SP_s1
,
4963 IRStoreG
* sg
= st
->Ist
.StoreG
.details
;
4964 IRExpr
* data
= sg
->data
;
4965 IRExpr
* addr
= sg
->addr
;
4966 IRType type
= typeOfIRExpr(bbIn
->tyenv
, data
);
4967 tl_assert(type
!= Ity_INVALID
);
4968 instrument_mem_access( bbOut
, addr
, sizeofIRType(type
),
4969 True
/*isStore*/, fixupSP_needed
,
4971 goff_SP
, goff_SP_s1
, sg
->guard
);
4976 IRLoadG
* lg
= st
->Ist
.LoadG
.details
;
4977 IRType type
= Ity_INVALID
; /* loaded type */
4978 IRType typeWide
= Ity_INVALID
; /* after implicit widening */
4979 IRExpr
* addr
= lg
->addr
;
4980 typeOfIRLoadGOp(lg
->cvt
, &typeWide
, &type
);
4981 tl_assert(type
!= Ity_INVALID
);
4982 instrument_mem_access( bbOut
, addr
, sizeofIRType(type
),
4983 False
/*!isStore*/, fixupSP_needed
,
4985 goff_SP
, goff_SP_s1
, lg
->guard
);
4990 IRExpr
* data
= st
->Ist
.WrTmp
.data
;
4991 if (data
->tag
== Iex_Load
) {
4993 instrument_mem_access(
4995 data
->Iex
.Load
.addr
,
4996 sizeofIRType(data
->Iex
.Load
.ty
),
4997 False
/*!isStore*/, fixupSP_needed
,
4998 hWordTy_szB
, goff_SP
, goff_SP_s1
,
5008 IRDirty
* d
= st
->Ist
.Dirty
.details
;
5009 if (d
->mFx
!= Ifx_None
) {
5010 /* This dirty helper accesses memory. Collect the
5012 tl_assert(d
->mAddr
!= NULL
);
5013 tl_assert(d
->mSize
!= 0);
5014 dataSize
= d
->mSize
;
5015 if (d
->mFx
== Ifx_Read
|| d
->mFx
== Ifx_Modify
) {
5017 instrument_mem_access(
5018 bbOut
, d
->mAddr
, dataSize
,
5019 False
/*!isStore*/, fixupSP_needed
,
5020 hWordTy_szB
, goff_SP
, goff_SP_s1
,
5025 if (d
->mFx
== Ifx_Write
|| d
->mFx
== Ifx_Modify
) {
5027 instrument_mem_access(
5028 bbOut
, d
->mAddr
, dataSize
,
5029 True
/*isStore*/, fixupSP_needed
,
5030 hWordTy_szB
, goff_SP
, goff_SP_s1
,
5036 tl_assert(d
->mAddr
== NULL
);
5037 tl_assert(d
->mSize
== 0);
5047 } /* switch (st->tag) */
5049 addStmtToIRSB( bbOut
, st
);
5050 } /* iterate over bbIn->stmts */
5052 // See above the case Ist_Exit:
5053 if (HG_(clo_delta_stacktrace
)
5054 && (bbOut
->jumpkind
!= Ijk_Boring
|| ! VG_(has_CF_info
)(cia
)))
5055 addInvalidateCachedStack(bbOut
, goff_SP_s1
, hWordTy_szB
);
5067 /*----------------------------------------------------------------*/
5068 /*--- Client requests ---*/
5069 /*----------------------------------------------------------------*/
5071 /* Sheesh. Yet another goddam finite map. */
5072 static WordFM
* map_pthread_t_to_Thread
= NULL
; /* pthread_t -> Thread* */
5074 static void map_pthread_t_to_Thread_INIT ( void ) {
5075 if (UNLIKELY(map_pthread_t_to_Thread
== NULL
)) {
5076 map_pthread_t_to_Thread
= VG_(newFM
)( HG_(zalloc
), "hg.mpttT.1",
5081 /* A list of Ada dependent tasks and their masters. Used for implementing
5082 the Ada task termination semantic as implemented by the
5083 gcc gnat Ada runtime. */
5086 void* dependent
; // Ada Task Control Block of the Dependent
5087 void* master
; // ATCB of the master
5088 Word master_level
; // level of dependency between master and dependent
5089 Thread
* hg_dependent
; // helgrind Thread* for dependent task.
5091 GNAT_dmml
; // (d)ependent (m)aster (m)aster_(l)evel.
5092 static XArray
* gnat_dmmls
; /* of GNAT_dmml */
5093 static void gnat_dmmls_INIT (void)
5095 if (UNLIKELY(gnat_dmmls
== NULL
)) {
5096 gnat_dmmls
= VG_(newXA
) (HG_(zalloc
), "hg.gnat_md.1",
5098 sizeof(GNAT_dmml
) );
5102 static void xtmemory_report_next_block(XT_Allocs
* xta
, ExeContext
** ec_alloc
)
5104 const MallocMeta
* md
= VG_(HT_Next
)(hg_mallocmeta_table
);
5106 xta
->nbytes
= md
->szB
;
5108 *ec_alloc
= md
->where
;
5112 static void HG_(xtmemory_report
) ( const HChar
* filename
, Bool fini
)
5114 // Make xtmemory_report_next_block ready to be called.
5115 VG_(HT_ResetIter
)(hg_mallocmeta_table
);
5116 VG_(XTMemory_report
)(filename
, fini
, xtmemory_report_next_block
,
5117 VG_(XT_filter_1top_and_maybe_below_main
));
5120 static void print_monitor_help ( void )
5125 "helgrind monitor commands:\n"
5126 " info locks [lock_addr] : show status of lock at addr lock_addr\n"
5127 " with no lock_addr, show status of all locks\n"
5128 " accesshistory <addr> [<len>] : show access history recorded\n"
5129 " for <len> (or 1) bytes at <addr>\n"
5130 " xtmemory [<filename>]\n"
5131 " dump xtree memory profile in <filename> (default xtmemory.kcg.%%p.%%n)\n"
5135 /* return True if request recognised, False otherwise */
5136 static Bool
handle_gdb_monitor_command (ThreadId tid
, HChar
*req
)
5139 HChar s
[VG_(strlen
)(req
)]; /* copy for strtok_r */
5143 VG_(strcpy
) (s
, req
);
5145 wcmd
= VG_(strtok_r
) (s
, " ", &ssaveptr
);
5146 /* NB: if possible, avoid introducing a new command below which
5147 starts with the same first letter(s) as an already existing
5148 command. This ensures a shorter abbreviation for the user. */
5149 switch (VG_(keyword_id
)
5150 ("help info accesshistory xtmemory",
5151 wcmd
, kwd_report_duplicated_matches
)) {
5152 case -2: /* multiple matches */
5154 case -1: /* not found */
5157 print_monitor_help();
5160 wcmd
= VG_(strtok_r
) (NULL
, " ", &ssaveptr
);
5161 switch (kwdid
= VG_(keyword_id
)
5163 wcmd
, kwd_report_all
)) {
5171 Bool lk_shown
= False
;
5172 Bool all_locks
= True
;
5176 wa
= VG_(strtok_r
) (NULL
, " ", &ssaveptr
);
5178 if (VG_(parse_Addr
) (&wa
, &lk_addr
) )
5181 VG_(gdb_printf
) ("missing or malformed address\n");
5184 for (i
= 0, lk
= admin_locks
; lk
; i
++, lk
= lk
->admin_next
) {
5185 if (all_locks
|| lk_addr
== lk
->guestaddr
) {
5187 True
/* show_lock_addrdescr */,
5188 False
/* show_internal_data */);
5193 VG_(gdb_printf
) ("no locks\n");
5194 if (!all_locks
&& !lk_shown
)
5195 VG_(gdb_printf
) ("lock with address %p not found\n",
5204 case 2: /* accesshistory */
5208 if (HG_(clo_history_level
) < 2) {
5210 ("helgrind must be started with --history-level=full"
5211 " to use accesshistory\n");
5214 if (VG_(strtok_get_address_and_size
) (&address
, &szB
, &ssaveptr
)) {
5216 libhb_event_map_access_history (address
, szB
, HG_(print_access
));
5218 VG_(gdb_printf
) ("len must be >=1\n");
5223 case 3: { /* xtmemory */
5225 filename
= VG_(strtok_r
) (NULL
, " ", &ssaveptr
);
5226 HG_(xtmemory_report
)(filename
, False
);
5237 Bool
hg_handle_client_request ( ThreadId tid
, UWord
* args
, UWord
* ret
)
5239 if (!VG_IS_TOOL_USERREQ('H','G',args
[0])
5240 && VG_USERREQ__GDB_MONITOR_COMMAND
!= args
[0])
5243 /* Anything that gets past the above check is one of ours, so we
5244 should be able to handle it. */
5246 /* default, meaningless return value, unless otherwise set */
5251 /* --- --- User-visible client requests --- --- */
5253 case VG_USERREQ__HG_CLEAN_MEMORY
:
5254 if (0) VG_(printf
)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
5256 /* Call die_mem to (expensively) tidy up properly, if there
5257 are any held locks etc in the area. Calling evh__die_mem
5258 and then evh__new_mem is a bit inefficient; probably just
5259 the latter would do. */
5260 if (args
[2] > 0) { /* length */
5261 evh__die_mem(args
[1], args
[2]);
5262 /* and then set it to New */
5263 evh__new_mem(args
[1], args
[2]);
5267 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK
: {
5270 if (0) VG_(printf
)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5272 if (HG_(mm_find_containing_block
)(NULL
, NULL
,
5273 &payload
, &pszB
, args
[1])) {
5275 evh__die_mem(payload
, pszB
);
5276 evh__new_mem(payload
, pszB
);
5285 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED
:
5286 if (0) VG_(printf
)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
5288 if (args
[2] > 0) { /* length */
5289 evh__untrack_mem(args
[1], args
[2]);
5293 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED
:
5294 if (0) VG_(printf
)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
5296 if (args
[2] > 0) { /* length */
5297 evh__new_mem(args
[1], args
[2]);
5301 case _VG_USERREQ__HG_GET_ABITS
:
5302 if (0) VG_(printf
)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
5303 args
[1], args
[2], args
[3]);
5304 UChar
*zzabit
= (UChar
*) args
[2];
5306 || VG_(am_is_valid_for_client
)((Addr
)zzabit
, (SizeT
)args
[3],
5307 VKI_PROT_READ
|VKI_PROT_WRITE
))
5308 *ret
= (UWord
) libhb_srange_get_abits ((Addr
) args
[1],
5315 /* This thread (tid) (a master) is informing us that it has
5316 seen the termination of a dependent task, and that this should
5317 be considered as a join between master and dependent. */
5318 case _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN
: {
5320 const Thread
*stayer
= map_threads_maybe_lookup( tid
);
5321 const void *dependent
= (void*)args
[1];
5322 const void *master
= (void*)args
[2];
5325 VG_(printf
)("HG_GNAT_DEPENDENT_MASTER_JOIN (tid %d): "
5326 "self_id = %p Thread* = %p dependent %p\n",
5327 (Int
)tid
, master
, stayer
, dependent
);
5330 /* Similar loop as for master completed hook below, but stops at
5331 the first matching occurence, only comparing master and
5333 for (n
= VG_(sizeXA
) (gnat_dmmls
) - 1; n
>= 0; n
--) {
5334 GNAT_dmml
*dmml
= (GNAT_dmml
*) VG_(indexXA
)(gnat_dmmls
, n
);
5335 if (dmml
->master
== master
5336 && dmml
->dependent
== dependent
) {
5338 VG_(printf
)("quitter %p dependency to stayer %p (join)\n",
5339 dmml
->hg_dependent
->hbthr
, stayer
->hbthr
);
5340 tl_assert(dmml
->hg_dependent
->hbthr
!= stayer
->hbthr
);
5341 generate_quitter_stayer_dependence (dmml
->hg_dependent
->hbthr
,
5343 VG_(removeIndexXA
) (gnat_dmmls
, n
);
5350 /* --- --- Client requests for Helgrind's use only --- --- */
5352 /* Some thread is telling us its pthread_t value. Record the
5353 binding between that and the associated Thread*, so we can
5354 later find the Thread* again when notified of a join by the
5356 case _VG_USERREQ__HG_SET_MY_PTHREAD_T
: {
5357 Thread
* my_thr
= NULL
;
5359 VG_(printf
)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int
)tid
,
5361 map_pthread_t_to_Thread_INIT();
5362 my_thr
= map_threads_maybe_lookup( tid
);
5363 /* This assertion should hold because the map_threads (tid to
5364 Thread*) binding should have been made at the point of
5365 low-level creation of this thread, which should have
5366 happened prior to us getting this client request for it.
5367 That's because this client request is sent from
5368 client-world from the 'thread_wrapper' function, which
5369 only runs once the thread has been low-level created. */
5370 tl_assert(my_thr
!= NULL
);
5371 /* So now we know that (pthread_t)args[1] is associated with
5372 (Thread*)my_thr. Note that down. */
5374 VG_(printf
)("XXXX: bind pthread_t %p to Thread* %p\n",
5375 (void*)args
[1], (void*)my_thr
);
5376 VG_(addToFM
)( map_pthread_t_to_Thread
, (UWord
)args
[1], (UWord
)my_thr
);
5378 if (my_thr
->coretid
!= 1) {
5379 /* FIXME: hardwires assumption about identity of the root thread. */
5380 if (HG_(clo_ignore_thread_creation
)) {
5381 HG_(thread_leave_pthread_create
)(my_thr
);
5382 HG_(thread_leave_synchr
)(my_thr
);
5383 tl_assert(my_thr
->synchr_nesting
== 0);
5389 case _VG_USERREQ__HG_PTH_API_ERROR
: {
5390 Thread
* my_thr
= NULL
;
5391 map_pthread_t_to_Thread_INIT();
5392 my_thr
= map_threads_maybe_lookup( tid
);
5393 tl_assert(my_thr
); /* See justification above in SET_MY_PTHREAD_T */
5394 #if defined(VGO_freebsd)
5395 if (HG_(get_pthread_synchr_nesting_level
)(tid
) >= 1) {
5399 HG_(record_error_PthAPIerror
)(
5400 my_thr
, (HChar
*)args
[1], (UWord
)args
[2], (HChar
*)args
[3] );
5404 /* This thread (tid) has completed a join with the quitting
5405 thread whose pthread_t is in args[1]. */
5406 case _VG_USERREQ__HG_PTHREAD_JOIN_POST
: {
5407 Thread
* thr_q
= NULL
; /* quitter Thread* */
5410 VG_(printf
)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int
)tid
,
5412 map_pthread_t_to_Thread_INIT();
5413 found
= VG_(lookupFM
)( map_pthread_t_to_Thread
,
5414 NULL
, (UWord
*)&thr_q
, (UWord
)args
[1] );
5415 /* Can this fail? It would mean that our pthread_join
5416 wrapper observed a successful join on args[1] yet that
5417 thread never existed (or at least, it never lodged an
5418 entry in the mapping (via SET_MY_PTHREAD_T)). Which
5419 sounds like a bug in the threads library. */
5420 // FIXME: get rid of this assertion; handle properly
5424 VG_(printf
)(".................... quitter Thread* = %p\n",
5426 evh__HG_PTHREAD_JOIN_POST( tid
, thr_q
);
5431 /* This thread (tid) is informing us of its master. */
5432 case _VG_USERREQ__HG_GNAT_MASTER_HOOK
: {
5434 dmml
.dependent
= (void*)args
[1];
5435 dmml
.master
= (void*)args
[2];
5436 dmml
.master_level
= (Word
)args
[3];
5437 dmml
.hg_dependent
= map_threads_maybe_lookup( tid
);
5438 tl_assert(dmml
.hg_dependent
);
5441 VG_(printf
)("HG_GNAT_MASTER_HOOK (tid %d): "
5442 "dependent = %p master = %p master_level = %ld"
5443 " dependent Thread* = %p\n",
5444 (Int
)tid
, dmml
.dependent
, dmml
.master
, dmml
.master_level
,
5447 VG_(addToXA
) (gnat_dmmls
, &dmml
);
5451 /* This thread (tid) is informing us that it has completed a
5453 case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK
: {
5455 const Thread
*stayer
= map_threads_maybe_lookup( tid
);
5456 const void *master
= (void*)args
[1];
5457 const Word master_level
= (Word
) args
[2];
5461 VG_(printf
)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5462 "self_id = %p master_level = %ld Thread* = %p\n",
5463 (Int
)tid
, master
, master_level
, stayer
);
5466 /* Reverse loop on the array, simulating a pthread_join for
5467 the Dependent tasks of the completed master, and removing
5468 them from the array. */
5469 for (n
= VG_(sizeXA
) (gnat_dmmls
) - 1; n
>= 0; n
--) {
5470 GNAT_dmml
*dmml
= (GNAT_dmml
*) VG_(indexXA
)(gnat_dmmls
, n
);
5471 if (dmml
->master
== master
5472 && dmml
->master_level
== master_level
) {
5474 VG_(printf
)("quitter %p dependency to stayer %p\n",
5475 dmml
->hg_dependent
->hbthr
, stayer
->hbthr
);
5476 tl_assert(dmml
->hg_dependent
->hbthr
!= stayer
->hbthr
);
5477 generate_quitter_stayer_dependence (dmml
->hg_dependent
->hbthr
,
5479 VG_(removeIndexXA
) (gnat_dmmls
, n
);
5485 /* EXPOSITION only: by intercepting lock init events we can show
5486 the user where the lock was initialised, rather than only
5487 being able to show where it was first locked. Intercepting
5488 lock initialisations is not necessary for the basic operation
5489 of the race checker. */
5490 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST
:
5491 evh__HG_PTHREAD_MUTEX_INIT_POST( tid
, (void*)args
[1], args
[2] );
5494 /* mutex=arg[1], mutex_is_init=arg[2] */
5495 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE
:
5496 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid
, (void*)args
[1], args
[2] != 0 );
5499 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE
: // pth_mx_t*
5500 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5501 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5502 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid
, (void*)args
[1] );
5505 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST
: // pth_mx_t*
5506 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5507 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid
, (void*)args
[1] );
5508 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5511 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE
: // pth_mx_t*
5512 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5513 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5514 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid
, (void*)args
[1], args
[2] );
5517 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST
: // pth_mx_t*, long
5518 if ((args
[2] == True
) // lock actually taken
5519 && (HG_(get_pthread_create_nesting_level
)(tid
) == 0))
5520 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid
, (void*)args
[1] );
5521 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5524 /* This thread is about to do pthread_cond_signal on the
5525 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
5526 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE
:
5527 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE
:
5528 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5529 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid
, (void*)args
[1] );
5532 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST
:
5533 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST
:
5534 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5537 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5538 Returns a flag indicating whether or not the mutex is believed to be
5539 valid for this operation. */
5540 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE
: {
5541 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5543 = evh__HG_PTHREAD_COND_WAIT_PRE( tid
, (void*)args
[1],
5545 *ret
= mutex_is_valid
? 1 : 0;
5549 /* Thread successfully completed pthread_cond_init:
5550 cond=arg[1], cond_attr=arg[2] */
5551 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST
:
5552 evh__HG_PTHREAD_COND_INIT_POST( tid
,
5553 (void*)args
[1], (void*)args
[2] );
5556 /* cond=arg[1], cond_is_init=arg[2] */
5557 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE
:
5558 evh__HG_PTHREAD_COND_DESTROY_PRE( tid
, (void*)args
[1], args
[2] != 0 );
5561 /* Thread completed pthread_cond_wait, cond=arg[1],
5562 mutex=arg[2], timeout=arg[3], successful=arg[4] */
5563 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST
:
5564 if (args
[4] == True
)
5565 evh__HG_PTHREAD_COND_WAIT_POST( tid
,
5566 (void*)args
[1], (void*)args
[2],
5568 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5571 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST
:
5572 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid
, (void*)args
[1] );
5575 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE
:
5576 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid
, (void*)args
[1] );
5579 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
5580 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE
:
5581 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5582 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5583 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid
, (void*)args
[1],
5587 /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
5588 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST
:
5589 if ((args
[3] == True
)
5590 && (HG_(get_pthread_create_nesting_level
)(tid
) == 0))
5591 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid
, (void*)args
[1], args
[2] );
5592 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5595 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE
:
5596 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5597 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5598 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid
, (void*)args
[1] );
5601 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST
:
5602 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5603 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid
, (void*)args
[1] );
5604 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5607 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST
: /* sem_t*, unsigned long */
5608 evh__HG_POSIX_SEM_INIT_POST( tid
, (void*)args
[1], args
[2] );
5611 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE
: /* sem_t* */
5612 evh__HG_POSIX_SEM_DESTROY_PRE( tid
, (void*)args
[1] );
5615 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE
: /* sem_t* */
5616 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5617 evh__HG_POSIX_SEM_POST_PRE( tid
, (void*)args
[1] );
5620 case _VG_USERREQ__HG_POSIX_SEM_POST_POST
: /* sem_t* */
5621 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5624 case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE
: /* sem_t* */
5625 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5628 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST
: /* sem_t*, long tookLock */
5629 #if defined(VGO_freebsd)
5630 if (args
[2] == True
&& HG_(get_pthread_synchr_nesting_level
)(tid
) == 1)
5631 evh__HG_POSIX_SEM_WAIT_POST( tid
, (void*)args
[1] );
5633 if (args
[2] == True
)
5634 evh__HG_POSIX_SEM_WAIT_POST( tid
, (void*)args
[1] );
5636 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;
      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }
      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;
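      /* Added commentary (not in the original dispatch table): client code
         normally reaches the three USERSO requests above through the
         annotation macros in helgrind.h, roughly as follows, with the
         address of some client variable 'x' serving as the SO tag:

            ANNOTATE_HAPPENS_BEFORE(&x);   // -> _VG_USERREQ__HG_USERSO_SEND_PRE
            ANNOTATE_HAPPENS_AFTER(&x);    // -> _VG_USERREQ__HG_USERSO_RECV_POST
            ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(&x);
                                           // -> _VG_USERREQ__HG_USERSO_FORGET_ALL
      */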
      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
         if (handled)
            *ret = 1;
         else
            *ret = 0;
         return handled;
      }
      case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_enter_pthread_create)(thr);
            HG_(thread_enter_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_leave_pthread_create)(thr);
            HG_(thread_leave_synchr)(thr);
         }
         break;
      }
      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE: // pth_mx_t*, long tryLock
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED: // void*, long isW
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED: // void*
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;
#if defined(VGO_solaris)
      case _VG_USERREQ__HG_RTLD_BIND_GUARD:
         evh__HG_RTLD_BIND_GUARD(tid, args[1]);
         break;

      case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
         evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
         break;
#endif /* VGO_solaris */

      default:
         /* Unhandled Helgrind client request! */
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown Helgrind client request code %llx\n",
                      (ULong)args[0]);
         return False;
   }

   return True;
}
/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/
static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   else if VG_BINT_CLO(arg, "--history-backtrace-size",
                            HG_(clo_history_backtrace_size), 2, 500) {}
   // 500 just in case someone with a lot of CPU and memory would like to use
   // the same value for --num-callers and this.

   else if VG_BOOL_CLO(arg, "--delta-stacktrace",
                            HG_(clo_delta_stacktrace)) {}

   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                            HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}
   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                            HG_(clo_ignore_thread_creation)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
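
/* Added commentary (not in the original source): example invocations that
   exercise the options parsed above; './app' is a placeholder client
   program:

      valgrind --tool=helgrind --history-level=approx ./app
      valgrind --tool=helgrind --conflict-cache-size=5000000 \
               --check-stack-refs=no ./app
      valgrind --tool=helgrind --hg-sanity-flags=000010 ./app

   The last form sets bit (1 << 1) in HG_(clo_sanity_flags), which per
   hg_print_debug_usage below enables sanity checking at lock/unlock
   events. */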
static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --history-backtrace-size=<number> record <number> callers for full\n"
"                                      history level [8]\n"
"    --delta-stacktrace=no|yes [yes on linux amd64/x86]\n"
"       no  : always compute a full history stacktrace from unwind info\n"
"       yes : derive a stacktrace from the previous stacktrace\n"
"             if there was no call/return or similar instruction\n"
"    --conflict-cache-size=N size of 'full' history cache [2000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread\n"
"                              creation [%s]\n",
   HG_(clo_ignore_thread_creation) ? "yes" : "no"
   );
}
static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX> sanity check "
               " at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:  is never done (may cause big space leaks in Helgrind)\n"
"       auto:   done just often enough to keep space usage under control\n"
"       always: done after every VTS GC (mostly just a big time waster)\n"
   );
}
static void hg_print_stats (void)
{
   VG_(printf)("\n");
   HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_laog, "univ_laog" );
   }

   //zz VG_(printf)("\n");
   //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
   //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
   //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
   //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz             stats__hbefore_stk_hwm);
   //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
   //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)(" locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("client malloc-ed blocks: %'8u\n",
               VG_(HT_count_nodes)(hg_mallocmeta_table));

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // This in fact only prints stats.
}
static void hg_fini ( Int exitcode )
{
   HG_(xtmemory_report) (VG_(clo_xtree_memory_file), True);

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}
/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace_with_deltas)
                       ( tid, frames, (UInt)nRequest,
                         NULL, NULL, 0,
                         thr->first_sp_delta );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}
static void hg_post_clo_init ( void )
{
   if (HG_(clo_delta_stacktrace) && VG_(clo_vex_control).guest_chase) {
      if (VG_(clo_verbosity) >= 2)
         VG_(message)(Vg_UserMsg,
                      "helgrind --delta-stacktrace=yes only works with "
                      "--vex-guest-chase=no\n"
                      "=> (re-setting it to 'no')\n");
      VG_(clo_vex_control).guest_chase = False;
   }

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
   if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
      // Activate full xtree memory profiling.
      VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
}
static void hg_info_location (DiEpoch ep, Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (ep, a);
}
static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2024, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         (True);
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_print_stats)         (hg_print_stats);
   VG_(needs_info_location)       (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_new_aligned,
                                   hg_cli____builtin_vec_new,
                                   hg_cli____builtin_vec_new_aligned,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_delete_aligned,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli____builtin_vec_delete_aligned,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */
   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );
   VG_(track_new_mem_stack_4)     ( evh__new_mem_stack_4 );
   VG_(track_new_mem_stack_8)     ( evh__new_mem_stack_8 );
   VG_(track_new_mem_stack_12)    ( evh__new_mem_stack_12 );
   VG_(track_new_mem_stack_16)    ( evh__new_mem_stack_16 );
   VG_(track_new_mem_stack_32)    ( evh__new_mem_stack_32 );
   VG_(track_new_mem_stack_112)   ( evh__new_mem_stack_112 );
   VG_(track_new_mem_stack_128)   ( evh__new_mem_stack_128 );
   VG_(track_new_mem_stack_144)   ( evh__new_mem_stack_144 );
   VG_(track_new_mem_stack_160)   ( evh__new_mem_stack_160 );
   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );

   /* evh__die_mem calls at the end libhb_srange_noaccess_NoFX
      which has no effect. We do not use VG_(track_die_mem_stack),
      as this would be an expensive way to do nothing. */
   // VG_(track_die_mem_stack)      ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);
   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );
   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}
VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/