/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks LLP

   Copyright (C) 2007-2017 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"
#include "pub_tool_addrinfo.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"
// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up
/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.
/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0

static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0

/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.
/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle deletion of Locks from the middle of the list
   (see del_LockN). */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
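/* Illustrative sketch only (not part of the build): how the structures
   above fit together.  map_locks maps a guest lock address to its Lock*,
   and a Thread's locksetA (a WordSetID drawn from univ_lsets) records
   which Locks it currently holds.  The helper name below is hypothetical. */
#if 0
static Bool example__thread_holds_lock_at ( Thread* thr, Addr guest_lock_ga )
{
   Lock* lk = NULL;
   /* map_locks : WordFM keyed on guest address, value is Lock* */
   if (!VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)guest_lock_ga ))
      return False;  /* no Lock is known at this address */
   /* membership test against the thread's all-held lockset */
   return HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk );
}
#endif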
/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

#if defined(VGO_solaris)
Bool HG_(clo_ignore_thread_creation) = True;
#else
Bool HG_(clo_ignore_thread_creation) = False;
#endif /* VGO_solaris */

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic    = Thread_MAGIC;
   thread->hbthr    = hbthr;
   thread->coretid  = VG_INVALID_THREADID;
   thread->created_at     = NULL;
   thread->announced      = False;
   thread->first_sp_delta = 0;
   thread->errmsg_index   = indx++;
   thread->admin          = admin_threads;
   thread->synchr_nesting = 0;
   thread->pthread_create_nesting_level = 0;
#if defined(VGO_solaris)
   thread->bind_guard_flag = 0;
#endif /* VGO_solaris */

   admin_threads = thread;
   return thread;
}
// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks      = lock;
   /* end: add */
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}
/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)
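/* Usage note (illustrative): pp_everything below takes a bitmask of the
   PP_* flags above, e.g.
      pp_everything( PP_ALL, "some caller tag" );
   or, for just the thread structures,
      pp_everything( PP_THREADS, "some caller tag" );
   and is typically only reached when SHOW_DATA_STRUCTURES (defined
   earlier) is enabled. */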
static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int   i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}
/* Pretty Print lock lk.
   if show_lock_addrdescr, describes the (guest) lock address
     (this description will be more complete with --read-var-info=yes).
   if show_internal_data, also shows helgrind-internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data )
{
   // FIXME PW EPOCH should use the epoch of the allocated_at ec.
   const DiEpoch cur_ep = VG_(current_DiEpoch)();

   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) (cur_ep, (Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %u ", thr->coretid);
         }
      }
      VG_(doneIterBag)( lk->heldBy );
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True  /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   Addr  gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", (void*)gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}
/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}

static void HG_(thread_enter_synchr)(Thread *thr) {
   tl_assert(thr->synchr_nesting >= 0);
#if defined(VGO_solaris)
   thr->synchr_nesting += 1;
#endif /* VGO_solaris */
}

static void HG_(thread_leave_synchr)(Thread *thr) {
#if defined(VGO_solaris)
   thr->synchr_nesting -= 1;
#endif /* VGO_solaris */
   tl_assert(thr->synchr_nesting >= 0);
}

static void HG_(thread_enter_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level >= 0);
   thr->pthread_create_nesting_level += 1;
}

static void HG_(thread_leave_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level > 0);
   thr->pthread_create_nesting_level -= 1;
}

static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
   Thread *thr = map_threads_maybe_lookup(tid);
   return thr->pthread_create_nesting_level;
}
/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t
      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
         owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True
      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
      mentioned in the Threads

      for seg in Segments {
         seg->thr is a sane Thread
      }

   SecMaps vs Segment/Thread/Lock

      if any shadow word is ShR or ShM then .mbHasShared == True

      for each Excl(segid) state
         map_segments_lookup maps to a sane Segment(seg)
      for each ShM/ShR(tsetid,lsetid) state
         each lk in lset is a valid Lock
         each thr in tset is a valid thread, which is non-dead
*/
/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }
   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */

/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, len);
}

inline static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

inline static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN,
                                                   SizeT len )
{
   if (0)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

inline static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN,
                                                   SizeT len )
{
   if (0)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

inline static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN,
                                               SizeT len )
{
   if (0)
      VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}
/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;
static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}

static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
}

static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}

static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (thr)
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
static
void evh__new_mem ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}
#define DCL_evh__new_mem_stack(syze)                                      \
   static void VG_REGPARM(1) evh__new_mem_stack_##syze(Addr new_SP)       \
   {                                                                      \
      Thread *thr = get_current_Thread();                                 \
      if (SHOW_EVENTS >= 2)                                               \
         VG_(printf)("evh__new_mem_stack_" #syze "(%p, %lu)\n",           \
                     (void*)new_SP, (SizeT)syze );                        \
      shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + new_SP, syze );   \
      if (syze >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE)) \
         all__sanity_check("evh__new_mem_stack_" #syze "-post");          \
      if (UNLIKELY(thr->pthread_create_nesting_level > 0))                \
         shadow_mem_make_Untracked( thr, new_SP, syze );                  \
   }

DCL_evh__new_mem_stack(4);
DCL_evh__new_mem_stack(8);
DCL_evh__new_mem_stack(12);
DCL_evh__new_mem_stack(16);
DCL_evh__new_mem_stack(32);
DCL_evh__new_mem_stack(112);
DCL_evh__new_mem_stack(128);
DCL_evh__new_mem_stack(144);
DCL_evh__new_mem_stack(160);
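/* Illustrative sketch (not part of the build): for the 4-byte case the
   macro above expands to roughly the following specialised handler; the
   real expansion is produced by the preprocessor and also contains the
   SHOW_EVENTS trace and the big-range sanity check shown in the macro
   body. */
#if 0
static void VG_REGPARM(1) evh__new_mem_stack_4 ( Addr new_SP )
{
   Thread* thr = get_current_Thread();
   shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + new_SP, 4 );
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, new_SP, 4 );
}
#endif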
static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}
static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx) {
      shadow_mem_make_New( thr, a, len );
      if (UNLIKELY(thr->pthread_create_nesting_level > 0))
         shadow_mem_make_Untracked( thr, a, len );
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}
static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests.  If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!rr && !ww)
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}
static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}
static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this.  If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs.  If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb.  So force them to NoAccess, so that all
   // VTS references in the affected area are dropped.  Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}
static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}
static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   Thread *thr = get_current_Thread();
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_scopy_range( thr, src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On x86/amd64-linux, this entails a nasty glibc specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
        first_ip_delta = -3;
#       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
        first_ip_delta = -1;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }

      if (HG_(clo_ignore_thread_creation)) {
         HG_(thread_enter_pthread_create)(thr_c);
         tl_assert(thr_c->synchr_nesting == 0);
         HG_(thread_enter_synchr)(thr_c);
         /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[200];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   ThreadId i;
   Thread*  thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (thr == NULL)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
static
void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s )
{
   SO* so;
   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();

   /* Send last arg of _so_send as False, since the sending thread
      doesn't actually exist any more, so we don't want _so_send to
      try taking stack snapshots of it. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped.  Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls.  (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.)  See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);
}
1796 void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid
, Thread
* quit_thr
)
1803 if (SHOW_EVENTS
>= 1)
1804 VG_(printf
)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1805 (Int
)stay_tid
, quit_thr
);
1807 tl_assert(HG_(is_sane_ThreadId
)(stay_tid
));
1809 thr_s
= map_threads_maybe_lookup( stay_tid
);
1811 tl_assert(thr_s
!= NULL
);
1812 tl_assert(thr_q
!= NULL
);
1813 tl_assert(thr_s
!= thr_q
);
1815 hbthr_s
= thr_s
->hbthr
;
1816 hbthr_q
= thr_q
->hbthr
;
1817 tl_assert(hbthr_s
!= hbthr_q
);
1818 tl_assert( libhb_get_Thr_hgthread(hbthr_s
) == thr_s
);
1819 tl_assert( libhb_get_Thr_hgthread(hbthr_q
) == thr_q
);
1821 generate_quitter_stayer_dependence (hbthr_q
, hbthr_s
);
1823 /* evh__pre_thread_ll_exit issues an error message if the exiting
1824 thread holds any locks. No need to check here. */
1826 /* This holds because, at least when using NPTL as the thread
1827 library, we should be notified the low level thread exit before
1828 we hear of any join event on it. The low level exit
1829 notification feeds through into evh__pre_thread_ll_exit,
1830 which should clear the map_threads entry for it. Hence we
1831 expect there to be no map_threads entry at this point. */
1832 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q
)
1833 == VG_INVALID_THREADID
);
1835 if (HG_(clo_sanity_flags
) & SCE_THREADS
)
1836 all__sanity_check("evh__post_thread_join-post");
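
/* Illustrative sketch (not part of Helgrind): the client-side pattern that
   the quitter->stayer dependence above models.  Because the joiner 'recv's
   from an SO that the quitter 'sent' on, the worker's write happens-before
   the read performed after pthread_join(), so no race is reported on
   'result'.  Names below are hypothetical example code, not tool code:

      #include <pthread.h>
      static int result;                 // written by worker, read by main
      static void* worker(void* arg) {
         result = 42;                    // the quitter's write
         return NULL;
      }
      int main(void) {
         pthread_t t;
         pthread_create(&t, NULL, worker, NULL);
         pthread_join(t, NULL);          // reaches evh__HG_PTHREAD_JOIN_POST
         return result;                  // stayer's read: ordered after the write
      }
*/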
static
void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
                         Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}
static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                const HChar* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   Thread *thr = map_threads_lookup(tid);
   len = VG_(strlen)( (HChar*) a );
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range( thr, a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}
static
void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cwrite_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}
static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // We ignore the initialisation state (is_inited); that's ok.
   shadow_mem_make_New(get_current_Thread(), a, len);
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}
static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free.  This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      if (LIKELY(thr->synchr_nesting == 0))
         shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_AHAE( thr, a, len );
   /* We used to call instead
          shadow_mem_make_NoAccess_NoFX( thr, a, len );
      A non-buggy application will not access the freed memory
      anymore, and so marking it no-access is in theory useless.
      Not marking freed memory would avoid the overhead for applications
      doing mostly malloc/free, as the freed memory should then be recycled
      very quickly after marking.
      We rather mark it noaccess for the following reasons:
        * the accessibility bits then always correctly represent the memory
          status (e.g. for the client request VALGRIND_HG_GET_ABITS).
        * the overhead is reasonable (about 5 seconds per Gb in 1000 byte
          blocks, on a ppc64le, for an unrealistic workload of an application
          doing only malloc/free).
        * marking no access allows the SecMap to be GC-ed, which might improve
          performance and/or memory usage.
        * we might detect more application bugs when memory is marked
          noaccess.
      If needed, we could support here an option --free-is-noaccess=yes|no
      to avoid marking freed memory as no access, for applications that
      need to avoid the no-access marking overhead. */
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}
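
/* Illustrative sketch (not part of Helgrind): the kind of defect that
   treating a free as a write (--free-is-write=yes) is intended to expose.
   Nothing orders the read in reader() before the free() in main(), so the
   free is modelled as a write to buf[] and a race can be reported.  This is
   hypothetical client code, assumed behaviour under that option:

      #include <pthread.h>
      #include <stdlib.h>
      static int* buf;
      static void* reader(void* arg) { return (void*)(long)buf[0]; }
      int main(void) {
         pthread_t t;
         buf = calloc(100, sizeof(int));
         pthread_create(&t, NULL, reader, NULL);
         free(buf);                      // no happens-before edge wrt the read
         pthread_join(t, NULL);
         return 0;
      }
*/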
/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_4(hbthr, a);
}

/* Same as evh__mem_help_cwrite_4 but unwind will use a first_sp_delta of
   one word. */
static VG_REGPARM(1)
void evh__mem_help_cwrite_4_fixupSP(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;

   thr->first_sp_delta = sizeof(Word);
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_4(hbthr, a);
   thr->first_sp_delta = 0;
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_8(hbthr, a);
}

/* Same as evh__mem_help_cwrite_8 but unwind will use a first_sp_delta of
   one word. */
static VG_REGPARM(1)
void evh__mem_help_cwrite_8_fixupSP(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;

   thr->first_sp_delta = sizeof(Word);
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_8(hbthr, a);
   thr->first_sp_delta = 0;
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_N(hbthr, a, size);
}

/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         /* set it to a distinguished value for sanity-check purposes */
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
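
/* Illustrative sketch (not part of Helgrind) of the defect detected above:
   re-locking a non-recursive mutex the caller already write-holds.  The
   error is reported before the client then self-deadlocks:

      pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&m);
      pthread_mutex_lock(&m);   // "Attempt to re-lock a non-recursive lock
                                //  I already hold", then deadlock
*/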
static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}

/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */;
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}

/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts. */
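
/* Illustrative sketch (not part of Helgrind): the client-side pattern the
   CV <-> SO mapping above models.  The signaller's history is 'sent' into
   the SO when pthread_cond_signal is intercepted, and the waiter 'recv's it
   when its pthread_cond_wait completes, so the producer's writes to 'data'
   and 'ready' are ordered before the consumer's reads.  Names below are
   hypothetical example code:

      #include <pthread.h>
      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
      static int ready, data;

      static void* producer(void* arg) {
         pthread_mutex_lock(&mx);
         data  = 42;
         ready = 1;
         pthread_cond_signal(&cv);        // COND_SIGNAL_PRE: so_send
         pthread_mutex_unlock(&mx);
         return NULL;
      }

      static void* consumer(void* arg) {
         pthread_mutex_lock(&mx);
         while (!ready)
            pthread_cond_wait(&cv, &mx);  // COND_WAIT_POST: so_recv
         int seen = data;                 // ordered after producer's writes
         pthread_mutex_unlock(&mx);
         return (void*)(long)seen;
      }
*/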
/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence we
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;

/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
   }
}
static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so     = so;
      cvi->mx_ga  = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      return NULL;
   }
}
static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread* thr;
   UWord   keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
                 " destruction of condition variable being waited upon");
         /* Destroying a cond var being waited upon: the outcome is EBUSY and
            the variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV.  So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   lk = NULL;
   if (cvi->mx_ga != 0) {
      lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
   }
   /* note: lk could be NULL.  Be careful. */
   if (lk) {
      if (lk->kind == LK_rdwr) {
         HG_(record_error_Misc)(thr,
            "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
      }
      if (lk->heldBy == NULL) {
         HG_(record_error_Misc)(thr,
            "pthread_cond_{signal,broadcast}: dubious: "
            "associated lock is not held by any thread");
      }
      if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
         HG_(record_error_Misc)(thr,
            "pthread_cond_{signal,broadcast}: "
            "associated lock is not held by calling thread");
      }
   } else {
      /* Couldn't even find the damn thing. */
      // But actually .. that's not necessarily an error.  We don't
      // know the (CV,MX) binding until a pthread_cond_wait or bcast
      // shows us what it is, and that may not have happened yet.
      // So just keep quiet in this circumstance.
      //HG_(record_error_Misc)( thr,
      //   "pthread_cond_{signal,broadcast}: "
      //   "no or invalid mutex associated with cond");
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
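
/* Illustrative sketch (not part of Helgrind) of the "dubious" case checked
   above, reusing mx, cv and ready from the sketch near the top of this
   section: signalling while the associated mutex is not held.  POSIX allows
   it, but the signaller then races with the woken waiter unless some other
   synchronisation makes it safe -- hence only a "dubious" note is emitted
   when no thread holds the lock:

      pthread_mutex_lock(&mx);
      ready = 1;
      pthread_mutex_unlock(&mx);
      pthread_cond_signal(&cv);   // lock not held here -> "dubious" message
*/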
2475 /* returns True if it reckons 'mutex' is valid and held by this
2476 thread, else False */
2477 static Bool
evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid
,
2478 void* cond
, void* mutex
)
2482 Bool lk_valid
= True
;
2485 if (SHOW_EVENTS
>= 1)
2486 VG_(printf
)("evh__hg_PTHREAD_COND_WAIT_PRE"
2487 "(ctid=%d, cond=%p, mutex=%p)\n",
2488 (Int
)tid
, (void*)cond
, (void*)mutex
);
2490 thr
= map_threads_maybe_lookup( tid
);
2491 tl_assert(thr
); /* cannot fail - Thread* must already exist */
2493 lk
= map_locks_maybe_lookup( (Addr
)mutex
);
2495 /* Check for stupid mutex arguments. There are various ways to be
2496 a bozo. Only complain once, though, even if more than one thing
2500 HG_(record_error_Misc
)(
2502 "pthread_cond_{timed}wait called with invalid mutex" );
2504 tl_assert( HG_(is_sane_LockN
)(lk
) );
2505 if (lk
->kind
== LK_rdwr
) {
2507 HG_(record_error_Misc
)(
2508 thr
, "pthread_cond_{timed}wait called with mutex "
2509 "of type pthread_rwlock_t*" );
2511 if (lk
->heldBy
== NULL
) {
2513 HG_(record_error_Misc
)(
2514 thr
, "pthread_cond_{timed}wait called with un-held mutex");
2516 if (lk
->heldBy
!= NULL
2517 && VG_(elemBag
)( lk
->heldBy
, (UWord
)thr
) == 0) {
2519 HG_(record_error_Misc
)(
2520 thr
, "pthread_cond_{timed}wait called with mutex "
2521 "held by a different thread" );
2525 // error-if: cond is also associated with a different mutex
2526 cvi
= map_cond_to_CVInfo_lookup_or_alloc(cond
);
2529 if (cvi
->nWaiters
== 0) {
2530 /* form initial (CV,MX) binding */
2533 else /* check existing (CV,MX) binding */
2534 if (cvi
->mx_ga
!= mutex
) {
2535 HG_(record_error_Misc
)(
2536 thr
, "pthread_cond_{timed}wait: cond is associated "
2537 "with a different mutex");
static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex,
                                             Bool timeout )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
                  (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
   if (!cvi) {
      /* This could be either a bug in helgrind or an error in the guest
         application (e.g. the cond var was destroyed by another thread).
         Let's assume helgrind is perfect ...
         Note that this is similar to drd behaviour. */
      HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
                             " being waited upon");
      return;
   }

   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!timeout && !libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded"
                                   " without prior pthread_cond_post");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}
static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
                                             void* cond, void* cond_attr )
{
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
                  "(ctid=%d, cond=%p, cond_attr=%p)\n",
                  (Int)tid, (void*)cond, (void*)cond_attr );

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert (cvi);
   tl_assert (cvi->so);
}

static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond, Bool cond_is_init )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p, cond_is_init=%d)\n",
                  (Int)tid, (void*)cond, (Int)cond_is_init );

   map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
}

/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         /* set it to a distinguished value for sanity-check purposes */
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured by the wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured by the wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh.  */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured by the wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}

/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about. */

/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;
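
/* Illustrative sketch (not part of Helgrind) of the T1/T2/T3 scenario from
   the comment above.  Each sem_post pushes an SO carrying the poster's
   vector clock; each completed sem_wait pops one and does a strong recv, so
   t3 ends up ordered after both writers and its reads of x and y are
   race-free.  Hypothetical example code:

      #include <pthread.h>
      #include <semaphore.h>
      static sem_t s;
      static int x, y;
      static void* t1(void* a) { x = 1; sem_post(&s); return NULL; }
      static void* t2(void* a) { y = 2; sem_post(&s); return NULL; }
      static void* t3(void* a) {
         sem_wait(&s); sem_wait(&s);     // acquires deps on both posters
         return (void*)(long)(x + y);
      }
      int main(void) {
         pthread_t a, b, c;
         sem_init(&s, 0, 0);
         pthread_create(&a, NULL, t1, NULL);
         pthread_create(&b, NULL, t2, NULL);
         pthread_create(&c, NULL, t3, NULL);
         pthread_join(a, NULL); pthread_join(b, NULL); pthread_join(c, NULL);
         return 0;
      }
*/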
static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped off the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}

static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}

/* -------------------------------------------------------- */
/* -------------- events to do with barriers -------------- */
/* -------------------------------------------------------- */

typedef
   struct {
      Bool    initted;   /* has it yet been initted by guest? */
      Bool    resizable; /* is resizing allowed? */
      UWord   size;      /* declared size */
      XArray* waiting;   /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;

static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}

static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}

/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;
static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
   }
}

static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
   UWord key, val;
   map_barrier_to_Bar_INIT();
   if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
      tl_assert(key == (UWord)barrier);
      return (Bar*)val;
   } else {
      Bar* bar = new_Bar();
      VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
      return bar;
   }
}

static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      delete_Bar(bar);
   }
}
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
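
/* Worked example (illustrative) of the weak-send / strong-recv scheme above
   for a 3-thread barrier.  Suppose on arrival the vector clocks are
      T1: [5,0,0]   T2: [0,3,0]   T3: [0,0,7]
   The three weak sends join these into the SO, giving [5,3,7]; the strong
   receives then copy that join back, so afterwards every participant's VC
   is [5,3,7] and each thread is ordered after everything the others did
   before the barrier -- exactly the barrier semantics required. */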
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because here we
      update our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}
static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity.  There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity.  If we decrease it to be equal to or
         below the number of waiting threads, they will now move past
         the barrier, so we need to mess with dep edges in the same way
         as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      }
      bar->size = newcount;
   }
}

/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */

/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

static void map_usertag_to_SO_delete ( UWord usertag ) {
   UWord keyW, valW;
   map_usertag_to_SO_INIT();
   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == usertag);
      tl_assert(so);
      libhb_so_dealloc(so);
   }
}
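
/* Illustrative sketch (an assumption-laden example, not part of this file):
   the client-side annotations that normally drive these handlers.  In
   helgrind.h, ANNOTATE_HAPPENS_BEFORE(obj) and ANNOTATE_HAPPENS_AFTER(obj)
   issue the client requests that arrive here as USERSO_SEND_PRE and
   USERSO_RECV_POST respectively, with the annotated address as the usertag:

      #include <valgrind/helgrind.h>
      int payload;
      volatile int flag;                  // hand-rolled signalling flag
      void producer(void) {
         payload = 42;
         ANNOTATE_HAPPENS_BEFORE(&flag);  // weak send on the SO for &flag
         flag = 1;
      }
      void consumer(void) {
         while (!flag) { }                // spin (the flag itself would normally
                                          // be an atomic or marked benign)
         ANNOTATE_HAPPENS_AFTER(&flag);   // strong recv on the SO for &flag
         int v = payload;                 // ordered after producer's write
         (void)v;
      }
*/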
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}

static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}

static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}

#if defined(VGO_solaris)
/* ----------------------------------------------------- */
/* --- events to do with bind guard/clear intercepts --- */
/* ----------------------------------------------------- */

static
void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_GUARD"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((bindflag & thr->bind_guard_flag) == 0) {
      thr->bind_guard_flag |= bindflag;
      HG_(thread_enter_synchr)(thr);
      /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
      HG_(thread_enter_pthread_create)(thr);
   }
}

static
void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((thr->bind_guard_flag & bindflag) != 0) {
      thr->bind_guard_flag &= ~bindflag;
      HG_(thread_leave_synchr)(thr);
      HG_(thread_leave_pthread_create)(thr);
   }
}
#endif /* VGO_solaris */

/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
          laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/
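
/* Possible shape for the caches suggested above (illustrative sketch only;
   not implemented here, and the names are hypothetical).  A one-entry query
   cache stamped with a generation counter that is bumped whenever laog is
   mutated would make the common re-acquire path hit without searching:

      typedef struct {
         UWord     generation;  // laog mutation count when this was filled
         Lock*     lk;          // the lock being acquired
         WordSetID heldSet;     // locks already held by the thread
         Bool      answer;      // cached result of the reachability query
      } LaogQueryCache;

   laog__add_edge / laog__del_edge would increment the generation; a cached
   entry is valid only if its generation matches the current one. */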
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
}

static void laog__show ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
static void univ_laog_do_GC ( void ) {
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog );

   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                           * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   VG_(initIterFM)( laog );
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
   }
   VG_(doneIterFM)( laog );

   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next_gc.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep long enough laog WV that will become
   //   useful again very soon.
   // Sol 2: garbage collect at a percentage increase of the current cardinality
   //   (with a min increase of 1)
   //   Trials on a small test program with 1%, 5% and 10% increase were done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each gc becomes a lot, when many gc
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to cost introduced by gc: on the t2t perf test (doing only
   // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection. With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // performance difference is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.
   //
   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or fixed increased),
   // we should do it here, eg.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code the solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
          (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
}
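
/* Worked example of the hard-coded policy above: if the previous
   threshold was 1000, the next GC is scheduled for when the cardinality
   of univ_laog reaches 1001, i.e. as soon as one more wordset exists
   than at the point the last collection was scheduled. */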
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }

   /* Update the in edges for dst */
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}
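
/* Adding an edge can create fresh wordsets in univ_laog (via addToWS /
   singletonWS above), which is why the cardinality is re-checked against
   next_gc_univ_laog at the end of laog__add_edge. */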
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase the number of WSs, so check for gc. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();

   if (0) VG_(printf)("laog__del_edge exit\n");
}
__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(keyW == (UWord)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(keyW == (UWord)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   VG_(initIterFM)( laog );
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
}
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   XArray* stack;   /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   UWord succs_size, i;

   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );

   return ret;
}
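
/* The search above is a plain iterative DFS: an explicit XArray is used
   as the work stack and a WordFM as the visited set, so cycles in the
   lock-order graph cannot cause unbounded re-expansion and no recursion
   depth limit applies. */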
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks. */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;

      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks.
            Let's have the following actions:
            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            happens.
            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            cannot be shown.
            This is because there is no pair (fCA, fBC) in laog exposition :
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */
         HG_(record_error_LockOrder)(
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk)  |  old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL. */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
/* Allocates a duplicate of words. Caller must HG_(free) the result. */
static UWord* UWordV_dup(UWord* words, Word words_size)
{
   if (words_size == 0)
      return NULL;

   UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));

   for (i = 0; i < words_size; i++)
      dup[i] = words[i];

   return dup;
}
/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   HG_(free) (preds_words);
   HG_(free) (succs_words);

   // Remove lk information from laog links FM
   if (VG_(delFromFM) (laog,
                       (UWord*)&linked_lk, (UWord*)&links, (UWord)lk )) {
      tl_assert (linked_lk == lk);
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}
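
/* Deleting lock lk therefore splices the graph: every pred->lk and
   lk->succ edge is removed, and pred->succ edges are inserted in their
   place (skipping self-edges), so orderings previously established
   transitively through lk are not lost. */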
//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   UWordV_dup call needed here ...
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks */
static VgHashTable *hg_mallocmeta_table = NULL;

/* MallocMeta are small elements. We use a pool to avoid
   the overhead of malloc for each MallocMeta. */
static PoolAlloc *MallocMeta_poolalloc = NULL;

static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
   VG_(memset)(md, 0, sizeof(MallocMeta));
   return md;
}

static void delete_MallocMeta ( MallocMeta* md ) {
   VG_(freeEltPA)(MallocMeta_poolalloc, md);
}
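
/* MallocMeta records are keyed in hg_mallocmeta_table by the payload
   (client-visible) address; the pool allocator merely cheapens the very
   frequent allocation and freeing of these small fixed-size records. */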
/* Allocate a client block and set up the metadata for it. */

void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(md->szB, md->where);

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}
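
/* All of the allocator entry points above funnel into handle_alloc and
   differ only in the alignment they request and in whether the block is
   zero-filled (only hg_cli__calloc passes is_zeroed=True). */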
/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
      ExeContext* ec_free = VG_(record_ExeContext)( tid, 0 );
      VG_(XTMemory_Full_free)(md->szB, md->where, ec_free);
   }

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller */
      md->where = VG_(record_ExeContext)(tid, 0);
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      return payloadV;
   }

   else {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*is_zeroed*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}
static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}
/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      at the payload address. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/UInt*        tnr,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   const Int n_fast_check_words = 16;

   /* Before searching the list of allocated blocks in hg_mallocmeta_table,
      first verify that data_addr is in a heap client segment. */
   const NSegment *s = VG_(am_find_nsegment) (data_addr);
   if (s == NULL || !s->isCH)
      return False;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;

  found:
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (tnr)     *tnr     = mm->thr->errmsg_index;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
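
/* Lookup strategy, as described above: up to 16 O(1) hash probes at
   word-sized offsets below data_addr catch the common "pointer at or
   near the start of the block" case; only if those all miss does the
   code fall back to a linear scan over every MallocMeta. */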
/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

#define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))

/* This takes and returns atoms, of course.  Not full IRExprs. */
static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
{
   tl_assert(arg1 && arg2);
   tl_assert(isIRAtom(arg1));
   tl_assert(isIRAtom(arg2));
   /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
      hack. */
   IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
   addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
   addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
   addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
                                                       mkexpr(wide2))));
   addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
   return mkexpr(res);
}
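
/* mk_And1 combines two 1-bit guard atoms by widening both to I32,
   And32-ing them, and narrowing the result back to I1, adding the
   intermediate statements to sbOut as it goes. */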
static void instrument_mem_access ( IRSB*   sbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Bool    fixupSP_needed,
                                    Int     hWordTy_szB,
                                    Int     goff_sp,
                                    /* goff_sp_s1 is the offset in guest
                                       state where the cachedstack validity
                                       is stored. */
                                    Int     goff_sp_s1,
                                    IRExpr* guard ) /* NULL => True */
{
   IRType   tyAddr    = Ity_INVALID;
   const HChar* hName = NULL;
   IRExpr** argv      = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise

   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv  = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv  = mkIRExprVec_1( addr );
            break;
         case 4:
            if (fixupSP_needed) {
               /* Unwind has to be done with a SP fixed up with one word.
                  See Ist_Put heuristic in hg_instrument. */
               hName = "evh__mem_help_cwrite_4_fixupSP";
               hAddr = &evh__mem_help_cwrite_4_fixupSP;
            } else {
               hName = "evh__mem_help_cwrite_4";
               hAddr = &evh__mem_help_cwrite_4;
            }
            argv  = mkIRExprVec_1( addr );
            break;
         case 8:
            if (fixupSP_needed) {
               /* Unwind has to be done with a SP fixed up with one word.
                  See Ist_Put heuristic in hg_instrument. */
               hName = "evh__mem_help_cwrite_8_fixupSP";
               hAddr = &evh__mem_help_cwrite_8_fixupSP;
            } else {
               hName = "evh__mem_help_cwrite_8";
               hAddr = &evh__mem_help_cwrite_8;
            }
            argv  = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv  = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv  = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv  = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv  = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv  = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv  = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );

   if (HG_(clo_delta_stacktrace)) {
      /* memory access helper might read the shadow1 SP offset, that
         indicates if the cached stacktrace is valid. */
      di->fxState[0].fx        = Ifx_Read;
      di->fxState[0].offset    = goff_sp_s1;
      di->fxState[0].size      = hWordTy_szB;
      di->fxState[0].nRepeats  = 0;
      di->fxState[0].repeatLen = 0;
   }

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      /* guardA == "guard on the address" */
      IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guardA,
                tyAddr == Ity_I32
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guardA);
   }

   /* If there's a guard on the access itself (as supplied by the
      caller of this routine), we need to AND that in to any guard we
      might already have. */
   if (guard) {
      di->guard = mk_And1(sbOut, di->guard, guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
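
/* The dirty call can therefore end up guarded twice over: by the
   "obviously a stack reference" filter built when --check-stack-refs=no
   is in effect, and by any guard supplied by the caller (for conditional
   loads/stores), the two being combined with mk_And1. */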
/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (sidedly safe w/ False as the safe value) */
static Bool is_in_dynamic_linker_shared_object( Addr ga )
{
   const HChar* soname;

   dinfo = VG_(find_DebugInfo)( VG_(current_DiEpoch)(), ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);

   if (0) VG_(printf)("%s\n", soname);

   return VG_(is_soname_ld_so)(soname);
}
void addInvalidateCachedStack (IRSB* bbOut,
                               Int   goff_sp_s1,
                               Int   hWordTy_szB)
{
   /* Invalidate cached stack: Write 0 in the shadow1 offset 0 */
   addStmtToIRSB( bbOut,
                  IRStmt_Put(goff_sp_s1,
                             hWordTy_szB == 4 ?
                             mkU32(0) : mkU64(0)));
   /// ???? anything more efficient than assign a Word???
}
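
/* Writing 0 at goff_sp_s1 (the SP slot in shadow area 1) marks the
   cached stacktrace as stale; per the comments in instrument_mem_access
   above, the access helpers may read this slot when
   --delta-stacktrace=yes is active to decide whether the cached
   stacktrace can be reused. */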
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      const VexGuestLayout* layout,
                      const VexGuestExtents* vge,
                      const VexArchInfo* archinfo_host,
                      IRType gWordTy, IRType hWordTy )
{
   Addr cia; /* address of current insn */

   Bool inLDSO = False;
   Addr inLDSOmask4K = 1; /* mismatches on first check */

   // Set to True when SP must be fixed up when taking a stack trace for the
   // mem accesses in the rest of the instruction
   Bool fixupSP_needed = False;

   const Int goff_SP = layout->offset_SP;
   /* SP in shadow1 indicates if cached stack is valid.
      We have to invalidate the cached stack e.g. when seeing call or ret. */
   const Int goff_SP_s1 = layout->total_sizeB + layout->offset_SP;

   const Int hWordTy_szB = sizeofIRType(hWordTy);

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;
   bbOut->offsIP   = bbIn->offsIP;

   // Copy verbatim any IR preamble preceding the first IMark
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {

         case Ist_Exit:
            /* No memory reference, but if we do anything else than
               Ijk_Boring, indicate to helgrind that the previously
               recorded stack is invalid.
               For Ijk_Boring, also invalidate the stack if the exit
               instruction has no CF info.  This heuristic avoids cached
               stack trace mismatch in some cases such as longjmp
               implementation.  Similar logic below for the bb exit. */
            if (HG_(clo_delta_stacktrace)
                && (st->Ist.Exit.jk != Ijk_Boring || ! VG_(has_CF_info)(cia)))
               addInvalidateCachedStack(bbOut, goff_SP_s1, hWordTy_szB);
            break;

            /* None of these can contain any memory references. */

         case Ist_Put:
            /* This cannot contain any memory references. */
            /* If we see a put to SP, from now on in this instruction,
               the SP needed to unwind has to be fixed up by one word.
               This very simple heuristic ensures correct unwinding in the
               typical case of a push instruction.  If we need to cover more
               cases, then we need to better track how the SP is modified by
               the instruction (and calculate a precise sp delta), rather than
               assuming that the SP is decremented by a Word size. */
            if (HG_(clo_delta_stacktrace) && st->Ist.Put.offset == goff_SP) {
               fixupSP_needed = True;
            }
            break;

            /* This cannot contain any memory references. */

         case Ist_IMark:
            fixupSP_needed = False;

            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;

            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.
               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", cia);
               inLDSOmask4K = cia & ~(Addr)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_CancelReservation:
                  break; /* not interesting */
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            instrument_mem_access(
               bbOut, cas->addr,
               (isDCAS ? 2 : 1)
                  * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
               False/*!isStore*/, fixupSP_needed,
               hWordTy_szB, goff_SP, goff_SP_s1,
               NULL/*no-guard*/
            );
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               instrument_mem_access(
                  bbOut, st->Ist.LLSC.addr,
                  sizeofIRType(dataTy),
                  False/*!isStore*/, fixupSP_needed,
                  hWordTy_szB, goff_SP, goff_SP_s1,
                  NULL/*no-guard*/
               );
            }
            break;
         }

         case Ist_Store:
            instrument_mem_access(
               bbOut, st->Ist.Store.addr,
               sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
               True/*isStore*/, fixupSP_needed,
               hWordTy_szB, goff_SP, goff_SP_s1,
               NULL/*no-guard*/
            );
            break;

         case Ist_StoreG: {
            IRStoreG* sg   = st->Ist.StoreG.details;
            IRExpr*   data = sg->data;
            IRExpr*   addr = sg->addr;
            IRType    type = typeOfIRExpr(bbIn->tyenv, data);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   True/*isStore*/, fixupSP_needed,
                                   hWordTy_szB,
                                   goff_SP, goff_SP_s1, sg->guard );
            break;
         }

         case Ist_LoadG: {
            IRLoadG* lg       = st->Ist.LoadG.details;
            IRType   type     = Ity_INVALID; /* loaded type */
            IRType   typeWide = Ity_INVALID; /* after implicit widening */
            IRExpr*  addr     = lg->addr;
            typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
            tl_assert(type != Ity_INVALID);
            instrument_mem_access( bbOut, addr, sizeofIRType(type),
                                   False/*!isStore*/, fixupSP_needed,
                                   hWordTy_szB,
                                   goff_SP, goff_SP_s1, lg->guard );
            break;
         }

         case Ist_WrTmp: {
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               instrument_mem_access(
                  bbOut,
                  data->Iex.Load.addr,
                  sizeofIRType(data->Iex.Load.ty),
                  False/*!isStore*/, fixupSP_needed,
                  hWordTy_szB, goff_SP, goff_SP_s1,
                  NULL/*no-guard*/
               );
            }
            break;
         }

         case Ist_Dirty: {
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize,
                     False/*!isStore*/, fixupSP_needed,
                     hWordTy_szB, goff_SP, goff_SP_s1,
                     NULL/*no-guard*/
                  );
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  instrument_mem_access(
                     bbOut, d->mAddr, dataSize,
                     True/*isStore*/, fixupSP_needed,
                     hWordTy_szB, goff_SP, goff_SP_s1,
                     NULL/*no-guard*/
                  );
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   // See above the case Ist_Exit:
   if (HG_(clo_delta_stacktrace)
       && (bbOut->jumpkind != Ijk_Boring || ! VG_(has_CF_info)(cia)))
      addInvalidateCachedStack(bbOut, goff_SP_s1, hWordTy_szB);

   return bbOut;
}
/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
   }
}

/* A list of Ada dependent tasks and their masters. Used for implementing
   the Ada task termination semantic as implemented by the
   gcc gnat Ada runtime. */
typedef
   struct {
      void*   dependent;    // Ada Task Control Block of the Dependent
      void*   master;       // ATCB of the master
      Word    master_level; // level of dependency between master and dependent
      Thread* hg_dependent; // helgrind Thread* for dependent task.
   }
   GNAT_dmml; // (d)ependent (m)aster (m)aster_(l)evel.
static XArray* gnat_dmmls;   /* of GNAT_dmml */
static void gnat_dmmls_INIT (void)
{
   if (UNLIKELY(gnat_dmmls == NULL)) {
      gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
                               HG_(free),
                               sizeof(GNAT_dmml) );
   }
}

static void xtmemory_report_next_block(XT_Allocs* xta, ExeContext** ec_alloc)
{
   const MallocMeta* md = VG_(HT_Next)(hg_mallocmeta_table);
   xta->nbytes = md->szB;
   *ec_alloc = md->where;
}

static void HG_(xtmemory_report) ( const HChar* filename, Bool fini )
{
   // Make xtmemory_report_next_block ready to be called.
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   VG_(XTMemory_report)(filename, fini, xtmemory_report_next_block,
                        VG_(XT_filter_1top_and_maybe_below_main));
}
static void print_monitor_help ( void )
{
   VG_(gdb_printf)
      (
"helgrind monitor commands:\n"
" info locks [lock_addr] : show status of lock at addr lock_addr\n"
" with no lock_addr, show status of all locks\n"
" accesshistory <addr> [<len>] : show access history recorded\n"
" for <len> (or 1) bytes at <addr>\n"
" xtmemory [<filename>]\n"
" dump xtree memory profile in <filename> (default xtmemory.kcg.%%p.%%n)\n"
);
}
/* return True if request recognised, False otherwise */
static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
{
   HChar s[VG_(strlen)(req)]; /* copy for strtok_r */

   VG_(strcpy) (s, req);

   wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
   /* NB: if possible, avoid introducing a new command below which
      starts with the same first letter(s) as an already existing
      command. This ensures a shorter abbreviation for the user. */
   switch (VG_(keyword_id)
           ("help info accesshistory xtmemory",
            wcmd, kwd_report_duplicated_matches)) {
   case -2: /* multiple matches */
      return True;
   case -1: /* not found */
      return False;
   case  0: /* help */
      print_monitor_help();
      return True;
   case  1: { /* info */
      wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
      switch (kwdid = VG_(keyword_id)
              ("locks",
               wcmd, kwd_report_all)) {
         case 0: /* locks */
         {
            Bool lk_shown = False;
            Bool all_locks = True;

            wa = VG_(strtok_r) (NULL, " ", &ssaveptr);
            if (VG_(parse_Addr) (&wa, &lk_addr) )
               all_locks = False;
            else {
               VG_(gdb_printf) ("missing or malformed address\n");
            }
            for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
               if (all_locks || lk_addr == lk->guestaddr) {
                           True /* show_lock_addrdescr */,
                           False /* show_internal_data */);
               }
            }
            if (i == 0)
               VG_(gdb_printf) ("no locks\n");
            if (!all_locks && !lk_shown)
               VG_(gdb_printf) ("lock with address %p not found\n",
                                (void*)lk_addr);
            break;
         }
      }
      return True;
   }

   case 2: /* accesshistory */
      if (HG_(clo_history_level) < 2) {
         VG_(gdb_printf)
            ("helgrind must be started with --history-level=full"
             " to use accesshistory\n");
         return True;
      }
      if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
         if (szB >= 1)
            libhb_event_map_access_history (address, szB, HG_(print_access));
         else
            VG_(gdb_printf) ("len must be >=1\n");
      }
      return True;

   case 3: { /* xtmemory */
      filename = VG_(strtok_r) (NULL, " ", &ssaveptr);
      HG_(xtmemory_report)(filename, False);
      return True;
   }

   default:
      tl_assert(0);
      return False;
   }
}
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret )
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0])
       && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, NULL,
                                           &payload, &pszB, args[1])) {
            evh__die_mem(payload, pszB);
            evh__new_mem(payload, pszB);
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_GET_ABITS:
         if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
                            args[1], args[2], args[3]);
         UChar *zzabit = (UChar *) args[2];
         if (zzabit == NULL
             || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
                                            VKI_PROT_READ|VKI_PROT_WRITE))
            *ret = (UWord) libhb_srange_get_abits ((Addr) args[1],
         break;

      /* This thread (tid) (a master) is informing us that it has
         seen the termination of a dependent task, and that this should
         be considered as a join between master and dependent. */
      case _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN: {
         const Thread *stayer    = map_threads_maybe_lookup( tid );
         const void   *dependent = (void*)args[1];
         const void   *master    = (void*)args[2];

            VG_(printf)("HG_GNAT_DEPENDENT_MASTER_JOIN (tid %d): "
                        "self_id = %p Thread* = %p dependent %p\n",
                        (Int)tid, master, stayer, dependent);

         /* Similar loop as for master completed hook below, but stops at
            the first matching occurrence, only comparing master and
            dependent. */
         for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
            GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
            if (dmml->master == master
                && dmml->dependent == dependent) {
                  VG_(printf)("quitter %p dependency to stayer %p (join)\n",
                              dmml->hg_dependent->hbthr, stayer->hbthr);
               tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
               generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
                                                   stayer->hbthr);
               VG_(removeIndexXA) (gnat_dmmls, n);
               break;
            }
         }
         break;
      }

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );

         if (my_thr->coretid != 1) {
            /* FIXME: hardwires assumption about identity of the root thread. */
            if (HG_(clo_ignore_thread_creation)) {
               HG_(thread_leave_pthread_create)(my_thr);
               HG_(thread_leave_synchr)(my_thr);
               tl_assert(my_thr->synchr_nesting == 0);
            }
         }
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (UWord*)&thr_q, (UWord)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
            VG_(printf)(".................... quitter Thread* = %p\n",
         evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         break;
      }

      /* This thread (tid) is informing us of its master. */
      case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
         dmml.dependent    = (void*)args[1];
         dmml.master       = (void*)args[2];
         dmml.master_level = (Word)args[3];
         dmml.hg_dependent = map_threads_maybe_lookup( tid );
         tl_assert(dmml.hg_dependent);

            VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
                        "dependent = %p master = %p master_level = %ld"
                        " dependent Thread* = %p\n",
                        (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
         VG_(addToXA) (gnat_dmmls, &dmml);
         break;
      }

      /* This thread (tid) is informing us that it has completed a
         master. */
      case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
         const Thread *stayer       = map_threads_maybe_lookup( tid );
         const void   *master       = (void*)args[1];
         const Word    master_level = (Word) args[2];

            VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
                        "self_id = %p master_level = %ld Thread* = %p\n",
                        (Int)tid, master, master_level, stayer);

         /* Reverse loop on the array, simulating a pthread_join for
            the Dependent tasks of the completed master, and removing
            them from the array. */
         for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
            GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
            if (dmml->master == master
                && dmml->master_level == master_level) {
                  VG_(printf)("quitter %p dependency to stayer %p\n",
                              dmml->hg_dependent->hbthr, stayer->hbthr);
               tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
               generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
                                                   stayer->hbthr);
               VG_(removeIndexXA) (gnat_dmmls, n);
            }
         }
         break;
      }
      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      /* mutex=arg[1], mutex_is_init=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*, long
         if ((args[2] == True) // lock actually taken
             && (HG_(get_pthread_create_nesting_level)(tid) == 0))
            evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST:
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* Thread successfully completed pthread_cond_init:
         cond=arg[1], cond_attr=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
         evh__HG_PTHREAD_COND_INIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      /* cond=arg[1], cond_is_init=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
         break;

      /* Thread completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2], timeout=arg[3], successful=arg[4] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         if (args[4] == True)
            evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                            (void*)args[1], (void*)args[2],
                                            args[3] == True );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                             args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         if ((args[3] == True)
             && (HG_(get_pthread_create_nesting_level)(tid) == 0))
            evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         if (HG_(get_pthread_create_nesting_level)(tid) == 0)
            evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_POST: /* sem_t* */
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE: /* sem_t* */
         HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t*, long tookLock */
         if (args[2] == True)
            evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;
5619 case _VG_USERREQ__HG_CLIENTREQ_UNIMP
: {
5621 HChar
* who
= (HChar
*)args
[1];
5623 Thread
* thr
= map_threads_maybe_lookup( tid
);
5624 tl_assert( thr
); /* I must be mapped */
5626 tl_assert( VG_(strlen
)(who
) <= 50 );
5627 VG_(sprintf
)(buf
, "Unimplemented client request macro \"%s\"", who
);
5628 /* record_error_Misc strdup's buf, so this is safe: */
5629 HG_(record_error_Misc
)( thr
, buf
);
5633 case _VG_USERREQ__HG_USERSO_SEND_PRE
:
5634 /* UWord arbitrary-SO-tag */
5635 evh__HG_USERSO_SEND_PRE( tid
, args
[1] );
5638 case _VG_USERREQ__HG_USERSO_RECV_POST
:
5639 /* UWord arbitrary-SO-tag */
5640 evh__HG_USERSO_RECV_POST( tid
, args
[1] );
5643 case _VG_USERREQ__HG_USERSO_FORGET_ALL
:
5644 /* UWord arbitrary-SO-tag */
5645 evh__HG_USERSO_FORGET_ALL( tid
, args
[1] );
5648 case VG_USERREQ__GDB_MONITOR_COMMAND
: {
5649 Bool handled
= handle_gdb_monitor_command (tid
, (HChar
*)args
[1]);
      case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_enter_pthread_create)(thr);
            HG_(thread_enter_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_leave_pthread_create)(thr);
            HG_(thread_leave_synchr)(thr);
         }
         break;
      }
      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE:   // pth_mx_t*, long tryLock
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED:     // void*, long isW
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED:     // void*
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_RELEASED:          /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED:          /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

#if defined(VGO_solaris)
      case _VG_USERREQ__HG_RTLD_BIND_GUARD:
         evh__HG_RTLD_BIND_GUARD(tid, args[1]);
         break;

      case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
         evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
         break;
#endif /* VGO_solaris */

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
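
/* Editor's note: a minimal sketch (not part of the tool) of how client code
   reaches the _VG_USERREQ__HG_USERSO_SEND_PRE / _RECV_POST cases above.  It
   assumes the ANNOTATE_HAPPENS_BEFORE/AFTER macros from helgrind.h, which
   wrap those client requests; 'payload', 'ready' and 'use()' are
   illustrative names only.

      #include <valgrind/helgrind.h>

      int payload;
      volatile int ready = 0;

      // producer thread
      payload = 42;
      ANNOTATE_HAPPENS_BEFORE(&ready);   // raises ..._HG_USERSO_SEND_PRE
      ready = 1;

      // consumer thread
      while (!ready) ;                   // annotated hand-off
      ANNOTATE_HAPPENS_AFTER(&ready);    // raises ..._HG_USERSO_RECV_POST
      use(payload);
*/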
/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   else if VG_BOOL_CLO(arg, "--delta-stacktrace",
                            HG_(clo_delta_stacktrace)) {}

   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                            HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                            HG_(clo_ignore_thread_creation)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
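
/* Editor's note: a worked example of the --hg-sanity-flags parsing above.
   Digit j (0..5, left to right) sets bit (6-1-j), so:
      --hg-sanity-flags=000001  =>  HG_(clo_sanity_flags) == 0x01
      --hg-sanity-flags=010000  =>  HG_(clo_sanity_flags) == 0x10
      --hg-sanity-flags=000011  =>  HG_(clo_sanity_flags) == 0x03
   See hg_print_debug_usage below for what each bit enables. */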
static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --delta-stacktrace=no|yes [yes on linux amd64/x86]\n"
"       no : always compute a full history stacktrace from unwind info\n"
"       yes: derive a stacktrace from the previous stacktrace\n"
"            if there was no call/return or similar instruction\n"
"    --conflict-cache-size=N   size of 'full' history cache [2000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread\n"
"                                    creation [%s]\n",
      HG_(clo_ignore_thread_creation) ? "yes" : "no"
   );
}
static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:  is never done (may cause big space leaks in Helgrind)\n"
"       auto:   done just often enough to keep space usage under control\n"
"       always: done after every VTS GC (mostly just a big time waster)\n"
   );
}
static void hg_print_stats (void)
{
   HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
   if (HG_(clo_track_lockorders)) {
      HG_(ppWSUstats)( univ_laog,  "univ_laog" );
   }

   //zz       VG_(printf)("\n");
   //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
   //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
   //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
   //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz                   stats__hbefore_stk_hwm);
   //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
   //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

   VG_(printf)(" locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("client malloc-ed blocks: %'8u\n",
               VG_(HT_count_nodes)(hg_mallocmeta_table));

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)(" LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)(" locks: %'8lu acquires, %'8lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases );
   VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);

   libhb_shutdown(True); // This in fact only prints stats.
}
static void hg_fini ( Int exitcode )
{
   HG_(xtmemory_report) (VG_(clo_xtree_memory_file), True);

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");
}
/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace_with_deltas)
                    ( tid, frames, (UInt)nRequest,
                      NULL, NULL, 0,
                      thr->first_sp_delta );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}
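
/* Editor's note: the two callbacks above are handed to libhb_init() in
   hg_post_clo_init() below.  libhb (Helgrind's happens-before engine) has
   no notion of Valgrind ThreadIds, so each callback maps its Thr* back to
   the Helgrind Thread and then to a ThreadId via
   map_threads_maybe_reverse_lookup_SLOW, before asking the core for a
   stack trace or an ExeContext. */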
static void hg_post_clo_init ( void )
{
   if (HG_(clo_delta_stacktrace)
       && VG_(clo_vex_control).guest_chase_thresh != 0) {
      if (VG_(clo_verbosity) >= 2)
         VG_(message)(Vg_UserMsg,
                      "helgrind --delta-stacktrace=yes only works with "
                      "--vex-guest-chase-thresh=0\n"
                      "=> (re-setting it to 0)\n");
      VG_(clo_vex_control).guest_chase_thresh = 0;
   }

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
   if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
      // Activate full xtree memory profiling.
      VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
}
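
/* Editor's note: an illustrative invocation (not part of the tool) that
   takes the Vg_XTMemory_Full branch above; --xtree-memory and
   --xtree-memory-file are Valgrind core options, and the output file name
   here is just an example.

      valgrind --tool=helgrind --xtree-memory=full \
               --xtree-memory-file=xtmemory.kcg ./a.out
*/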
static void hg_info_location (DiEpoch ep, Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (ep, a);
}
static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2017, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_print_stats)         (hg_print_stats);
   VG_(needs_info_location)       (hg_info_location);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );
   VG_(track_new_mem_stack_4)     ( evh__new_mem_stack_4 );
   VG_(track_new_mem_stack_8)     ( evh__new_mem_stack_8 );
   VG_(track_new_mem_stack_12)    ( evh__new_mem_stack_12 );
   VG_(track_new_mem_stack_16)    ( evh__new_mem_stack_16 );
   VG_(track_new_mem_stack_32)    ( evh__new_mem_stack_32 );
   VG_(track_new_mem_stack_112)   ( evh__new_mem_stack_112 );
   VG_(track_new_mem_stack_128)   ( evh__new_mem_stack_128 );
   VG_(track_new_mem_stack_144)   ( evh__new_mem_stack_144 );
   VG_(track_new_mem_stack_160)   ( evh__new_mem_stack_160 );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );

   /* evh__die_mem calls at the end libhb_srange_noaccess_NoFX
      which has no effect. We do not use VG_(track_die_mem_stack),
      as this would be an expensive way to do nothing. */
   // VG_(track_die_mem_stack)      ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/