/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2017 OpenWorks LLP

   Copyright (C) 2007-2017 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"
#include "pub_tool_addrinfo.h"
#include "pub_tool_xtree.h"
#include "pub_tool_xtmemory.h"
#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"
// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up
/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/
/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
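/* Illustrative sketch (not part of the original sources): the casts in
   question look like the map_locks lookup used further down in this
   file, where a guest address and a Lock** are punned through
   UWord/UWord* to fit the WordFM interface:

      Lock* lk    = NULL;
      Bool  found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );

   It is this kind of type-pun that trips gcc's strict-aliasing
   analysis at -O2, hence the -fno-strict-aliasing requirement above. */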
// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.
/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0
static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0
/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.
/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/
/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }
/* Admin double linked list of Locks */
/* We need a double linked list to properly and efficiently
   handle memory pools. */
static Lock* admin_locks = NULL;
/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */
/* Allow libhb to get at the universe of locksets stored here. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
/* Allow libhb to get at the list of locks stored here.  Ditto. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/
static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;
#if defined(VGO_solaris)
Bool HG_(clo_ignore_thread_creation) = True;
#else
Bool HG_(clo_ignore_thread_creation) = False;
#endif /* VGO_solaris */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
/* --------- Constructors --------- */
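// Make a new Thread bound to the libhb thread state 'hbthr': allocate
// it, give it empty lock sets, and link it onto the admin_threads list.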
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx      = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->first_sp_delta = 0;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   thread->synchr_nesting = 0;
   thread->pthread_create_nesting_level = 0;
#if defined(VGO_solaris)
   thread->bind_guard_flag = 0;
#endif /* VGO_solaris */

   admin_threads        = thread;
   return thread;
}
// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock in admin_locks double linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock             = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to double linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next       = admin_locks;
   lock->admin_prev       = NULL;
   admin_locks            = lock;
   /* end: add to double linked list */
   lock->unique           = unique++;
   lock->magic            = LockN_MAGIC;
   lock->appeared_at      = NULL;
   lock->acquired_at      = NULL;
   lock->hbso             = libhb_so_alloc();
   lock->guestaddr        = guestaddr;
   lock->kind             = kind;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}
/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes from admin_locks double linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from double linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del lock from double linked list */
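   /* Poison the Lock's storage before freeing it, so that any stale
      pointers to it are more likely to be noticed quickly. */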
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
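/* Remove 'lk' from the locksetA and locksetW of every thread that
   currently holds it, so that no thread's lockset is left referring
   to a lock that is about to go away. */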
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)
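/* Illustrative usage (not from the original sources): a full debug
   dump of both the thread and lock structures, indented by the pp_*
   helpers below, could be requested with

      pp_everything( PP_ALL, "some caller tag" );
*/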
static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int   i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n",   (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n",   (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}
/* Pretty Print lock lk.
   if show_lock_addrdescr, describes the (guest) lock address.
     (this description will be more complete with --read-var-info=yes).
   if show_internal_data, shows also helgrind internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data)
{
   // FIXME PW EPOCH should use the epoch of the allocated_at ec.
   const DiEpoch cur_ep = VG_(current_DiEpoch)();
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) (cur_ep, (Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord   count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %u ", thr->coretid);
         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}
static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }
   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread*   thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}
/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/
/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}
/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}
static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}
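/* Track how deeply the current thread is nested inside client
   synchronisation operations.  While synchr_nesting is nonzero, the
   memory-access handlers further down (e.g. evh__pre_mem_read) skip
   their race checks; the counter is only ever bumped on
   Solaris/FreeBSD. */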
static void HG_(thread_enter_synchr)(Thread *thr) {
   tl_assert(thr->synchr_nesting >= 0);
#if defined(VGO_solaris) || defined(VGO_freebsd)
   thr->synchr_nesting += 1;
#endif /* VGO_solaris || VGO_freebsd */
}

static void HG_(thread_leave_synchr)(Thread *thr) {
#if defined(VGO_solaris) || defined(VGO_freebsd)
   thr->synchr_nesting -= 1;
#endif /* VGO_solaris || VGO_freebsd */
   tl_assert(thr->synchr_nesting >= 0);
}
#if defined(VGO_freebsd)
static Int HG_(get_pthread_synchr_nesting_level)(ThreadId tid) {
   Thread *thr = map_threads_maybe_lookup(tid);
   return thr->synchr_nesting;
}
#endif /* VGO_freebsd */

static void HG_(thread_enter_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level >= 0);
   thr->pthread_create_nesting_level += 1;
}

static void HG_(thread_leave_pthread_create)(Thread *thr) {
   tl_assert(thr->pthread_create_nesting_level > 0);
   thr->pthread_create_nesting_level -= 1;
}

static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
   Thread *thr = map_threads_maybe_lookup(tid);
   return thr->pthread_create_nesting_level;
}
/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/
/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/
static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */
/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t
      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
         owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
            and can be mapped to a valid Segment(seg)
            and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True
      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
      mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread
      }

   SecMaps vs Segment/Thread/Lock

      if any shadow word is ShR or ShM then .mbHasShared == True

      for each Excl(segid) state
         map_segments_lookup maps to a sane Segment(seg)
      for each ShM/ShR(tsetid,lsetid) state
         each lk in lset is a valid Lock
         each thr in tset is a valid thread, which is non-dead
*/
/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}
/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   UWord     ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord   count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }
   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/
static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}
inline static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

inline static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN,
                                                   SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

inline static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN,
                                                   SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

inline static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN,
                                               SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}
/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/
/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */
/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/
static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;
static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}

static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (thr)
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}
static
void evh__new_mem ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}
static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}
#define DCL_evh__new_mem_stack(syze)                                      \
   static void VG_REGPARM(1) evh__new_mem_stack_##syze(Addr new_SP)       \
   {                                                                      \
      Thread *thr = get_current_Thread();                                 \
      if (SHOW_EVENTS >= 2)                                               \
         VG_(printf)("evh__new_mem_stack_" #syze "(%p, %lu)\n",           \
                     (void*)new_SP, (SizeT)syze );                        \
      shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + new_SP, syze );   \
      if (syze >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE)) \
         all__sanity_check("evh__new_mem_stack_" #syze "-post");          \
      if (UNLIKELY(thr->pthread_create_nesting_level > 0))                \
         shadow_mem_make_Untracked( thr, new_SP, syze );                  \
   }
DCL_evh__new_mem_stack(4);
DCL_evh__new_mem_stack(8);
DCL_evh__new_mem_stack(12);
DCL_evh__new_mem_stack(16);
DCL_evh__new_mem_stack(32);
DCL_evh__new_mem_stack(112);
DCL_evh__new_mem_stack(128);
DCL_evh__new_mem_stack(144);
DCL_evh__new_mem_stack(160);
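/* These sizes correspond to the specialised stack-growth callbacks
   provided by the Valgrind core (VG_(track_new_mem_stack_4) ...
   VG_(track_new_mem_stack_160)), so the most common small stack
   adjustments avoid the generic evh__new_mem_stack handler. */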
static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
   if (UNLIKELY(thr->pthread_create_nesting_level > 0))
      shadow_mem_make_Untracked( thr, a, len );
}
static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   Thread *thr = get_current_Thread();
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx) {
      shadow_mem_make_New( thr, a, len );
      if (UNLIKELY(thr->pthread_create_nesting_level > 0))
         shadow_mem_make_Untracked( thr, a, len );
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}
static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests.  If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!rr && !ww)
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}
static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}
static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this.  If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs.  If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb.  So force them to NoAccess, so that all
   // VTS references in the affected area are dropped.  Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}
static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}
static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   Thread *thr = get_current_Thread();
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_scopy_range( thr, src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On x86/amd64-linux, this entails a nasty glibc specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
        first_ip_delta = -3;
#       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
        first_ip_delta = -1;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }

      if (HG_(clo_ignore_thread_creation)) {
         HG_(thread_enter_pthread_create)(thr_c);
         tl_assert(thr_c->synchr_nesting == 0);
         HG_(thread_enter_synchr)(thr_c);
         /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (thr == NULL)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
/* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
static
void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
{
   SO* so;
   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Send last arg of _so_send as False, since the sending thread
      doesn't actually exist any more, so we don't want _so_send to
      try taking stack snapshots of it. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped.  Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls.  (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.)  See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);
}
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   generate_quitter_stayer_dependence (hbthr_q, hbthr_s);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
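// Illustrative sketch (client-side view, not part of Helgrind): the kind
// of pattern that the quitter->stayer dependence above makes race-free.
// Names below are hypothetical.
//
//    int result;                      // written by the child, read by joiner
//
//    void* child_fn(void* arg) {
//       result = compute();           // write in the quitting thread
//       return NULL;
//    }
//
//    pthread_t t;
//    pthread_create(&t, NULL, child_fn, NULL);
//    pthread_join(t, NULL);           // JOIN_POST fires here
//    printf("%d\n", result);          // ordered after the child's write
//
// Because the quitter's vector clock is sent into a temporary SO and
// received by the stayer, the write of 'result' happens-before the read,
// so no race is reported.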
static
void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
                         Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}
static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                const HChar* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   Thread *thr = map_threads_lookup(tid);
   len = VG_(strlen)( (HChar*) a );
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cread_range( thr, a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}
static
void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   Thread *thr = map_threads_lookup(tid);
   if (LIKELY(thr->synchr_nesting == 0))
      shadow_mem_cwrite_range(thr, a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}
static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // We ignore the initialisation state (is_inited); that's ok.
   shadow_mem_make_New(get_current_Thread(), a, len);
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}
static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free.  This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      if (LIKELY(thr->synchr_nesting == 0))
         shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_AHAE( thr, a, len );
   /* We used to call instead
         shadow_mem_make_NoAccess_NoFX( thr, a, len );
      A non-buggy application will not access the freed memory any more,
      so marking it no-access is in theory useless.
      Not marking freed memory would avoid the overhead for applications
      doing mostly malloc/free, as the freed memory should then be recycled
      very quickly after marking.
      We mark it noaccess anyway, for the following reasons:
        * the accessibility bits then always correctly represent the memory
          status (e.g. for the client request VALGRIND_HG_GET_ABITS).
        * the overhead is reasonable (about 5 seconds per Gb in 1000 byte
          blocks, on a ppc64le, for an unrealistic workload of an application
          doing only malloc/free).
        * marking no access allows the SecMap to be GC'd, which might improve
          performance and/or memory usage.
        * we might detect more application bugs when memory is marked
          noaccess.
      If needed, we could support here an option --free-is-noaccess=yes|no
      to avoid marking freed memory as no access, for applications that
      need to avoid the noaccess-marking overhead. */
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}
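// Illustrative sketch (client-side view, not part of Helgrind): the kind
// of defect that modelling a free as a write is intended to expose.
// Names below are hypothetical.
//
//    // Thread 1                          // Thread 2
//    int v = node->payload;               free(node);
//
// With no synchronisation between the two threads, the read in Thread 1
// and the free in Thread 2 are unordered.  Treating the free as a write
// to the whole block makes this show up as an ordinary write/read race,
// even though Thread 2 never stores to *node explicitly.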
/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_4(hbthr, a);
}

/* Same as evh__mem_help_cwrite_4 but unwind will use a first_sp_delta of
   one word. */
static VG_REGPARM(1)
void evh__mem_help_cwrite_4_fixupSP(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;

   thr->first_sp_delta = sizeof(Word);
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_4(hbthr, a);
   thr->first_sp_delta = 0;
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_8(hbthr, a);
}

/* Same as evh__mem_help_cwrite_8 but unwind will use a first_sp_delta of
   one word. */
static VG_REGPARM(1)
void evh__mem_help_cwrite_8_fixupSP(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;

   thr->first_sp_delta = sizeof(Word);
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_8(hbthr, a);
   thr->first_sp_delta = 0;
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   if (LIKELY(thr->synchr_nesting == 0))
      LIBHB_CWRITE_N(hbthr, a, size);
}
/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
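// Illustrative sketch (client-side view, not part of Helgrind): the
// self-deadlock that the check above reports.
//
//    pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;   // non-recursive
//
//    pthread_mutex_lock(&mx);
//    pthread_mutex_lock(&mx);   // "Attempt to re-lock a non-recursive
//                               //  lock I already hold" is reported here,
//                               //  just before the thread deadlocks.
//
// A tryLock of an already-held lock is deliberately not reported, since
// pthread_mutex_trylock simply fails with EBUSY rather than deadlocking.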
static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}
static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */;
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}
/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/
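// Illustrative sketch (client-side view, not part of Helgrind) of the
// happens-before edge this CV -> SO mapping implements.  Names below are
// hypothetical.
//
//    // Thread 1 (signaller)
//    pthread_mutex_lock(&mx);
//    data  = 42;
//    ready = 1;
//    pthread_cond_signal(&cv);        // 'send' on the SO bound to &cv
//    pthread_mutex_unlock(&mx);
//
//    // Thread 2 (waiter)
//    pthread_mutex_lock(&mx);
//    while (!ready)
//       pthread_cond_wait(&cv, &mx);  // successful return: 'recv' from SO
//    use(data);                       // ordered after the write above
//    pthread_mutex_unlock(&mx);
//
// The send/recv pair on the SO orders the write of 'data' before the
// read of it, so the pair is not reported as a race.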
/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;

/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so    = so;
      cvi->mx_ga = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      return NULL;
   }
}
static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread* thr;
   UWord   keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
                 " destruction of condition variable being waited upon");
         /* Destroying a cond var that is being waited upon: the outcome
            is EBUSY and the variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      libhb_so_dealloc(cvi->so);
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV.  So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   lk = NULL;
   if (cvi->mx_ga != 0) {
      lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
   }
   /* note: lk could be NULL.  Be careful. */
   if (lk) {
      if (lk->kind == LK_rdwr) {
         HG_(record_error_Misc)(thr,
            "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
      }
      if (lk->heldBy == NULL) {
         HG_(record_error_Dubious)(thr,
            "pthread_cond_{signal,broadcast}: dubious: "
            "associated lock is not held by any thread");
      }
      if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
         HG_(record_error_Misc)(thr,
            "pthread_cond_{signal,broadcast}: "
            "associated lock is not held by calling thread");
      }
   } else {
      /* Couldn't even find the damn thing. */
      // But actually .. that's not necessarily an error.  We don't
      // know the (CV,MX) binding until a pthread_cond_wait or bcast
      // shows us what it is, and that may not have happened yet.
      // So just keep quiet in this circumstance.
      //HG_(record_error_Misc)( thr,
      //   "pthread_cond_{signal,broadcast}: "
      //   "no or invalid mutex associated with cond");
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   CVInfo* cvi;
   Bool    lk_valid = True;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else if (lk->heldBy != NULL
                 && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}
static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex,
                                             Bool timeout )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
                  (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
   if (!cvi) {
      /* This could be either a bug in helgrind or an error in the guest
         application (e.g. the cond var was destroyed by another thread).
         Let's assume helgrind is perfect ...
         Note that this is similar to drd behaviour. */
      HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
                                  " being waited upon");
      return;
   }

   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!timeout && !libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded"
                                   " without prior pthread_cond_post");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}
static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
                                             void* cond, void* cond_attr )
{
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
                  "(ctid=%d, cond=%p, cond_attr=%p)\n",
                  (Int)tid, (void*)cond, (void*) cond_attr );

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert (cvi);
   tl_assert (cvi->so);
}


static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond, Bool cond_is_init )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p, cond_is_init=%d)\n",
                  (Int)tid, (void*)cond, (Int)cond_is_init );

   map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
}
/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh.  */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}
/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about. */
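// Illustrative sketch (client-side view, not part of Helgrind) of the
// T1/T2/T3 scenario described above.  Names below are hypothetical.
//
//    sem_t s;                             // initialised with value 0
//
//    // T1:  item1 = make();  sem_post(&s);    -> pushes an SO carrying T1's VC
//    // T2:  item2 = make();  sem_post(&s);    -> pushes an SO carrying T2's VC
//    // T3:  sem_wait(&s);  sem_wait(&s);      -> pops and recvs from both SOs
//    //      use(item1);  use(item2);
//
// After both waits, T3 has received from both SOs and so carries
// dependencies on both posting threads; its uses of item1/item2 are not
// reported as racing with the writes in T1/T2.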
/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
   }
}
static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;

   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;

   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}
static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}
static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped off the
      stack, they acquire dependencies on the posting thread. */
   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}
static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */
   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}
/* -------------------------------------------------------- */
/* -------------- events to do with barriers -------------- */
/* -------------------------------------------------------- */

typedef
   struct {
      Bool    initted;   /* has it yet been initted by guest? */
      Bool    resizable; /* is resizing allowed? */
      UWord   size;      /* declared size */
      XArray* waiting;   /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;

static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}

static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}

/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;

static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
   }
}

static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
   UWord key, val;
   map_barrier_to_Bar_INIT();
   if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
      tl_assert(key == (UWord)barrier);
      return (Bar*)val;
   } else {
      Bar* bar = new_Bar();
      VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
      return bar;
   }
}

static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      tl_assert(bar);
      delete_Bar(bar);
   }
}
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
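// Worked example (illustrative only) of the weak-send/strong-recv scheme
// above, for a 3-thread barrier with hypothetical vector clocks:
//
//    before:   T1 = [2,0,1]   T2 = [0,3,1]   T3 = [1,1,4]
//
//    weak sends join each thread's VC into the SO:
//              SO = join([2,0,1], [0,3,1], [1,1,4]) = [2,3,4]
//
//    strong recvs then copy that join back into every thread, so all
//    three leave the barrier with VCs of at least [2,3,4]: every event
//    before the barrier in any thread happens-before every event after
//    the barrier in every thread.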
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      currently running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}
static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity.  There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity.  If we decrease it to be equal or
         below the number of waiting threads, they will now move past
         the barrier, so need to mess with dep edges in the same way
         as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      }
      bar->size = newcount;
   }
}
/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */

static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

static void map_usertag_to_SO_delete ( UWord usertag ) {
   UWord keyW, valW;
   map_usertag_to_SO_INIT();
   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == usertag);
      tl_assert(so);
      libhb_so_dealloc(so);
   }
}
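// Illustrative sketch (client-side view, not part of this file): these
// SOs are typically driven by the user-level happens-before annotations
// from helgrind.h, e.g.
//
//    // producer thread
//    payload = compute();
//    ANNOTATE_HAPPENS_BEFORE(&payload);   // 'send' on the SO tagged by
//                                         // the address &payload
//
//    // consumer thread, after its own (otherwise invisible)
//    // synchronisation, e.g. a lock-free flag read
//    ANNOTATE_HAPPENS_AFTER(&payload);    // 'recv' from the same SO
//    use(payload);
//
// Those annotation macros turn into the client requests that reach
// evh__HG_USERSO_SEND_PRE / evh__HG_USERSO_RECV_POST below.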
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );

   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}

static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}

static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}
#if defined(VGO_solaris)
/* ----------------------------------------------------- */
/* --- events to do with bind guard/clear intercepts --- */
/* ----------------------------------------------------- */

static
void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_GUARD"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((bindflag & thr->bind_guard_flag) == 0) {
      thr->bind_guard_flag |= bindflag;
      HG_(thread_enter_synchr)(thr);
      /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
      HG_(thread_enter_pthread_create)(thr);
   }
}

static
void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
                  "(tid=%d, flags=%d)\n",
                  (Int)tid, flags);

   Thread *thr = map_threads_maybe_lookup(tid);
   tl_assert(thr != NULL);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((thr->bind_guard_flag & bindflag) != 0) {
      thr->bind_guard_flag &= ~bindflag;
      HG_(thread_leave_synchr)(thr);
      HG_(thread_leave_pthread_create)(thr);
   }
}
#endif /* VGO_solaris */
/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
          laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/
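// Illustrative sketch (client-side view, not part of Helgrind) of the
// kind of inconsistency the acquisition-order graph records and reports:
//
//    // Thread 1                          // Thread 2
//    pthread_mutex_lock(&A);              pthread_mutex_lock(&B);
//    pthread_mutex_lock(&B);              pthread_mutex_lock(&A);
//    ...                                  ...
//
// Thread 1 establishes the edge A --> B in laog; when Thread 2 later
// acquires A while holding B, the required order is violated and the two
// acquisition points recorded in laog_exposition are shown to the user.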
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
}

static void laog__show ( const HChar* who ) {
   UWord       i, ws_size;
   UWord*      ws_words;
   Lock*       me;
   LAOGLinks*  links;

   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
static void univ_laog_do_GC ( void ) {
   Word i;
   LAOGLinks* links;
   Word seen = 0;
   Int prev_next_gc_univ_laog = next_gc_univ_laog;
   const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog );

   Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
                                        (Int) univ_laog_cardinality
                                              * sizeof(Bool) );
   // univ_laog_seen[*] set to 0 (False) by zalloc.

   VG_(initIterFM)( laog );
   links = NULL;
   while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
      tl_assert(links);
      tl_assert(links->inns < univ_laog_cardinality);
      univ_laog_seen[links->inns] = True;
      tl_assert(links->outs < univ_laog_cardinality);
      univ_laog_seen[links->outs] = True;
      links = NULL;
   }
   VG_(doneIterFM)( laog );

   for (i = 0; i < (Int)univ_laog_cardinality; i++) {
      if (univ_laog_seen[i])
         seen++;
      else
         HG_(dieWS) ( univ_laog, (WordSet)i );
   }

   HG_(free) (univ_laog_seen);

   // We need to decide the value of the next_gc.
   // 3 solutions were looked at:
   // Sol 1: garbage collect at seen * 2
   //   This solution was a lot slower, probably because we both do a lot of
   //   garbage collection and do not keep long enough laog WV that will become
   //   useful again very soon.
   // Sol 2: garbage collect at a percentage increase of the current cardinality
   //   (with a min increase of 1)
   //   Trials on a small test program with 1%, 5% and 10% increase were done.
   //   1% is slightly faster than 5%, which is slightly slower than 10%.
   //   However, on a big application, this caused the memory to be exhausted,
   //   as even a 1% increase of size at each gc becomes a lot, when many gc
   //   are done.
   // Sol 3: always garbage collect at current cardinality + 1.
   //   This solution was the fastest of the 3 solutions, and caused no memory
   //   exhaustion in the big application.
   //
   // With regards to cost introduced by gc: on the t2t perf test (doing only
   // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
   // version with garbage collection. With t2t 50 20 2, my machine started
   // to page out, and so the garbage collected version was much faster.
   // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
   // performance difference is insignificant (~ 0.1 s).
   // Of course, it might be that real life programs are not well represented
   // by t2t.
   //
   // If ever we want to have a more sophisticated control
   // (e.g. clo options to control the percentage increase or fixed increase),
   // we should do it here, eg.
   //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
   // Currently, we just hard-code the solution 3 above.
   next_gc_univ_laog = prev_next_gc_univ_laog + 1;

   if (VG_(clo_stats))
      VG_(message)
         (Vg_DebugMsg,
          "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
          (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
}
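/* Worked example of the "Sol 3" policy implemented above: if this GC fired
   when the univ_laog cardinality reached (say) 1000, then next_gc_univ_laog
   becomes 1001, regardless of how many word sets the sweep just freed.  The
   threshold therefore only creeps up by one per GC instead of growing
   geometrically, which is what keeps memory bounded on large applications. */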
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
      VG_(addToFM)( laog, (UWord)src, (UWord)links );
   }

   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (UWord)dst, (UWord)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
      }
   }

   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
}
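/* For example: when a thread that already holds L1 acquires L2,
   laog__pre_thread_acquires_lock (below) calls laog__add_edge(L1, L2), so
   L2 joins L1's "outs" set and L1 joins L2's "inns" set, and - if the edge
   is new and both acquired_at stacks are known - the pair of guest
   addresses is recorded in laog_exposition for later error reporting. */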
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   UWord      keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
   }

   /* Remove the exposition of src,dst (if present) */
   {
      LAOGLinkExposition *fm_expo;

      LAOGLinkExposition expo;
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;

      if (VG_(delFromFM) (laog_exposition,
                          (UWord*)&fm_expo, NULL, (UWord)&expo )) {
         HG_(free) (fm_expo);
      }
   }

   /* deleting edges can increase the nr of WS so check for gc. */
   if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
      univ_laog_do_GC();
   if (0) VG_(printf)("laog__del_edge exit\n");
}
__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   UWord      keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}
__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   UWord      keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
      tl_assert(links);
      tl_assert(keyW == (UWord)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}
__attribute__((noinline))
static void laog__sanity_check ( const HChar* who ) {
   UWord i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (UWord*)&me,
                           (UWord*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (UWord)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   UWord     succs_size, i;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
         continue;

      VG_(addToFM)( visited, (UWord)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
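/* Note: the search above is an iterative depth-first traversal that uses an
   explicit XArray as the work stack and a WordFM as the visited set, so it
   needs no recursion and terminates even if laog currently contains a cycle
   -- which is precisely the situation laog__pre_thread_acquires_lock (below)
   uses it to detect. */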
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord*   ls_words;
   UWord    ls_size, i;
   Lock*    other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (UWord*)&found, NULL, (UWord)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk, other,
            found->src_ec, found->dst_ec, other->acquired_at );
      } else {
         /* Hmm.  This can't happen (can it?) */
         /* Yes, it can happen: see tests/tc14_laog_dinphils.
            Imagine we have 3 philosophers A B C, and the forks
            fAB (between A and B), fBC (between B and C) and
            fCA (between C and A).

            Let's have the following actions:
                  A takes fCA then fAB, then releases both;
                  B takes fAB then fBC, then releases both;
                  C takes fBC then fCA, then releases both.

            Helgrind will report a lock order error when C takes fCA.
            Effectively, we have a deadlock if the following
            sequence of acquisitions happens:
                  A takes fCA;  B takes fAB;  C takes fBC.

            The error reported is:
              Observed (incorrect) order fBC followed by fCA
            but the stack traces that have established the required order
            are not available.

            This is because there is no pair (fCA, fBC) in laog exposition :
            the laog_exposition records all pairs of locks between a new lock
            taken by a thread and all the already taken locks.
            So, there is no laog_exposition (fCA, fBC) as no thread ever
            first locked fCA followed by fBC.

            In other words, when the deadlock cycle involves more than
            two locks, then helgrind does not report the sequence of
            operations that created the cycle.

            However, we can report the current stack trace (where
            lk is being taken), and the stack trace where other was acquired:
            Effectively, the variable 'other' contains a lock currently
            held by this thread, with its 'acquired_at'. */
         HG_(record_error_LockOrder)(
            thr, lk, other,
            NULL, NULL, other->acquired_at );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
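/* Summary of the flow implemented above, for one acquisition:
      laog__pre_thread_acquires_lock(thr, lk)
        1. bail out if thr already holds lk (recursive relock);
        2. other = laog__do_dfs_from_to(lk, thr->locksetA);
           if a path lk --*--> other exists, report a LockOrder error,
           with the establishing stacks if laog_exposition has them;
        3. for each 'old' in thr->locksetA, laog__add_edge(old, lk).
   Step 3 is what makes a later inversion of this order detectable. */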
/* Allocates a duplicate of words. Caller must HG_(free) the result. */
static UWord* UWordV_dup(UWord* words, Word words_size)
{
   Word i;

   if (words_size == 0)
      return NULL;

   UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));

   for (i = 0; i < words_size; i++)
      dup[i] = words[i];

   return dup;
}
/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   UWord preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   // We need to duplicate the payload, as these can be garbage collected
   // during the del/add operations below.
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   preds_words = UWordV_dup(preds_words, preds_size);

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   succs_words = UWordV_dup(succs_words, succs_size);

   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }

   if (preds_words)
      HG_(free) (preds_words);
   if (succs_words)
      HG_(free) (succs_words);

   // Remove lk information from laog links FM
   {
      LAOGLinks *links;
      Lock* linked_lk;

      if (VG_(delFromFM) (laog,
                          (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
         tl_assert (linked_lk == lk);
         HG_(free) (links);
      }
   }
   /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
}

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   UWordV_dup call needed here ...
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}
/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable *hg_mallocmeta_table = NULL;

/* MallocMeta are small elements. We use a pool to avoid
   the overhead of malloc for each MallocMeta. */
static PoolAlloc *MallocMeta_poolalloc = NULL;

static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
   VG_(memset)(md, 0, sizeof(MallocMeta));
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   VG_(freeEltPA)(MallocMeta_poolalloc, md);
}
/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p)
      return NULL;
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
      VG_(XTMemory_Full_alloc)(md->szB, md->where);

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
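/* The MallocMeta set up above is the only per-block metadata Helgrind keeps:
   it is looked up by payload address in hg_mallocmeta_table on free/realloc,
   consulted by HG_(mm_find_containing_block) when describing addresses in
   error messages, and fed to the XTree memory profiler when
   --xtree-memory=full is in effect. */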
/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new_aligned ( ThreadId tid, SizeT n, SizeT align, SizeT orig_align ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new_aligned ( ThreadId tid, SizeT n, SizeT align, SizeT orig_align ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT orig_alignT, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}
/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;
   if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full)) {
      ExeContext* ec_free = VG_(record_ExeContext)( tid, 0 );
      VG_(XTMemory_Full_free)(md->szB, md->where, ec_free);
   }

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete_aligned ( ThreadId tid, void* p, SizeT align ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete_aligned ( ThreadId tid, void* p, SizeT align ) {
   handle_free(tid, p);
}
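/* Note: all of the C/C++ deallocation entry points above funnel into
   handle_free; the aligned variants simply ignore their alignment argument,
   since alignment has no bearing on how the metadata is found or freed. */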
static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT      i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   if (payloadV == NULL) {
      return handle_alloc ( tid, new_size, VG_(clo_alignment),
                            /*is_zeroed*/False );
   }

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (new_size == 0U ) {
      if (VG_(clo_realloc_zero_bytes_frees) == True) {
         md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
         tl_assert(md_tmp);
         tl_assert(md_tmp == md);

         VG_(cli_free)((void*)md->payload);
         delete_MallocMeta(md);

         return NULL;
      }
      new_size = 1U;
   }

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller */
      md->where = VG_(record_ExeContext)(tid, 0);
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
      if (!p_new) {
         // Nb: if realloc fails, NULL is returned but the old block is not
         // touched.  What an awful function.
         return NULL;
      }

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}
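/* Note on the grow path above: evh__copy_mem first transfers the shadow
   (race-detection) state for the old range to p_new; the old range is then
   marked dead, the payload bytes are copied over, and finally the hash-table
   entry is swapped, so both the data and its metadata end up attached to the
   block's new address. */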
static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}
/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}
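/* Example of the zero-size special case above: for a block obtained via
   malloc(0), a query on exactly its payload address is still attributed to
   that block, even though the usual half-open interval
   [payload, payload + szB) is empty. */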
Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/UInt*        tnr,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* Before searching the list of allocated blocks in hg_mallocmeta_table,
      first verify that data_addr is in a heap client segment. */
   const NSegment *s = VG_(am_find_nsegment) (data_addr);
   if (s == NULL || !s->isCH)
      return False;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (tnr)     *tnr     = mm->thr->errmsg_index;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}
/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

#define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
#define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
#define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
#define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
#define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))

/* This takes and returns atoms, of course.  Not full IRExprs. */
static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
{
   tl_assert(arg1 && arg2);
   tl_assert(isIRAtom(arg1));
   tl_assert(isIRAtom(arg2));
   /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
      code, I know. */
   IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
   IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
   addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
   addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
   addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
                                            mkexpr(wide2))));
   addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
   return mkexpr(res);
}
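/* Illustration of what mk_And1 emits for two 1-bit atoms t1 and t2
   (temporary names are illustrative only):
      wide1 = 1Uto32(t1);  wide2 = 1Uto32(t2);
      anded = And32(wide1, wide2);  res = 32to1(anded)
   and the atom returned is mkexpr(res), i.e. the 1-bit AND is synthesised
   via a widen / And32 / narrow sequence. */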
static void instrument_mem_access ( IRSB*   sbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Bool    fixupSP_needed,
                                    Int     hWordTy_szB,
                                    Int     goff_sp,
                                    Int     goff_sp_s1,
                                    /* goff_sp_s1 is the offset in guest
                                       state where the cachedstack validity
                                       is stored. */
                                    IRExpr* guard ) /* NULL => True */
{
   IRType   tyAddr   = Ity_INVALID;
   const HChar* hName = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   // THRESH is the size of the window above SP (well,
   // mostly above) that we assume implies a stack reference.
   const Int THRESH = 4096 * 4; // somewhat arbitrary
   const Int rz_szB = VG_STACK_REDZONE_SZB;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            if (fixupSP_needed) {
               /* Unwind has to be done with a SP fixed up with one word.
                  See Ist_Put heuristic in hg_instrument. */
               hName = "evh__mem_help_cwrite_4_fixupSP";
               hAddr = &evh__mem_help_cwrite_4_fixupSP;
            } else {
               hName = "evh__mem_help_cwrite_4";
               hAddr = &evh__mem_help_cwrite_4;
            }
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            if (fixupSP_needed) {
               /* Unwind has to be done with a SP fixed up with one word.
                  See Ist_Put heuristic in hg_instrument. */
               hName = "evh__mem_help_cwrite_8_fixupSP";
               hAddr = &evh__mem_help_cwrite_8_fixupSP;
            } else {
               hName = "evh__mem_help_cwrite_8";
               hAddr = &evh__mem_help_cwrite_8;
            }
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Create the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   if (HG_(clo_delta_stacktrace)) {
      /* memory access helper might read the shadow1 SP offset, that
         indicates if the cached stacktrace is valid. */
      di->fxState[0].fx = Ifx_Read;
      di->fxState[0].offset = goff_sp_s1;
      di->fxState[0].size = hWordTy_szB;
      di->fxState[0].nRepeats = 0;
      di->fxState[0].repeatLen = 0;
      di->nFxState = 1;
   }

   if (! HG_(clo_check_stack_refs)) {
      /* We're ignoring memory references which are (obviously) to the
         stack.  In fact just skip stack refs that are within 4 pages
         of SP (SP - the redzone, really), as that's simple, easy, and
         filters out most stack references. */
      /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
         some arbitrary N.  If that is true then addr is outside the
         range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
         pages) then we can say addr is within a few pages of SP and
         so can't possibly be a heap access, and so can be skipped.

         Note that the condition simplifies to
            (addr - SP + RZ) >u N
         which generates better code in x86/amd64 backends, but it does
         not unfortunately simplify to
            (addr - SP) >u (N - RZ)
         (would be beneficial because N - RZ is a constant) because
         wraparound arithmetic messes up the comparison.  eg.
         20 >u 10 == True,
         but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
      */
      IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));

      /* "addr - SP" */
      IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(addr_minus_sp,
                tyAddr == Ity_I32
                   ? binop(Iop_Sub32, addr, mkexpr(sp))
                   : binop(Iop_Sub64, addr, mkexpr(sp)))
      );

      /* "addr - SP + RZ" */
      IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
      addStmtToIRSB(
         sbOut,
         assign(diff,
                tyAddr == Ity_I32
                   ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
                   : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
      );

      /* guardA == "guard on the address" */
      IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
      addStmtToIRSB(
         sbOut,
         assign(guardA,
                tyAddr == Ity_I32
                   ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
                   : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
      );
      di->guard = mkexpr(guardA);
   }

   /* If there's a guard on the access itself (as supplied by the
      caller of this routine), we need to AND that in to any guard we
      might already have. */
   if (guard) {
      di->guard = mk_And1(sbOut, di->guard, guard);
   }

   /* Add the helper. */
   addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
}
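/* Concrete instance of the above: with --check-stack-refs=no, a 4-byte
   non-guarded load is turned into a conditional call to
   evh__mem_help_cread_4(addr) whose guard is
      (addr - SP + VG_STACK_REDZONE_SZB) >u 16384
   (THRESH = 4096 * 4), so accesses lying within roughly four pages above
   SP-minus-redzone never reach the race checker at all. */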
/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (sidedly safe w/ False as the safe value) */
static Bool is_in_dynamic_linker_shared_object( Addr ga )
{
   DebugInfo* dinfo;
   const HChar* soname;

   dinfo = VG_(find_DebugInfo)( VG_(current_DiEpoch)(), ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

   return VG_(is_soname_ld_so)(soname);
}
static
void addInvalidateCachedStack (IRSB*   bbOut,
                               Int     goff_sp_s1,
                               Int     hWordTy_szB)
{
   /* Invalidate cached stack: Write 0 in the shadow1 offset 0 */
   addStmtToIRSB( bbOut,
                  IRStmt_Put(goff_sp_s1,
                             hWordTy_szB == 4 ?
                             mkU32(0) : mkU64(0)));
   /// ???? anything more efficient than assign a Word???
}
4761 IRSB
* hg_instrument ( VgCallbackClosure
* closure
,
4763 const VexGuestLayout
* layout
,
4764 const VexGuestExtents
* vge
,
4765 const VexArchInfo
* archinfo_host
,
4766 IRType gWordTy
, IRType hWordTy
)
4770 Addr cia
; /* address of current insn */
4772 Bool inLDSO
= False
;
4773 Addr inLDSOmask4K
= 1; /* mismatches on first check */
4775 // Set to True when SP must be fixed up when taking a stack trace for the
4776 // mem accesses in the rest of the instruction
4777 Bool fixupSP_needed
= False
;
4779 const Int goff_SP
= layout
->offset_SP
;
4780 /* SP in shadow1 indicates if cached stack is valid.
4781 We have to invalidate the cached stack e.g. when seeing call or ret. */
4782 const Int goff_SP_s1
= layout
->total_sizeB
+ layout
->offset_SP
;
4783 const Int hWordTy_szB
= sizeofIRType(hWordTy
);
4785 if (gWordTy
!= hWordTy
) {
4786 /* We don't currently support this case. */
4787 VG_(tool_panic
)("host/guest word size mismatch");
4790 if (VKI_PAGE_SIZE
< 4096 || VG_(log2
)(VKI_PAGE_SIZE
) == -1) {
4791 VG_(tool_panic
)("implausible or too-small VKI_PAGE_SIZE");
4795 bbOut
= emptyIRSB();
4796 bbOut
->tyenv
= deepCopyIRTypeEnv(bbIn
->tyenv
);
4797 bbOut
->next
= deepCopyIRExpr(bbIn
->next
);
4798 bbOut
->jumpkind
= bbIn
->jumpkind
;
4799 bbOut
->offsIP
= bbIn
->offsIP
;
4801 // Copy verbatim any IR preamble preceding the first IMark
4803 while (i
< bbIn
->stmts_used
&& bbIn
->stmts
[i
]->tag
!= Ist_IMark
) {
4804 addStmtToIRSB( bbOut
, bbIn
->stmts
[i
] );
4808 // Get the first statement, and initial cia from it
4809 tl_assert(bbIn
->stmts_used
> 0);
4810 tl_assert(i
< bbIn
->stmts_used
);
4811 st
= bbIn
->stmts
[i
];
4812 tl_assert(Ist_IMark
== st
->tag
);
4813 cia
= st
->Ist
.IMark
.addr
;
4816 for (/*use current i*/; i
< bbIn
->stmts_used
; i
++) {
4817 st
= bbIn
->stmts
[i
];
4819 tl_assert(isFlatIRStmt(st
));
4822 /* No memory reference, but if we do anything else than
4823 Ijk_Boring, indicate to helgrind that the previously
4824 recorded stack is invalid.
4825 For Ijk_Boring, also invalidate the stack if the exit
4826 instruction has no CF info. This heuristic avoids cached
4827 stack trace mismatch in some cases such as longjmp
4828 implementation. Similar logic below for the bb exit. */
4829 if (HG_(clo_delta_stacktrace
)
4830 && (st
->Ist
.Exit
.jk
!= Ijk_Boring
|| ! VG_(has_CF_info
)(cia
)))
4831 addInvalidateCachedStack(bbOut
, goff_SP_s1
, hWordTy_szB
);
4835 /* None of these can contain any memory references. */
4838 /* This cannot contain any memory references. */
4839 /* If we see a put to SP, from now on in this instruction,
4840 the SP needed to unwind has to be fixed up by one word.
4841 This very simple heuristic ensures correct unwinding in the
4842 typical case of a push instruction. If we need to cover more
4843 cases, then we need to better track how the SP is modified by
4844 the instruction (and calculate a precise sp delta), rather than
4845 assuming that the SP is decremented by a Word size. */
4846 if (HG_(clo_delta_stacktrace
) && st
->Ist
.Put
.offset
== goff_SP
) {
4847 fixupSP_needed
= True
;
4851 /* This cannot contain any memory references. */
4855 fixupSP_needed
= False
;
4857 /* no mem refs, but note the insn address. */
4858 cia
= st
->Ist
.IMark
.addr
;
4860 /* Don't instrument the dynamic linker. It generates a
4861 lot of races which we just expensively suppress, so
4864 Avoid flooding is_in_dynamic_linker_shared_object with
4865 requests by only checking at transitions between 4K
4867 if ((cia
& ~(Addr
)0xFFF) != inLDSOmask4K
) {
4868 if (0) VG_(printf
)("NEW %#lx\n", cia
);
4869 inLDSOmask4K
= cia
& ~(Addr
)0xFFF;
4870 inLDSO
= is_in_dynamic_linker_shared_object(cia
);
4872 if (0) VG_(printf
)("old %#lx\n", cia
);
4877 switch (st
->Ist
.MBE
.event
) {
4879 case Imbe_CancelReservation
:
4880 break; /* not interesting */
4887 /* Atomic read-modify-write cycle. Just pretend it's a
4889 IRCAS
* cas
= st
->Ist
.CAS
.details
;
4890 Bool isDCAS
= cas
->oldHi
!= IRTemp_INVALID
;
4892 tl_assert(cas
->expdHi
);
4893 tl_assert(cas
->dataHi
);
4895 tl_assert(!cas
->expdHi
);
4896 tl_assert(!cas
->dataHi
);
4898 /* Just be boring about it. */
4900 instrument_mem_access(
4904 * sizeofIRType(typeOfIRExpr(bbIn
->tyenv
, cas
->dataLo
)),
4905 False
/*!isStore*/, fixupSP_needed
,
4906 hWordTy_szB
, goff_SP
, goff_SP_s1
,
4914 /* We pretend store-conditionals don't exist, viz, ignore
4915 them. Whereas load-linked's are treated the same as
4918 if (st
->Ist
.LLSC
.storedata
== NULL
) {
4920 dataTy
= typeOfIRTemp(bbIn
->tyenv
, st
->Ist
.LLSC
.result
);
4922 instrument_mem_access(
4925 sizeofIRType(dataTy
),
4926 False
/*!isStore*/, fixupSP_needed
,
4927 hWordTy_szB
, goff_SP
, goff_SP_s1
,
4940 instrument_mem_access(
4943 sizeofIRType(typeOfIRExpr(bbIn
->tyenv
, st
->Ist
.Store
.data
)),
4944 True
/*isStore*/, fixupSP_needed
,
4945 hWordTy_szB
, goff_SP
, goff_SP_s1
,
4952 IRStoreG
* sg
= st
->Ist
.StoreG
.details
;
4953 IRExpr
* data
= sg
->data
;
4954 IRExpr
* addr
= sg
->addr
;
4955 IRType type
= typeOfIRExpr(bbIn
->tyenv
, data
);
4956 tl_assert(type
!= Ity_INVALID
);
4957 instrument_mem_access( bbOut
, addr
, sizeofIRType(type
),
4958 True
/*isStore*/, fixupSP_needed
,
4960 goff_SP
, goff_SP_s1
, sg
->guard
);
4965 IRLoadG
* lg
= st
->Ist
.LoadG
.details
;
4966 IRType type
= Ity_INVALID
; /* loaded type */
4967 IRType typeWide
= Ity_INVALID
; /* after implicit widening */
4968 IRExpr
* addr
= lg
->addr
;
4969 typeOfIRLoadGOp(lg
->cvt
, &typeWide
, &type
);
4970 tl_assert(type
!= Ity_INVALID
);
4971 instrument_mem_access( bbOut
, addr
, sizeofIRType(type
),
4972 False
/*!isStore*/, fixupSP_needed
,
4974 goff_SP
, goff_SP_s1
, lg
->guard
);
4979 IRExpr
* data
= st
->Ist
.WrTmp
.data
;
4980 if (data
->tag
== Iex_Load
) {
4982 instrument_mem_access(
4984 data
->Iex
.Load
.addr
,
4985 sizeofIRType(data
->Iex
.Load
.ty
),
4986 False
/*!isStore*/, fixupSP_needed
,
4987 hWordTy_szB
, goff_SP
, goff_SP_s1
,
4997 IRDirty
* d
= st
->Ist
.Dirty
.details
;
4998 if (d
->mFx
!= Ifx_None
) {
4999 /* This dirty helper accesses memory. Collect the
5001 tl_assert(d
->mAddr
!= NULL
);
5002 tl_assert(d
->mSize
!= 0);
5003 dataSize
= d
->mSize
;
5004 if (d
->mFx
== Ifx_Read
|| d
->mFx
== Ifx_Modify
) {
5006 instrument_mem_access(
5007 bbOut
, d
->mAddr
, dataSize
,
5008 False
/*!isStore*/, fixupSP_needed
,
5009 hWordTy_szB
, goff_SP
, goff_SP_s1
,
5014 if (d
->mFx
== Ifx_Write
|| d
->mFx
== Ifx_Modify
) {
5016 instrument_mem_access(
5017 bbOut
, d
->mAddr
, dataSize
,
5018 True
/*isStore*/, fixupSP_needed
,
5019 hWordTy_szB
, goff_SP
, goff_SP_s1
,
5025 tl_assert(d
->mAddr
== NULL
);
5026 tl_assert(d
->mSize
== 0);
5036 } /* switch (st->tag) */
5038 addStmtToIRSB( bbOut
, st
);
5039 } /* iterate over bbIn->stmts */
5041 // See above the case Ist_Exit:
5042 if (HG_(clo_delta_stacktrace
)
5043 && (bbOut
->jumpkind
!= Ijk_Boring
|| ! VG_(has_CF_info
)(cia
)))
5044 addInvalidateCachedStack(bbOut
, goff_SP_s1
, hWordTy_szB
);
5056 /*----------------------------------------------------------------*/
5057 /*--- Client requests ---*/
5058 /*----------------------------------------------------------------*/
5060 /* Sheesh. Yet another goddam finite map. */
5061 static WordFM
* map_pthread_t_to_Thread
= NULL
; /* pthread_t -> Thread* */
5063 static void map_pthread_t_to_Thread_INIT ( void ) {
5064 if (UNLIKELY(map_pthread_t_to_Thread
== NULL
)) {
5065 map_pthread_t_to_Thread
= VG_(newFM
)( HG_(zalloc
), "hg.mpttT.1",
5070 /* A list of Ada dependent tasks and their masters. Used for implementing
5071 the Ada task termination semantic as implemented by the
5072 gcc gnat Ada runtime. */
5075 void* dependent
; // Ada Task Control Block of the Dependent
5076 void* master
; // ATCB of the master
5077 Word master_level
; // level of dependency between master and dependent
5078 Thread
* hg_dependent
; // helgrind Thread* for dependent task.
5080 GNAT_dmml
; // (d)ependent (m)aster (m)aster_(l)evel.
5081 static XArray
* gnat_dmmls
; /* of GNAT_dmml */
5082 static void gnat_dmmls_INIT (void)
5084 if (UNLIKELY(gnat_dmmls
== NULL
)) {
5085 gnat_dmmls
= VG_(newXA
) (HG_(zalloc
), "hg.gnat_md.1",
5087 sizeof(GNAT_dmml
) );
5091 static void xtmemory_report_next_block(XT_Allocs
* xta
, ExeContext
** ec_alloc
)
5093 const MallocMeta
* md
= VG_(HT_Next
)(hg_mallocmeta_table
);
5095 xta
->nbytes
= md
->szB
;
5097 *ec_alloc
= md
->where
;
5101 static void HG_(xtmemory_report
) ( const HChar
* filename
, Bool fini
)
5103 // Make xtmemory_report_next_block ready to be called.
5104 VG_(HT_ResetIter
)(hg_mallocmeta_table
);
5105 VG_(XTMemory_report
)(filename
, fini
, xtmemory_report_next_block
,
5106 VG_(XT_filter_1top_and_maybe_below_main
));
5109 static void print_monitor_help ( void )
5114 "helgrind monitor commands:\n"
5115 " info locks [lock_addr] : show status of lock at addr lock_addr\n"
5116 " with no lock_addr, show status of all locks\n"
5117 " accesshistory <addr> [<len>] : show access history recorded\n"
5118 " for <len> (or 1) bytes at <addr>\n"
5119 " xtmemory [<filename>]\n"
5120 " dump xtree memory profile in <filename> (default xtmemory.kcg.%%p.%%n)\n"
5124 /* return True if request recognised, False otherwise */
5125 static Bool
handle_gdb_monitor_command (ThreadId tid
, HChar
*req
)
5128 HChar s
[VG_(strlen
)(req
)]; /* copy for strtok_r */
5132 VG_(strcpy
) (s
, req
);
5134 wcmd
= VG_(strtok_r
) (s
, " ", &ssaveptr
);
5135 /* NB: if possible, avoid introducing a new command below which
5136 starts with the same first letter(s) as an already existing
5137 command. This ensures a shorter abbreviation for the user. */
5138 switch (VG_(keyword_id
)
5139 ("help info accesshistory xtmemory",
5140 wcmd
, kwd_report_duplicated_matches
)) {
5141 case -2: /* multiple matches */
5143 case -1: /* not found */
5146 print_monitor_help();
5149 wcmd
= VG_(strtok_r
) (NULL
, " ", &ssaveptr
);
5150 switch (kwdid
= VG_(keyword_id
)
5152 wcmd
, kwd_report_all
)) {
5160 Bool lk_shown
= False
;
5161 Bool all_locks
= True
;
5165 wa
= VG_(strtok_r
) (NULL
, " ", &ssaveptr
);
5167 if (VG_(parse_Addr
) (&wa
, &lk_addr
) )
5170 VG_(gdb_printf
) ("missing or malformed address\n");
5173 for (i
= 0, lk
= admin_locks
; lk
; i
++, lk
= lk
->admin_next
) {
5174 if (all_locks
|| lk_addr
== lk
->guestaddr
) {
5176 True
/* show_lock_addrdescr */,
5177 False
/* show_internal_data */);
5182 VG_(gdb_printf
) ("no locks\n");
5183 if (!all_locks
&& !lk_shown
)
5184 VG_(gdb_printf
) ("lock with address %p not found\n",
5193 case 2: /* accesshistory */
5197 if (HG_(clo_history_level
) < 2) {
5199 ("helgrind must be started with --history-level=full"
5200 " to use accesshistory\n");
5203 if (VG_(strtok_get_address_and_size
) (&address
, &szB
, &ssaveptr
)) {
5205 libhb_event_map_access_history (address
, szB
, HG_(print_access
));
5207 VG_(gdb_printf
) ("len must be >=1\n");
5212 case 3: { /* xtmemory */
5214 filename
= VG_(strtok_r
) (NULL
, " ", &ssaveptr
);
5215 HG_(xtmemory_report
)(filename
, False
);
5226 Bool
hg_handle_client_request ( ThreadId tid
, UWord
* args
, UWord
* ret
)
5228 if (!VG_IS_TOOL_USERREQ('H','G',args
[0])
5229 && VG_USERREQ__GDB_MONITOR_COMMAND
!= args
[0])
5232 /* Anything that gets past the above check is one of ours, so we
5233 should be able to handle it. */
5235 /* default, meaningless return value, unless otherwise set */
5240 /* --- --- User-visible client requests --- --- */
5242 case VG_USERREQ__HG_CLEAN_MEMORY
:
5243 if (0) VG_(printf
)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
5245 /* Call die_mem to (expensively) tidy up properly, if there
5246 are any held locks etc in the area. Calling evh__die_mem
5247 and then evh__new_mem is a bit inefficient; probably just
5248 the latter would do. */
5249 if (args
[2] > 0) { /* length */
5250 evh__die_mem(args
[1], args
[2]);
5251 /* and then set it to New */
5252 evh__new_mem(args
[1], args
[2]);
5256 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK
: {
5259 if (0) VG_(printf
)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5261 if (HG_(mm_find_containing_block
)(NULL
, NULL
,
5262 &payload
, &pszB
, args
[1])) {
5264 evh__die_mem(payload
, pszB
);
5265 evh__new_mem(payload
, pszB
);
5274 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED
:
5275 if (0) VG_(printf
)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
5277 if (args
[2] > 0) { /* length */
5278 evh__untrack_mem(args
[1], args
[2]);
5282 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED
:
5283 if (0) VG_(printf
)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
5285 if (args
[2] > 0) { /* length */
5286 evh__new_mem(args
[1], args
[2]);
5290 case _VG_USERREQ__HG_GET_ABITS
:
5291 if (0) VG_(printf
)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
5292 args
[1], args
[2], args
[3]);
5293 UChar
*zzabit
= (UChar
*) args
[2];
5295 || VG_(am_is_valid_for_client
)((Addr
)zzabit
, (SizeT
)args
[3],
5296 VKI_PROT_READ
|VKI_PROT_WRITE
))
5297 *ret
= (UWord
) libhb_srange_get_abits ((Addr
) args
[1],
5304 /* This thread (tid) (a master) is informing us that it has
5305 seen the termination of a dependent task, and that this should
5306 be considered as a join between master and dependent. */
5307 case _VG_USERREQ__HG_GNAT_DEPENDENT_MASTER_JOIN
: {
5309 const Thread
*stayer
= map_threads_maybe_lookup( tid
);
5310 const void *dependent
= (void*)args
[1];
5311 const void *master
= (void*)args
[2];
5314 VG_(printf
)("HG_GNAT_DEPENDENT_MASTER_JOIN (tid %d): "
5315 "self_id = %p Thread* = %p dependent %p\n",
5316 (Int
)tid
, master
, stayer
, dependent
);
5319 /* Similar loop as for master completed hook below, but stops at
5320 the first matching occurrence, only comparing master and
5322 for (n
= VG_(sizeXA
) (gnat_dmmls
) - 1; n
>= 0; n
--) {
5323 GNAT_dmml
*dmml
= (GNAT_dmml
*) VG_(indexXA
)(gnat_dmmls
, n
);
5324 if (dmml
->master
== master
5325 && dmml
->dependent
== dependent
) {
5327 VG_(printf
)("quitter %p dependency to stayer %p (join)\n",
5328 dmml
->hg_dependent
->hbthr
, stayer
->hbthr
);
5329 tl_assert(dmml
->hg_dependent
->hbthr
!= stayer
->hbthr
);
5330 generate_quitter_stayer_dependence (dmml
->hg_dependent
->hbthr
,
5332 VG_(removeIndexXA
) (gnat_dmmls
, n
);
5339 /* --- --- Client requests for Helgrind's use only --- --- */
5341 /* Some thread is telling us its pthread_t value. Record the
5342 binding between that and the associated Thread*, so we can
5343 later find the Thread* again when notified of a join by the
5345 case _VG_USERREQ__HG_SET_MY_PTHREAD_T
: {
5346 Thread
* my_thr
= NULL
;
5348 VG_(printf
)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int
)tid
,
5350 map_pthread_t_to_Thread_INIT();
5351 my_thr
= map_threads_maybe_lookup( tid
);
5352 /* This assertion should hold because the map_threads (tid to
5353 Thread*) binding should have been made at the point of
5354 low-level creation of this thread, which should have
5355 happened prior to us getting this client request for it.
5356 That's because this client request is sent from
5357 client-world from the 'thread_wrapper' function, which
5358 only runs once the thread has been low-level created. */
5359 tl_assert(my_thr
!= NULL
);
5360 /* So now we know that (pthread_t)args[1] is associated with
5361 (Thread*)my_thr. Note that down. */
5363 VG_(printf
)("XXXX: bind pthread_t %p to Thread* %p\n",
5364 (void*)args
[1], (void*)my_thr
);
5365 VG_(addToFM
)( map_pthread_t_to_Thread
, (UWord
)args
[1], (UWord
)my_thr
);
5367 if (my_thr
->coretid
!= 1) {
5368 /* FIXME: hardwires assumption about identity of the root thread. */
5369 if (HG_(clo_ignore_thread_creation
)) {
5370 HG_(thread_leave_pthread_create
)(my_thr
);
5371 HG_(thread_leave_synchr
)(my_thr
);
5372 tl_assert(my_thr
->synchr_nesting
== 0);
5378 case _VG_USERREQ__HG_PTH_API_ERROR
: {
5379 Thread
* my_thr
= NULL
;
5380 map_pthread_t_to_Thread_INIT();
5381 my_thr
= map_threads_maybe_lookup( tid
);
5382 tl_assert(my_thr
); /* See justification above in SET_MY_PTHREAD_T */
5383 #if defined(VGO_freebsd)
5384 if (HG_(get_pthread_synchr_nesting_level
)(tid
) >= 1) {
5388 HG_(record_error_PthAPIerror
)(
5389 my_thr
, (HChar
*)args
[1], (UWord
)args
[2], (HChar
*)args
[3] );
5393 /* This thread (tid) has completed a join with the quitting
5394 thread whose pthread_t is in args[1]. */
5395 case _VG_USERREQ__HG_PTHREAD_JOIN_POST
: {
5396 Thread
* thr_q
= NULL
; /* quitter Thread* */
5399 VG_(printf
)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int
)tid
,
5401 map_pthread_t_to_Thread_INIT();
5402 found
= VG_(lookupFM
)( map_pthread_t_to_Thread
,
5403 NULL
, (UWord
*)&thr_q
, (UWord
)args
[1] );
5404 /* Can this fail? It would mean that our pthread_join
5405 wrapper observed a successful join on args[1] yet that
5406 thread never existed (or at least, it never lodged an
5407 entry in the mapping (via SET_MY_PTHREAD_T)). Which
5408 sounds like a bug in the threads library. */
5409 // FIXME: get rid of this assertion; handle properly
5413 VG_(printf
)(".................... quitter Thread* = %p\n",
5415 evh__HG_PTHREAD_JOIN_POST( tid
, thr_q
);
5420 /* This thread (tid) is informing us of its master. */
5421 case _VG_USERREQ__HG_GNAT_MASTER_HOOK
: {
5423 dmml
.dependent
= (void*)args
[1];
5424 dmml
.master
= (void*)args
[2];
5425 dmml
.master_level
= (Word
)args
[3];
5426 dmml
.hg_dependent
= map_threads_maybe_lookup( tid
);
5427 tl_assert(dmml
.hg_dependent
);
5430 VG_(printf
)("HG_GNAT_MASTER_HOOK (tid %d): "
5431 "dependent = %p master = %p master_level = %ld"
5432 " dependent Thread* = %p\n",
5433 (Int
)tid
, dmml
.dependent
, dmml
.master
, dmml
.master_level
,
5436 VG_(addToXA
) (gnat_dmmls
, &dmml
);
5440 /* This thread (tid) is informing us that it has completed a
5442 case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK
: {
5444 const Thread
*stayer
= map_threads_maybe_lookup( tid
);
5445 const void *master
= (void*)args
[1];
5446 const Word master_level
= (Word
) args
[2];
5450 VG_(printf
)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5451 "self_id = %p master_level = %ld Thread* = %p\n",
5452 (Int
)tid
, master
, master_level
, stayer
);
5455 /* Reverse loop on the array, simulating a pthread_join for
5456 the Dependent tasks of the completed master, and removing
5457 them from the array. */
5458 for (n
= VG_(sizeXA
) (gnat_dmmls
) - 1; n
>= 0; n
--) {
5459 GNAT_dmml
*dmml
= (GNAT_dmml
*) VG_(indexXA
)(gnat_dmmls
, n
);
5460 if (dmml
->master
== master
5461 && dmml
->master_level
== master_level
) {
5463 VG_(printf
)("quitter %p dependency to stayer %p\n",
5464 dmml
->hg_dependent
->hbthr
, stayer
->hbthr
);
5465 tl_assert(dmml
->hg_dependent
->hbthr
!= stayer
->hbthr
);
5466 generate_quitter_stayer_dependence (dmml
->hg_dependent
->hbthr
,
5468 VG_(removeIndexXA
) (gnat_dmmls
, n
);
5474 /* EXPOSITION only: by intercepting lock init events we can show
5475 the user where the lock was initialised, rather than only
5476 being able to show where it was first locked. Intercepting
5477 lock initialisations is not necessary for the basic operation
5478 of the race checker. */
5479 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST
:
5480 evh__HG_PTHREAD_MUTEX_INIT_POST( tid
, (void*)args
[1], args
[2] );
5483 /* mutex=arg[1], mutex_is_init=arg[2] */
5484 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE
:
5485 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid
, (void*)args
[1], args
[2] != 0 );
5488 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE
: // pth_mx_t*
5489 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5490 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5491 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid
, (void*)args
[1] );
5494 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST
: // pth_mx_t*
5495 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5496 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid
, (void*)args
[1] );
5497 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5500 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE
: // pth_mx_t*
5501 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5502 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5503 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid
, (void*)args
[1], args
[2] );
5506 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST
: // pth_mx_t*, long
5507 if ((args
[2] == True
) // lock actually taken
5508 && (HG_(get_pthread_create_nesting_level
)(tid
) == 0))
5509 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid
, (void*)args
[1] );
5510 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5513 /* This thread is about to do pthread_cond_signal on the
5514 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
5515 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE
:
5516 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE
:
5517 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5518 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid
, (void*)args
[1] );
5521 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST
:
5522 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST
:
5523 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5526 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5527 Returns a flag indicating whether or not the mutex is believed to be
5528 valid for this operation. */
5529 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE
: {
5530 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5532 = evh__HG_PTHREAD_COND_WAIT_PRE( tid
, (void*)args
[1],
5534 *ret
= mutex_is_valid
? 1 : 0;
5538 /* Thread successfully completed pthread_cond_init:
5539 cond=arg[1], cond_attr=arg[2] */
5540 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST
:
5541 evh__HG_PTHREAD_COND_INIT_POST( tid
,
5542 (void*)args
[1], (void*)args
[2] );
5545 /* cond=arg[1], cond_is_init=arg[2] */
5546 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE
:
5547 evh__HG_PTHREAD_COND_DESTROY_PRE( tid
, (void*)args
[1], args
[2] != 0 );
5550 /* Thread completed pthread_cond_wait, cond=arg[1],
5551 mutex=arg[2], timeout=arg[3], successful=arg[4] */
5552 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST
:
5553 if (args
[4] == True
)
5554 evh__HG_PTHREAD_COND_WAIT_POST( tid
,
5555 (void*)args
[1], (void*)args
[2],
5557 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5560 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST
:
5561 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid
, (void*)args
[1] );
5564 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE
:
5565 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid
, (void*)args
[1] );
5568 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
5569 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE
:
5570 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5571 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5572 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid
, (void*)args
[1],
5576 /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
5577 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST
:
5578 if ((args
[3] == True
)
5579 && (HG_(get_pthread_create_nesting_level
)(tid
) == 0))
5580 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid
, (void*)args
[1], args
[2] );
5581 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5584 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE
:
5585 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5586 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5587 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid
, (void*)args
[1] );
5590 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST
:
5591 if (HG_(get_pthread_create_nesting_level
)(tid
) == 0)
5592 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid
, (void*)args
[1] );
5593 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5596 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST
: /* sem_t*, unsigned long */
5597 evh__HG_POSIX_SEM_INIT_POST( tid
, (void*)args
[1], args
[2] );
5600 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE
: /* sem_t* */
5601 evh__HG_POSIX_SEM_DESTROY_PRE( tid
, (void*)args
[1] );
5604 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE
: /* sem_t* */
5605 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5606 evh__HG_POSIX_SEM_POST_PRE( tid
, (void*)args
[1] );
5609 case _VG_USERREQ__HG_POSIX_SEM_POST_POST
: /* sem_t* */
5610 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5613 case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE
: /* sem_t* */
5614 HG_(thread_enter_synchr
)(map_threads_maybe_lookup(tid
));
5617 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST
: /* sem_t*, long tookLock */
5618 #if defined(VGO_freebsd)
5619 if (args
[2] == True
&& HG_(get_pthread_synchr_nesting_level
)(tid
) == 1)
5620 evh__HG_POSIX_SEM_WAIT_POST( tid
, (void*)args
[1] );
5622 if (args
[2] == True
)
5623 evh__HG_POSIX_SEM_WAIT_POST( tid
, (void*)args
[1] );
5625 HG_(thread_leave_synchr
)(map_threads_maybe_lookup(tid
));
5628 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE
:
5629 /* pth_bar_t*, ulong count, ulong resizable */
5630 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid
, (void*)args
[1],
5634 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE
:
5635 /* pth_bar_t*, ulong newcount */
5636 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid
, (void*)args
[1],
5640 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE
:
5642 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid
, (void*)args
[1] );
5645 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE
:
5647 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid
, (void*)args
[1] );
      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;
      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }
      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;
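
      /* Client-side view (a sketch, not part of this file): in recent
         helgrind.h the ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER /
         ANNOTATE_HAPPENS_BEFORE_FORGET_ALL macros expand to the three
         requests handled above, roughly:

            // producer thread, before publishing the data:
            ANNOTATE_HAPPENS_BEFORE(&flag);  // -> _VG_USERREQ__HG_USERSO_SEND_PRE
            // consumer thread, after observing the publication:
            ANNOTATE_HAPPENS_AFTER(&flag);   // -> _VG_USERREQ__HG_USERSO_RECV_POST

         The tag (here &flag, a placeholder) is an arbitrary UWord naming a
         synchronisation object; SEND_PRE records a happens-before point on
         it and RECV_POST imports that point into the receiving thread. */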
      case VG_USERREQ__GDB_MONITOR_COMMAND: {
         Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
         if (handled)
            *ret = 1;
         else
            *ret = 0;
         return handled;
      }
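
      /* The GDB_MONITOR_COMMAND case above is typically reached from a gdb
         attached via vgdb, e.g. with a command such as "monitor info locks"
         (see the Helgrind monitor-command documentation);
         handle_gdb_monitor_command reports whether it recognised the
         command text. */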
      case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_enter_pthread_create)(thr);
            HG_(thread_enter_synchr)(thr);
         }
         break;
      }

      case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
         Thread *thr = map_threads_maybe_lookup(tid);
         if (HG_(clo_ignore_thread_creation)) {
            HG_(thread_leave_pthread_create)(thr);
            HG_(thread_leave_synchr)(thr);
         }
         break;
      }
      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE:   // pth_mx_t*, long tryLock
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED:     // void*, long isW
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED:     // void*
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;
#if defined(VGO_solaris)
      case _VG_USERREQ__HG_RTLD_BIND_GUARD:
         evh__HG_RTLD_BIND_GUARD(tid, args[1]);
         break;

      case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
         evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
         break;
#endif /* VGO_solaris */

      default:
         /* Unhandled Helgrind client request! */
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown Helgrind client request code %llx\n",
                      (ULong)args[0]);
         return False;
   }

   return True;
}
/*----------------------------------------------------------------*/
/*----------------------------------------------------------------*/
static Bool hg_process_cmd_line_option ( const HChar* arg )
{
   const HChar* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);
   else if VG_BINT_CLO(arg, "--history-backtrace-size",
                            HG_(clo_history_backtrace_size), 2, 500) {}
   // 500 just in case someone with a lot of CPU and memory would like to use
   // the same value for --num-callers and this.

   else if VG_BOOL_CLO(arg, "--delta-stacktrace",
                            HG_(clo_delta_stacktrace)) {}

   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}
   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}

   else if VG_XACT_CLO(arg, "--vts-pruning=never",
                            HG_(clo_vts_pruning), 0);
   else if VG_XACT_CLO(arg, "--vts-pruning=auto",
                            HG_(clo_vts_pruning), 1);
   else if VG_XACT_CLO(arg, "--vts-pruning=always",
                            HG_(clo_vts_pruning), 2);

   else if VG_BOOL_CLO(arg, "--check-stack-refs",
                            HG_(clo_check_stack_refs)) {}
   else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
                            HG_(clo_ignore_thread_creation)) {}

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
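
/* Illustrative invocation exercising the options parsed above (the program
   name and values are placeholders, not a recommendation):

      valgrind --tool=helgrind --history-level=approx \
               --conflict-cache-size=5000000 --free-is-write=yes ./myprog

   Each flag is routed through hg_process_cmd_line_option; anything this
   function does not recognise falls through to
   VG_(replacement_malloc_process_cmd_line_option). */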
static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --history-backtrace-size=<number>  record <number> callers for full\n"
"                                       history level [8]\n"
"    --delta-stacktrace=no|yes [yes on linux amd64/x86]\n"
"       no  : always compute a full history stacktrace from unwind info\n"
"       yes : derive a stacktrace from the previous stacktrace\n"
"             if there was no call/return or similar instruction\n"
"    --conflict-cache-size=N   size of 'full' history cache [2000000]\n"
"    --check-stack-refs=no|yes race-check reads and writes on the\n"
"                              main stack and thread stacks? [yes]\n"
"    --ignore-thread-creation=yes|no Ignore activities during thread\n"
"                              creation [%s]\n",
      HG_(clo_ignore_thread_creation) ? "yes" : "no"
   );
}
static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
   VG_(printf)(
"    --vts-pruning=never|auto|always [auto]\n"
"       never:   is never done (may cause big space leaks in Helgrind)\n"
"       auto:    done just often enough to keep space usage under control\n"
"       always:  done after every VTS GC (mostly just a big time waster)\n"
   );
}
static void hg_print_stats (void)
{
   HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
   if (HG_(clo_track_lockorders)) {
      HG_(ppWSUstats)( univ_laog, "univ_laog" );
   }

   //zz   VG_(printf)("\n");
   //zz   VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
   //zz   VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
   //zz   VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz   VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz   VG_(printf)(" hbefore: %'10lu   of which slow\n",
   //zz               stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz   VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz               stats__hbefore_stk_hwm);
   //zz   VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
   //zz   VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);
   VG_(printf)(" locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("client malloc-ed blocks: %'8u\n",
               VG_(HT_count_nodes)(hg_mallocmeta_table));

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   libhb_shutdown(True); // This in fact only prints stats.
}
static void hg_fini ( Int exitcode )
{
   HG_(xtmemory_report) (VG_(clo_xtree_memory_file), True);

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");
}
/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace_with_deltas)
                       ( tid, frames, (UInt)nRequest,
                         NULL, NULL, 0/*first_ip_delta*/,
                         thr->first_sp_delta );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}
static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   if (HG_(clo_delta_stacktrace) && VG_(clo_vex_control).guest_chase) {
      if (VG_(clo_verbosity) >= 2)
         VG_(message)(Vg_UserMsg,
                      "helgrind --delta-stacktrace=yes only works with "
                      "--vex-guest-chase=no\n"
                      "=> (re-setting it to 'no')\n");
      VG_(clo_vex_control).guest_chase = False;
   }

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
   if (VG_(clo_xtree_memory) == Vg_XTMemory_Full)
      // Activate full xtree memory profiling.
      VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
}
static void hg_info_location (DiEpoch ep, Addr a)
{
   (void) HG_(get_and_pp_addrdescr) (ep, a);
}
static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2024, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );
   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         (True);
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));
   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_print_stats)   (hg_print_stats);
   VG_(needs_info_location) (hg_info_location);
   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_new_aligned,
                                   hg_cli____builtin_vec_new,
                                   hg_cli____builtin_vec_new_aligned,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_delete_aligned,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli____builtin_vec_delete_aligned,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */
   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );
   VG_(track_new_mem_stack_4)     ( evh__new_mem_stack_4 );
   VG_(track_new_mem_stack_8)     ( evh__new_mem_stack_8 );
   VG_(track_new_mem_stack_12)    ( evh__new_mem_stack_12 );
   VG_(track_new_mem_stack_16)    ( evh__new_mem_stack_16 );
   VG_(track_new_mem_stack_32)    ( evh__new_mem_stack_32 );
   VG_(track_new_mem_stack_112)   ( evh__new_mem_stack_112 );
   VG_(track_new_mem_stack_128)   ( evh__new_mem_stack_128 );
   VG_(track_new_mem_stack_144)   ( evh__new_mem_stack_144 );
   VG_(track_new_mem_stack_160)   ( evh__new_mem_stack_160 );
   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );

   /* evh__die_mem calls at the end libhb_srange_noaccess_NoFX
      which has no effect. We do not use VG_(track_die_mem_stack),
      as this would be an expensive way to do nothing. */
   // VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);
   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code) ( evh__stop_client_code );
   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );
   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000/*elems per pool*/,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free) );
   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/