/*--------------------------------------------------------------------*/
/*--- LibHB: a library for implementing and checking               ---*/
/*--- the happens-before relationship in concurrent programs.      ---*/
/*---                                                      libhb.h ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of LibHB, a library for implementing and checking
   the happens-before relationship in concurrent programs.

   Copyright (C) 2008-2017 OpenWorks Ltd
      info@open-works.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#ifndef __LIBHB_H
#define __LIBHB_H

/* Abstract to user: thread identifiers */
/* typedef struct _Thr Thr; */ /* now in hg_lock_n_thread.h */

/* Abstract to user: synchronisation objects */
/* typedef struct _SO SO; */ /* now in hg_lock_n_thread.h */

/* Initialise the library; returns the Thr* for the root thread.
   'shadow_alloc' should never return NULL; instead it should simply
   not return if it encounters an out-of-memory condition. */
Thr* libhb_init (
        void        (*get_stacktrace)( Thr*, Addr*, UWord ),
        ExeContext* (*get_EC)( Thr* )
     );

/* Shut down the library, and print stats (in fact that's _all_
   this is for). */
void libhb_shutdown ( Bool show_stats );

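/* Illustrative sketch (not part of the API): a client tool might wire
   libhb up roughly as follows, assuming tool-supplied callbacks
   'my_get_stacktrace' and 'my_get_EC' (hypothetical names):

      Thr* root_thr = libhb_init( my_get_stacktrace, my_get_EC );
      // ... run the instrumented program ...
      libhb_shutdown( True );  // True: print stats on the way out
*/
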
/* Thread creation: returns Thr* for new thread */
Thr* libhb_create ( Thr* parent );

/* Thread async exit */
void libhb_async_exit ( Thr* exitter );
void libhb_joinedwith_done ( Thr* exitter );

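/* Illustrative sketch (not part of the API): modelling thread creation,
   exit and join in a client tool that tracks one Thr* per guest thread
   (hypothetical variable names):

      Thr* child = libhb_create( parent_thr );   // at pthread_create
      // ... child runs ...
      libhb_async_exit( child );                 // when the child exits
      libhb_joinedwith_done( child );            // once it has been joined with
*/
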
/* Synchronisation objects (abstract to caller) */

/* Allocate a new one (alloc'd by library) */
SO* libhb_so_alloc ( void );

/* Dealloc one */
void libhb_so_dealloc ( SO* so );

/* Send a message via a sync object.  If strong_send is True, the
   resulting inter-thread dependency seen by a future receiver of this
   message will be a dependency on this thread only.  That is, in a
   strong send, the VC inside the SO is replaced by the clock of the
   sending thread.  For a weak send, the sender's VC is joined into
   that already in the SO, if any.  This subtlety is needed to model
   rwlocks: a strong send corresponds to releasing a rwlock that had
   been w-held (or releasing a standard mutex).  A weak send
   corresponds to releasing a rwlock that has been r-held.

   (Rationale: since in general many threads may hold a rwlock in
   r-mode, a weak send facility is necessary so that the final SO
   reflects the join of the VCs of all the threads releasing the
   rwlock, rather than merely holding the VC of the most recent thread
   to release it.) */
void libhb_so_send ( Thr* thr, SO* so, Bool strong_send );

/* Recv a message from a sync object.  If strong_recv is True, the
   resulting inter-thread dependency is considered adequate to induce
   a h-b ordering on both reads and writes.  If it is False, the
   implied h-b ordering exists only for reads, not writes.  This
   subtlety is required in order to support reader-writer locks: a
   thread doing a write-acquire of a rwlock (or acquiring a normal
   mutex) models this by doing a strong receive.  A thread doing a
   read-acquire of a rwlock models this by doing a !strong_recv. */
void libhb_so_recv ( Thr* thr, SO* so, Bool strong_recv );

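/* Illustrative sketch (not part of the API): how a tool might model
   mutexes and rwlocks with send/recv, assuming one SO per lock
   (hypothetical helper names):

      // Plain mutex: unlock is a strong send, lock is a strong recv.
      void model_mutex_unlock ( Thr* t, SO* so ) { libhb_so_send( t, so, True ); }
      void model_mutex_lock   ( Thr* t, SO* so ) { libhb_so_recv( t, so, True ); }

      // rwlock: w-release/w-acquire are strong; r-release/r-acquire are weak.
      void model_rw_unlock_w  ( Thr* t, SO* so ) { libhb_so_send( t, so, True  ); }
      void model_rw_unlock_r  ( Thr* t, SO* so ) { libhb_so_send( t, so, False ); }
      void model_rw_lock_w    ( Thr* t, SO* so ) { libhb_so_recv( t, so, True  ); }
      void model_rw_lock_r    ( Thr* t, SO* so ) { libhb_so_recv( t, so, False ); }
*/
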
/* Has this SO ever been sent on? */
Bool libhb_so_everSent ( SO* so );

/* Memory accesses (1/2/4/8 byte size).  They report a race if one is
   found. */
#define LIBHB_CWRITE_1(_thr,_a)    zsm_sapply08_f__msmcwrite((_thr),(_a))
#define LIBHB_CWRITE_2(_thr,_a)    zsm_sapply16_f__msmcwrite((_thr),(_a))
#define LIBHB_CWRITE_4(_thr,_a)    zsm_sapply32_f__msmcwrite((_thr),(_a))
#define LIBHB_CWRITE_8(_thr,_a)    zsm_sapply64_f__msmcwrite((_thr),(_a))
#define LIBHB_CWRITE_N(_thr,_a,_n) zsm_sapplyNN_f__msmcwrite((_thr),(_a),(_n))

#define LIBHB_CREAD_1(_thr,_a)    zsm_sapply08_f__msmcread((_thr),(_a))
#define LIBHB_CREAD_2(_thr,_a)    zsm_sapply16_f__msmcread((_thr),(_a))
#define LIBHB_CREAD_4(_thr,_a)    zsm_sapply32_f__msmcread((_thr),(_a))
#define LIBHB_CREAD_8(_thr,_a)    zsm_sapply64_f__msmcread((_thr),(_a))
#define LIBHB_CREAD_N(_thr,_a,_n) zsm_sapplyNN_f__msmcread((_thr),(_a),(_n))

void zsm_sapply08_f__msmcwrite ( Thr* thr, Addr a );
void zsm_sapply16_f__msmcwrite ( Thr* thr, Addr a );
void zsm_sapply32_f__msmcwrite ( Thr* thr, Addr a );
void zsm_sapply64_f__msmcwrite ( Thr* thr, Addr a );
void zsm_sapplyNN_f__msmcwrite ( Thr* thr, Addr a, SizeT len );

void zsm_sapply08_f__msmcread ( Thr* thr, Addr a );
void zsm_sapply16_f__msmcread ( Thr* thr, Addr a );
void zsm_sapply32_f__msmcread ( Thr* thr, Addr a );
void zsm_sapply64_f__msmcread ( Thr* thr, Addr a );
void zsm_sapplyNN_f__msmcread ( Thr* thr, Addr a, SizeT len );

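/* Illustrative sketch (not part of the API): a client tool's
   instrumentation would typically emit one LIBHB_C{READ,WRITE}_* call
   per guest access, e.g. for a 4-byte store and an N-byte block read
   (hypothetical call sites):

      LIBHB_CWRITE_4( thr, addr );        // guest did a 32-bit write at addr
      LIBHB_CREAD_N ( thr, src, nbytes ); // guest read nbytes starting at src
*/
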
void libhb_Thr_resumes ( Thr* thr );

/* Set memory address ranges to new (freshly allocated), or noaccess
   (no longer accessible).  NB: "AHAE" == "Actually Has An Effect" :-) */
void libhb_srange_new      ( Thr*, Addr, SizeT );
void libhb_srange_untrack  ( Thr*, Addr, SizeT );
void libhb_srange_noaccess_NoFX ( Thr*, Addr, SizeT ); /* IS IGNORED */
void libhb_srange_noaccess_AHAE ( Thr*, Addr, SizeT ); /* IS NOT IGNORED */

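/* Illustrative sketch (not part of the API): a tool intercepting the
   guest's allocator might use these as follows (hypothetical hooks):

      // On malloc(len) returning 'p': the block is fresh, race-free so far.
      libhb_srange_new( thr, p, len );

      // On free(p) of a block of length len, if freed memory should be
      // treated as inaccessible:
      libhb_srange_noaccess_AHAE( thr, p, len );
*/
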
/* Returns the number of addressable bytes in the range [a, a+len)
   (a+len excluded).
   If abits is non-NULL, it must point to a block of memory of length
   len.  In this array, each addressable byte is indicated with 0xff,
   and each non-addressable byte with 0x00. */
UWord libhb_srange_get_abits (Addr a, /*OUT*/UChar *abits, SizeT len);

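/* Illustrative sketch (not part of the API): querying addressability of
   a 64-byte window (hypothetical variables):

      UChar abits[64];
      UWord n_addressable = libhb_srange_get_abits( a, abits, 64 );
      // abits[i] is now 0xff if byte a+i is addressable, 0x00 otherwise.
*/
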
/* Get and set the hgthread (pointer to corresponding Thread
   structure). */
Thread* libhb_get_Thr_hgthread ( Thr* );
void    libhb_set_Thr_hgthread ( Thr*, Thread* );

/* Low level copy of shadow state from [src,src+len) to [dst,dst+len).
   Overlapping moves are checked for and asserted against. */
void libhb_copy_shadow_state ( Thr* thr, Addr src, Addr dst, SizeT len );

/* Call this periodically to give libhb the opportunity to
   garbage-collect its internal data structures. */
void libhb_maybe_GC ( void );

/* Extract info from the conflicting-access machinery. */
Bool libhb_event_map_lookup ( /*OUT*/ExeContext** resEC,
                              /*OUT*/Thr**        resThr,
                              /*OUT*/SizeT*       resSzB,
                              /*OUT*/Bool*        resIsW,
                              /*OUT*/WordSetID*   locksHeldW,
                              Thr* thr, Addr a, SizeT szB, Bool isW );

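/* Illustrative sketch (not part of the API): when reporting a race on
   (a, szB, isW) by 'thr', a tool might ask for the conflicting access
   recorded earlier (hypothetical variables):

      ExeContext* confEC;   Thr* confThr;   SizeT confSzB;
      Bool confIsW;         WordSetID confLocks;
      if (libhb_event_map_lookup( &confEC, &confThr, &confSzB, &confIsW,
                                  &confLocks, thr, a, szB, isW )) {
         // confEC etc. describe the other access involved in the conflict.
      }
*/
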
typedef void (*Access_t) ( StackTrace ips, UInt n_ips,
                           Thr*        Thr_a,
                           Addr        ga,
                           SizeT       SzB,
                           Bool        isW,
                           WordSetID   locksHeldW );

/* Call fn for each recorded access that overlaps with the range
   [a, a+szB).  fn is called first for the oldest access. */
void libhb_event_map_access_history ( Addr a, SizeT szB, Access_t fn );

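/* Illustrative sketch (not part of the API): dumping the recorded access
   history for a range, oldest first (hypothetical callback):

      static void show_access ( StackTrace ips, UInt n_ips, Thr* acc_thr,
                                Addr ga, SizeT szB, Bool isW,
                                WordSetID locksHeldW )
      {
         // print or otherwise record one historical access here
      }
      ...
      libhb_event_map_access_history( a, szB, show_access );
*/
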
/* ------ Exported from hg_main.c ------ */
/* Yes, this is a horrible tangle.  Sigh. */

/* Get the univ_lset (universe for locksets) from hg_main.c.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void );

/* Get the head pointer for the doubly-linked list of locks
   (admin_locks). */
Lock* HG_(get_admin_locks) ( void );

#endif /* __LIBHB_H */

/*--------------------------------------------------------------------*/
/*--- end                                                  libhb.h ---*/
/*--------------------------------------------------------------------*/