/*
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (helgrind.h) only.  The entire rest of Valgrind is licensed
   under the terms of the GNU General Public License, version 2.  See
   the COPYING file in the source distribution for details.

   ----------------------------------------------------------------

   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks LLP
      info@open-works.co.uk

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (helgrind.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/
#ifndef __HELGRIND_H
#define __HELGRIND_H

#include "valgrind.h"

/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,      /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,        /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,       /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,      /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,              /* Do not use */
      _VG_USERREQ__HG_RESERVED3,              /* Do not use */
      _VG_USERREQ__HG_RESERVED4,              /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,  /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,    /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
      _VG_USERREQ__HG_PTHREAD_COND_INIT_POST, /* pth_cond_t*, pth_cond_attr_t* */
      _VG_USERREQ__HG_GNAT_MASTER_HOOK,       /* void*d, void*m, Word ml */
      _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK /* void*s, Word ml */

   } Vg_TCheckClientRequest;
/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Implementation-only facilities.  Not for end-user use.   ---*/
/*--- For end-user facilities see below (the next section in   ---*/
/*--- this file.)                                              ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/
/* Do a client request.  These are macros rather than functions so
   as to avoid having an extra frame in stack traces.

   NB: these duplicate definitions in hg_intercepts.c.  But here, we
   have to make do with weaker typing (no definition of Word etc) and
   no assertions, whereas in hg_intercepts.c we can use those
   facilities.  Obviously it's important that the two sets of
   definitions are kept in sync.

   The commented-out asserts should actually hold, but unfortunately
   they can't be allowed to be visible here, because that would
   require the end-user code to #include <assert.h>.
*/
#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                 (_creqF),                               \
                 _arg1, 0,0,0,0);                        \
   } while (0)
#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                 (_dfltF),                               \
                 (_creqF),                               \
                 _arg1, 0,0,0,0);                        \
      _resF = _qzz_res;                                  \
   } while (0)
#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _arg1, _arg2;                             \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                 (_creqF),                               \
                 _arg1,_arg2,0,0,0);                     \
   } while (0)

#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                 (_creqF),                               \
                 _arg1,_arg2,_arg3,0,0);                 \
   } while (0)
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))
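
/* Example (a sketch, not part of the API): how these building blocks
   compose into a client request.  When the program is not running
   under Valgrind, VALGRIND_DO_CLIENT_REQUEST_* compiles to a cheap
   no-op, so such calls are safe to leave in release builds.

      char buf[64];
      // Tell Helgrind to forget everything it knows about buf;
      // this is exactly what VALGRIND_HG_CLEAN_MEMORY (below) does.
      DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,
                   void*, buf, unsigned long, sizeof buf);

   End-user code should use the VALGRIND_HG_* and ANNOTATE_* wrappers
   below rather than calling DO_CREQ_* directly. */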
/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Helgrind-native requests.  These allow access to         ---*/
/*--- the same set of annotation primitives that are used      ---*/
/*--- to build the POSIX pthread wrappers.                     ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.
   ---------------------------------------------------------- */
/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,      \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,      \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))
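
/* Example (a sketch): annotating a hypothetical home-grown spin mutex
   so Helgrind treats it like a pthread mutex.  'my_lock_t' and its
   operations are invented for illustration; only the
   VALGRIND_HG_MUTEX_* calls are real.

      typedef struct { volatile int held; } my_lock_t;

      void my_lock_init(my_lock_t* l) {
         l->held = 0;
         VALGRIND_HG_MUTEX_INIT_POST(l, 0);   // 0 == non-recursive
      }
      void my_lock_acquire(my_lock_t* l) {
         VALGRIND_HG_MUTEX_LOCK_PRE(l, 0);    // 0 == not a trylock
         while (__sync_lock_test_and_set(&l->held, 1)) {}
         VALGRIND_HG_MUTEX_LOCK_POST(l);
      }
      void my_lock_release(my_lock_t* l) {
         VALGRIND_HG_MUTEX_UNLOCK_PRE(l);
         __sync_lock_release(&l->held);
         VALGRIND_HG_MUTEX_UNLOCK_POST(l);
      }
      void my_lock_destroy(my_lock_t* l) {
         VALGRIND_HG_MUTEX_DESTROY_PRE(l);
      }
*/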
/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation). */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_WAIT_POST,          \
               void*,(_sem))

/* Notify here immediately before semaphore post (a release-style
   operation). */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_POST_PRE,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))
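
/* Example (a sketch): where the notifications land for a semaphore
   implemented by hand on top of POSIX sem_t.  Helgrind's interceptors
   already cover the real sem_* functions; a pattern like this is only
   needed for a user-implemented semaphore.

      #include <semaphore.h>

      void my_sem_init(sem_t* s, unsigned value) {
         sem_init(s, 0, value);
         VALGRIND_HG_SEM_INIT_POST(s, value);
      }
      void my_sem_post(sem_t* s) {
         VALGRIND_HG_SEM_POST_PRE(s);    // before the release-style op
         sem_post(s);
      }
      void my_sem_wait(sem_t* s) {
         sem_wait(s);
         VALGRIND_HG_SEM_WAIT_POST(s);   // after the acquire-style op
      }
*/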
/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   \
                 void*,(_bar),                               \
                 unsigned long,(_count),                     \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                   \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,     \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is
   no change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)      \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,  \
                void*,(_bar),                                \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,  \
               void*,(_bar))
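
/* Example (a sketch): annotation points for a hand-rolled barrier.
   'my_bar_t' and the elided internals are hypothetical; only the
   VALGRIND_HG_BARRIER_* calls are real.

      typedef struct { unsigned long capacity; } my_bar_t;

      void my_barrier_init(my_bar_t* b, unsigned long count) {
         VALGRIND_HG_BARRIER_INIT_PRE(b, count, 0);  // 0: not resizable
         b->capacity = count;
         // ... set up counters, condition variables, etc. ...
      }
      void my_barrier_wait(my_bar_t* b) {
         VALGRIND_HG_BARRIER_WAIT_PRE(b);  // notify before arrival
         // ... block until 'capacity' threads have arrived ...
      }
      void my_barrier_destroy(my_bar_t* b) {
         VALGRIND_HG_BARRIER_DESTROY_PRE(b);
         // ... tear down internal state ...
      }
*/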
/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))
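
/* Example (a sketch): a recycling allocator.  Without the annotation,
   Helgrind may report races between a block's previous owner and its
   new owner, since the two need not be otherwise synchronised.
   'freelist_pop' is hypothetical.

      #include <stddef.h>

      void* my_alloc(size_t n) {
         void* p = freelist_pop(n);          // block last used by any thread
         if (p != NULL)
            VALGRIND_HG_CLEAN_MEMORY(p, n);  // now owned by this thread
         return p;
      }
*/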
/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence a return value >= 0 indicates
   success (the block was found), -1 indicates that the block was not
   found, and -2 is returned when not running on Helgrind. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                 void*,(_qzz_blockstart));                   \
     _npainted;                                              \
   }))
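
/* Example (a sketch): painting an object whose dynamic size is not
   known at the call site.  'obj' is hypothetical.

      extern void* obj;
      long n = VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(obj);
      if (n >= 0)  { }  // success: n bytes were painted
      if (n == -1) { }  // obj is not the start of a heap block
      if (n == -2) { }  // not running under Helgrind
*/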
/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell Helgrind that an address range is not to be "tracked" until
   further notice.  This puts it in the NOACCESS state, in which state
   we ignore all reads and writes to it.  Useful for ignoring ranges
   of memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))
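
/* Example (a sketch): silencing a deliberately unsynchronised
   statistics array, then making it trackable again.  'stats' is
   hypothetical.

      static unsigned long stats[4];

      void stats_begin_racy_phase(void) {
         VALGRIND_HG_DISABLE_CHECKING(stats, sizeof stats);
      }
      void stats_end_racy_phase(void) {
         // back to the tracked state, owned by the calling thread
         VALGRIND_HG_ENABLE_CHECKING(stats, sizeof stats);
      }
*/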
/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- ThreadSanitizer-compatible requests                      ---*/
/*--- (mostly unimplemented)                                   ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* A quite-broad set of annotations, as used in the ThreadSanitizer
   project.  This implementation aims to be (source-level) compatible
   with the macros defined in:

   http://code.google.com/p/data-race-test/source
          /browse/trunk/dynamic_annotations/dynamic_annotations.h

   (some of the comments below are taken from the above file)

   The implementation here is very incomplete, and intended as a
   starting point.  Many of the macros are unimplemented.  Rather
   than silently doing nothing, unimplemented macros cause an
   assertion.  The intention is to implement them on demand.

   The major use of these macros is to make visible to race detectors
   the behaviour (effects) of user-implemented synchronisation
   primitives, which the detectors could not otherwise deduce from
   normal observation of pthread etc calls.

   Some of the macros are no-ops in Helgrind.  That's because Helgrind
   is a pure happens-before detector, whereas ThreadSanitizer uses a
   hybrid lockset and happens-before scheme, which requires more
   accurate annotations for correct operation.

   The macros are listed in the same order as in dynamic_annotations.h
   (URL just above).

   I should point out that I am less than clear about the intended
   semantics of quite a number of them.  Comments and clarifications
   welcomed!
*/
/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock)                 \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv)                            \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv)                          \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv)                      \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
/* ----------------------------------------------------------------
   Create completely arbitrary happens-before edges between threads.

   If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
   (w.r.t. some notional global clock for the computation) thread Tm
   does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
   memory accesses done by T1 .. Tn before the ..BEFORE.. call as
   happening-before all memory accesses done by Tm after the
   ..AFTER.. call.  Hence Helgrind won't complain about races if Tm's
   accesses afterwards are to the same locations as accesses before by
   any of T1 .. Tn.

   OBJ is a machine word (unsigned long, or void*), is completely
   arbitrary, and denotes the identity of some synchronisation object
   you're modelling.

   You must do the _BEFORE call just before the real sync event on the
   signaller's side, and _AFTER just after the real sync event on the
   waiter's side.

   If none of the rest of these macros make sense to you, at least
   take the time to understand these two.  They form the very essence
   of describing arbitrary inter-thread synchronisation events to
   Helgrind.  You can get a long way just with them alone.

   See also, extensive discussion on semantics of this in
   https://bugs.kde.org/show_bug.cgi?id=243935

   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
   as bug 243935 is fully resolved.  It instructs Helgrind to forget
   about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in
   effect putting it back in its original state.  Once in that state,
   a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling
   thread.

   An implementation may optionally release resources it has
   associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
   happens.  Users are recommended to use
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
   synchronisation object is no longer needed, so as to avoid
   potential indefinite resource leaks.
   ----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

#define ANNOTATE_HAPPENS_AFTER(obj)                          \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)              \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))
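
/* Example (a sketch): describing a hand-rolled lock-free mailbox.
   The 'mailbox' type, its flag protocol and the fences are invented
   for illustration; the two annotations are the real API.  The
   mailbox's own address serves as the SO-tag.

      typedef struct { int payload; volatile int full; } mailbox;

      void send(mailbox* m, int v) {       // signaller's side
         m->payload = v;
         ANNOTATE_HAPPENS_BEFORE(m);       // just before the sync event
         __sync_synchronize();
         m->full = 1;                      // the real sync event
      }
      int recv(mailbox* m) {               // waiter's side
         while (!m->full) {}               // the real sync event
         __sync_synchronize();
         ANNOTATE_HAPPENS_AFTER(m);        // just after the sync event
         return m->payload;                // not reported as a race
      }
*/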
/* ----------------------------------------------------------------
   Memory publishing.  The TSan sources say:

     Report that the bytes in the range [pointer, pointer+size) are about
     to be published safely. The race checker will create a happens-before
     arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
     subsequent accesses to this memory.

   I'm not sure I understand what this means exactly, nor whether it
   is relevant for a pure h-b detector.  Leaving unimplemented for
   now.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size)         \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */
/* ----------------------------------------------------------------
   TSan sources say:

     Instruct the tool to create a happens-before arc between
     MU->Unlock() and MU->Lock().  This annotation may slow down the
     race detector; normally it is used only when it would be
     difficult to annotate each of the mutex's critical sections
     individually using the annotations above.

   If MU is a posix pthread_mutex_t then Helgrind will do this anyway.
   In any case, leave as unimp for now.  I'm unsure about the intended
   behaviour.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu)               \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated.  Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */
/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining memory allocators, or when
     memory that was protected in one way starts to be protected in
     another.

     Report that new memory at "address" of size "size" has been
     allocated.  This might be used when the memory has been retrieved
     from a free list and is about to be reused, or when the locking
     discipline for a variable changes.

   AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
   ----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size)                   \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))
/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining FIFO queues that transfer data
     between threads.

   All unimplemented.  Am not claiming to understand this (yet).
   ----------------------------------------------------------------
*/
/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq)                             \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq)                            \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq)                                \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq)                                \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
/* ----------------------------------------------------------------
   Annotations that suppress errors.  It is usually better to express
   the program's synchronization using the other annotations, but
   these can be used when all else fails.

   Currently these are mostly unimplemented.  I can't think of a
   simple way to implement them without at least some performance
   overhead.
   ----------------------------------------------------------------
*/
/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))".  "pointer" must be a non-void* pointer.  Insert
   at the point where "pointer" has been allocated, preferably close to
   the point where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect?
*/
#define ANNOTATE_BENIGN_RACE(pointer, description)           \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   VALGRIND_HG_DISABLE_CHECKING(address, size)
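
/* Example (a sketch): a deliberately racy "approximate" counter.
   'approx_hits' is hypothetical.  Note that in this implementation
   the description string is ignored and the effect is simply that of
   VALGRIND_HG_DISABLE_CHECKING on the range, so the suppression lasts
   until the range is re-enabled or reallocated.

      static unsigned long approx_hits;

      void stats_init(void) {
         ANNOTATE_BENIGN_RACE_SIZED(&approx_hits, sizeof approx_hits,
                                    "approximate hit counter");
      }
      // Thereafter, unsynchronised updates to approx_hits are not
      // reported.
*/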
/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
   intentional racey reads, while still checking other reads and all
   writes. */
#define ANNOTATE_IGNORE_READS_BEGIN()                        \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END()                          \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN()                       \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END()                         \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN()             \
   do {                                                      \
      ANNOTATE_IGNORE_READS_BEGIN();                         \
      ANNOTATE_IGNORE_WRITES_BEGIN();                        \
   } while (0)

/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END()               \
   do {                                                      \
      ANNOTATE_IGNORE_WRITES_END();                          \
      ANNOTATE_IGNORE_READS_END();                           \
   } while (0)
/* ----------------------------------------------------------------
   Annotations useful for debugging.

   Again, so far unimplemented, partly for performance reasons.
   ----------------------------------------------------------------
*/
/* Request to trace every access to ADDRESS. */
#define ANNOTATE_TRACE_MEMORY(address)                       \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")

/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name)                           \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
/* ----------------------------------------------------------------
   Annotations for describing behaviour of user-implemented lock
   primitives.  In all cases, the LOCK argument is a completely
   arbitrary machine word (unsigned long, or void*) and can be any
   value which gives a unique identity to the lock objects being
   modelled.

   We just pretend they're ordinary posix rwlocks.  That'll probably
   give some rather confusing wording in error messages, claiming that
   the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
   they are not.  Ah well.
   ----------------------------------------------------------------
*/
/* Report that a lock has just been created at address LOCK. */
#define ANNOTATE_RWLOCK_CREATE(lock)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
               void*,(lock))

/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
               void*,(lock))

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,    \
                void*,(lock), unsigned long,(is_w))

/* Report that the lock at address LOCK is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,    \
               void*,(lock)) /* is_w is ignored */
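
/* Example (a sketch): annotating a hypothetical user-level
   reader-writer lock.  Per the comments above, acquire notifications
   go just after the real acquisition and release notifications just
   before the real release.  The really_* functions are invented.

      void my_rwl_wrlock(my_rwl_t* l) {
         really_acquire_for_writing(l);   // hypothetical
         ANNOTATE_RWLOCK_ACQUIRED(l, 1);  // 1 == writer
      }
      void my_rwl_rdlock(my_rwl_t* l) {
         really_acquire_for_reading(l);   // hypothetical
         ANNOTATE_RWLOCK_ACQUIRED(l, 0);  // 0 == reader
      }
      void my_rwl_unlock(my_rwl_t* l, int was_writer) {
         ANNOTATE_RWLOCK_RELEASED(l, was_writer);
         really_release(l);               // hypothetical
      }
*/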
/* ----------------------------------------------------------------
   Annotations useful when implementing barriers.  They are not
   normally needed by modules that merely use barriers.
   The "barrier" argument is a pointer to the barrier object.
   ----------------------------------------------------------------
*/
/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy() */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")

/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier)                \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")

/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier)                 \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")

/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier)                    \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
/* ----------------------------------------------------------------
   Annotations useful for testing race detectors.
   ----------------------------------------------------------------
*/
/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description)           \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")

/* A no-op.  Insert where you like to test the interceptors. */
#define ANNOTATE_NO_OP(arg)                                  \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")

/* Force the race detector to flush its state.  The actual effect
   depends on the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE()                               \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")

#endif /* __HELGRIND_H */