2 * Test program that illustrates how to annotate a smart pointer
3 * implementation. In a multithreaded program the following is relevant when
4 * working with smart pointers:
5 * - whether or not the objects pointed at are shared over threads.
6 * - whether or not the methods of the objects pointed at are thread-safe.
7 * - whether or not the smart pointer objects are shared over threads.
8 * - whether or not the smart pointer object itself is thread-safe.
10 * Most smart pointer implementations are not thread-safe
11 * (e.g. boost::shared_ptr<>, tr1::shared_ptr<> and the smart_ptr<>
12 * implementation below). This means that it is not safe to modify a shared
13 * pointer object that is shared over threads without proper synchronization.
15 * Even for non-thread-safe smart pointers it is possible to have different
16 * threads access the same object via smart pointers without triggering data
17 * races on the smart pointer objects.
19 * A smart pointer implementation guarantees that the destructor of the object
20 * pointed at is invoked after the last smart pointer that points to that
21 * object has been destroyed or reset. Data race detection tools cannot detect
22 * this ordering without explicit annotation for smart pointers that track
23 * references without invoking synchronization operations recognized by data
24 * race detection tools.
28 #include <cassert> // assert()
29 #include <climits> // PTHREAD_STACK_MIN
30 #include <iostream> // std::cerr
31 #include <stdlib.h> // atoi()
34 #include <process.h> // _beginthreadex()
35 #include <windows.h> // CRITICAL_SECTION
37 #include <pthread.h> // pthread_mutex_t
39 #include "unified_annotations.h"
// Runtime switch (set from argv[3] in main()) controlling whether the
// U_ANNOTATE_HAPPENS_BEFORE()/U_ANNOTATE_HAPPENS_AFTER() calls inside
// smart_ptr<>::set() are issued to the race-detection tool.
43 static bool s_enable_annotations
;
51 AtomicInt32(const int value
= 0) : m_value(value
) { }
53 LONG
operator++() { return InterlockedIncrement(&m_value
); }
54 LONG
operator--() { return InterlockedDecrement(&m_value
); }
// Counter storage. 'volatile' by itself does not provide atomicity or
// ordering -- the Interlocked*() operations performed on it do.
57 volatile LONG m_value
;
// NOTE(review): the declarator lines for the four bodies below are not
// visible in this chunk; from the Win32 calls they appear to be the Mutex
// constructor, destructor, Lock() and Unlock() -- confirm against the
// full file.
// Constructor body: initialize the underlying CRITICAL_SECTION.
64 { InitializeCriticalSection(&m_mutex
); }
// Destructor body: release the CRITICAL_SECTION's resources.
66 { DeleteCriticalSection(&m_mutex
); }
// Lock body: acquire the critical section, blocking until available.
68 { EnterCriticalSection(&m_mutex
); }
// Unlock body: leave the critical section.
70 { LeaveCriticalSection(&m_mutex
); }
// The wrapped Win32 lock object.
73 CRITICAL_SECTION m_mutex
;
79 Thread() : m_thread(INVALID_HANDLE_VALUE
) { }
// Start a new thread running pf(arg). The (pf, arg) pair is packaged on
// the heap in a WrapperArgs so it can travel through _beginthreadex()'s
// single void* parameter; wrapper() unpacks and deletes it on the new
// thread.
81 void Create(void* (*pf
)(void*), void* arg
)
83 WrapperArgs
* wrapper_arg_p
= new WrapperArgs(pf
, arg
);
// NOTE(review): the return value of _beginthreadex() is not checked; on
// failure m_thread would hold an invalid handle and Join() would
// misbehave.
84 m_thread
= reinterpret_cast<HANDLE
>(_beginthreadex(NULL
, 0, wrapper
,
85 wrapper_arg_p
, 0, NULL
));
// Join body: block (without timeout) until the thread finishes. Its
// declarator line is not visible in this chunk.
88 { WaitForSingleObject(m_thread
, INFINITE
); }
93 WrapperArgs(void* (*pf
)(void*), void* arg
) : m_pf(pf
), m_arg(arg
) { }
// Thread entry-point trampoline for _beginthreadex(): unpack the
// WrapperArgs heap object allocated by Create(), free it, then invoke the
// client function with its argument.
98 static unsigned int __stdcall
wrapper(void* arg
)
100 WrapperArgs
* wrapper_arg_p
= reinterpret_cast<WrapperArgs
*>(arg
);
// Copy the arguments to the stack so the heap object can be freed before
// the (potentially long-running) client function is called.
101 WrapperArgs wa
= *wrapper_arg_p
;
102 delete wrapper_arg_p
;
// NOTE(review): casting the void* result to unsigned truncates the
// pointer on 64-bit targets -- acceptable only if callers ignore the
// thread exit code; confirm.
103 return reinterpret_cast<unsigned>((wa
.m_pf
)(wa
.m_arg
));
113 AtomicInt32(const int value
= 0) : m_value(value
) { }
115 int operator++() { return __sync_add_and_fetch(&m_value
, 1); }
116 int operator--() { return __sync_sub_and_fetch(&m_value
, 1); }
// Counter storage. 'volatile' by itself is not a synchronization
// primitive -- atomicity comes from the __sync_* builtins that operate
// on it.
118 volatile int m_value
;
// NOTE(review): the declarator lines for the four bodies below are not
// visible in this chunk; from the pthread calls they appear to be the
// Mutex constructor, destructor, Lock() and Unlock() -- confirm against
// the full file.
// Constructor body: initialize the mutex with default attributes.
125 { pthread_mutex_init(&m_mutex
, NULL
); }
// Destructor body: release the mutex's resources.
127 { pthread_mutex_destroy(&m_mutex
); }
// Lock body: acquire the mutex, blocking until available.
129 { pthread_mutex_lock(&m_mutex
); }
// Unlock body: release the mutex.
131 { pthread_mutex_unlock(&m_mutex
); }
// The wrapped POSIX lock object.
134 pthread_mutex_t m_mutex
;
// A freshly constructed Thread owns no OS thread yet (value-initialized id).
140 Thread() : m_tid() { }
// Start a new thread running pf(arg), using an explicitly small stack
// (PTHREAD_STACK_MIN + 4096 bytes) except on FreeBSD.
// NOTE(review): the declaration of 'attr' (presumably pthread_attr_t),
// the function braces, and the #endif matching the #if at original line
// 146 are not visible in this chunk -- confirm against the full file.
142 void Create(void* (*pf
)(void*), void* arg
)
145 pthread_attr_init(&attr
);
146 #if !defined(VGO_freebsd)
147 pthread_attr_setstacksize(&attr
, PTHREAD_STACK_MIN
+ 4096);
// NOTE(review): pthread_create()'s return value is not checked; Join()
// on a never-created thread id would be undefined.
149 pthread_create(&m_tid
, &attr
, pf
, arg
);
150 pthread_attr_destroy(&attr
);
// Join body: block until the thread finishes, discarding its result.
// Its declarator line is not visible in this chunk.
153 { pthread_join(m_tid
, NULL
); }
// Closes the platform-selection #if defined(_WIN32) ... #else block.
158 #endif // !defined(_WIN32)
// Reference-counter type shared by all smart_ptr<> copies that point at
// the same object; AtomicInt32 is the platform-specific atomic wrapper
// defined above.
165 typedef AtomicInt32 counter_t
;
// Let other instantiations smart_ptr<Q> (the converting constructor and
// assignment below) read this instantiation's private m_ptr/m_count_ptr.
167 template <typename Q
> friend class smart_ptr
;
// Default constructor init list: an empty smart pointer owns nothing.
// (Its declarator line and braces are not visible in this chunk.)
170 : m_ptr(NULL
), m_count_ptr(NULL
)
// Take ownership of pT, creating a fresh reference counter for it.
// The counter starts at 0; presumably set() increments it for the new
// owner -- confirm against set()'s full body.
173 explicit smart_ptr(T
* const pT
)
174 : m_ptr(NULL
), m_count_ptr(NULL
)
176 set(pT
, pT
? new counter_t(0) : NULL
);
// Converting constructor: take ownership of a Q* convertible to T*,
// same counter-creation protocol as above.
179 template <typename Q
>
180 explicit smart_ptr(Q
* const q
)
181 : m_ptr(NULL
), m_count_ptr(NULL
)
183 set(q
, q
? new counter_t(0) : NULL
);
// Copy constructor: share sp's object and reference counter via set().
191 smart_ptr(const smart_ptr
<T
>& sp
)
192 : m_ptr(NULL
), m_count_ptr(NULL
)
194 set(sp
.m_ptr
, sp
.m_count_ptr
);
// Converting copy constructor for smart_ptr<Q> with Q* convertible to
// T*; reads sp's private members via the friend declaration above.
197 template <typename Q
>
198 smart_ptr(const smart_ptr
<Q
>& sp
)
199 : m_ptr(NULL
), m_count_ptr(NULL
)
201 set(sp
.m_ptr
, sp
.m_count_ptr
);
// Copy assignment: drop the current reference and share sp's object and
// counter. NOTE(review): the 'return *this;' statements and function
// braces for the three operators below are not visible in this chunk;
// also confirm that set() tolerates self-assignment.
204 smart_ptr
& operator=(const smart_ptr
<T
>& sp
)
206 set(sp
.m_ptr
, sp
.m_count_ptr
);
// Assignment from a raw T*: take ownership with a fresh counter
// (starting at 0, as in the constructors).
210 smart_ptr
& operator=(T
* const p
)
212 set(p
, p
? new counter_t(0) : NULL
);
// Assignment from a raw Q* convertible to T*: same protocol.
216 template <typename Q
>
217 smart_ptr
& operator=(Q
* const q
)
219 set(q
, q
? new counter_t(0) : NULL
);
// Member access to the pointed-at object. Its body is not visible in
// this chunk (presumably returns m_ptr).
223 T
* operator->() const
// Core ownership-transfer helper: drop the reference currently held (if
// any), then adopt (pT, count_ptr). NOTE(review): several lines of this
// function (the null checks, the increment of the new counter, deletion
// of the old object, and the m_ptr assignment) are not visible in this
// chunk.
236 void set(T
* const pT
, counter_t
* const count_ptr
)
// Annotation protocol: before atomically decrementing the shared count,
// each releasing thread tells the race detector that its accesses
// happen-before this point...
242 if (s_enable_annotations
)
243 U_ANNOTATE_HAPPENS_BEFORE(m_count_ptr
);
244 if (--(*m_count_ptr
) == 0)
// ...and the one thread that observes the count reach zero announces the
// matching happens-after, so the detector orders all prior owners'
// accesses before the object's destruction.
246 if (s_enable_annotations
)
247 U_ANNOTATE_HAPPENS_AFTER(m_count_ptr
);
// Adopt the new counter.
255 m_count_ptr
= count_ptr
;
// Pointer to the reference counter shared with all other smart_ptr
// copies managing the same object (NULL when empty).
262 counter_t
* m_count_ptr
;
// Constructor init list of the 'counter' test class: zero-initialize the
// mutex and the count. (Its declarator line, braces, and the method
// bodies of original lines 270-296 are not visible in this chunk.)
269 : m_mutex(), m_count()
273 // Data race detection tools that do not recognize the
274 // ANNOTATE_HAPPENS_BEFORE() / ANNOTATE_HAPPENS_AFTER() annotations in the
275 // smart_ptr<> implementation will report that the assignment below
276 // triggers a data race.
// Mutex guarding some of counter's operations; mutable so const members
// can lock it.
297 mutable Mutex m_mutex
;
// Worker thread body: each thread receives its own heap-allocated
// smart_ptr<counter> copy (created in main()) and bumps the shared
// counter once. NOTE(review): the 'delete pp' that releases this
// thread's reference and the return statement are not visible in this
// chunk -- presumably they follow at original lines 305-306.
301 static void* thread_func(void* arg
)
303 smart_ptr
<counter
>* pp
= reinterpret_cast<smart_ptr
<counter
>*>(arg
);
304 (*pp
)->post_increment();
// Test driver. Usage: prog [nthreads [iterations [enable_annotations]]].
// NOTE(review): the function braces and several statements (including the
// loop body that presumably calls q->Join() at original lines 331-332)
// are not visible in this chunk.
310 int main(int argc
, char** argv
)
// Thread and iteration counts are clamped to at least 1.
312 const int nthreads
= std::max(argc
> 1 ? atoi(argv
[1]) : 1, 1);
313 const int iterations
= std::max(argc
> 2 ? atoi(argv
[2]) : 1, 1);
// Annotations default to enabled when argv[3] is absent.
314 s_enable_annotations
= argc
> 3 ? !!atoi(argv
[3]) : true;
316 for (int j
= 0; j
< iterations
; ++j
)
317 {
318 std::vector
<Thread
> T(nthreads
);
// One shared counter per iteration; each thread gets its own
// heap-allocated smart_ptr copy so the smart pointer objects themselves
// are not shared across threads.
319 smart_ptr
<counter
> p(new counter
);
321 for (std::vector
<Thread
>::iterator q
= T
.begin(); q
!= T
.end(); q
++)
322 q
->Create(thread_func
, new smart_ptr
<counter
>(p
));
324 // Avoid that counter.m_mutex introduces a false ordering on the
325 // counter.m_count accesses.
326 const timespec delay
= { 0, 100 * 1000 * 1000 };
327 nanosleep(&delay
, 0);
// Second pass over the thread vector; the statement it controls is not
// visible in this chunk (presumably q->Join()).
330 for (std::vector
<Thread
>::iterator q
= T
.begin(); q
!= T
.end(); q
++)
333 std::cerr
<< "Done.\n";