2 * Test program that illustrates how to annotate a smart pointer
3 * implementation. In a multithreaded program the following is relevant when
4 * working with smart pointers:
5 * - whether or not the objects pointed at are shared over threads.
6 * - whether or not the methods of the objects pointed at are thread-safe.
7 * - whether or not the smart pointer objects are shared over threads.
8 * - whether or not the smart pointer object itself is thread-safe.
10 * Most smart pointer implementations are not thread-safe
11 * (e.g. boost::shared_ptr<>, tr1::shared_ptr<> and the smart_ptr<>
12 * implementation below). This means that it is not safe to modify a shared
13 * pointer object that is shared over threads without proper synchronization.
15 * Even for non-thread-safe smart pointers it is possible to have different
16 * threads access the same object via smart pointers without triggering data
17 * races on the smart pointer objects.
19 * A smart pointer implementation guarantees that the destructor of the object
20 * pointed at is invoked after the last smart pointer that points to that
21 * object has been destroyed or reset. Data race detection tools cannot detect
22 * this ordering without explicit annotation for smart pointers that track
23 * references without invoking synchronization operations recognized by data
24 * race detection tools.
#include <algorithm>  // std::max()
#include <cassert>    // assert()
#include <climits>    // PTHREAD_STACK_MIN
#include <cstdint>    // uintptr_t
#include <ctime>      // timespec, nanosleep()
#include <iostream>   // std::cerr
#include <stdlib.h>   // atoi()
#include <vector>     // std::vector
#ifdef _WIN32
#include <process.h>  // _beginthreadex()
#include <windows.h>  // CRITICAL_SECTION
#else
#include <pthread.h>  // pthread_mutex_t
#endif
#include "unified_annotations.h"
42 static bool s_enable_annotations
;
50 AtomicInt32(const int value
= 0) : m_value(value
) { }
52 LONG
operator++() { return InterlockedIncrement(&m_value
); }
53 LONG
operator--() { return InterlockedDecrement(&m_value
); }
56 volatile LONG m_value
;
63 { InitializeCriticalSection(&m_mutex
); }
65 { DeleteCriticalSection(&m_mutex
); }
67 { EnterCriticalSection(&m_mutex
); }
69 { LeaveCriticalSection(&m_mutex
); }
72 CRITICAL_SECTION m_mutex
;
78 Thread() : m_thread(INVALID_HANDLE_VALUE
) { }
80 void Create(void* (*pf
)(void*), void* arg
)
82 WrapperArgs
* wrapper_arg_p
= new WrapperArgs(pf
, arg
);
83 m_thread
= reinterpret_cast<HANDLE
>(_beginthreadex(NULL
, 0, wrapper
,
84 wrapper_arg_p
, 0, NULL
));
87 { WaitForSingleObject(m_thread
, INFINITE
); }
92 WrapperArgs(void* (*pf
)(void*), void* arg
) : m_pf(pf
), m_arg(arg
) { }
97 static unsigned int __stdcall
wrapper(void* arg
)
99 WrapperArgs
* wrapper_arg_p
= reinterpret_cast<WrapperArgs
*>(arg
);
100 WrapperArgs wa
= *wrapper_arg_p
;
101 delete wrapper_arg_p
;
102 return reinterpret_cast<unsigned>((wa
.m_pf
)(wa
.m_arg
));
112 AtomicInt32(const int value
= 0) : m_value(value
) { }
114 int operator++() { return __sync_add_and_fetch(&m_value
, 1); }
115 int operator--() { return __sync_sub_and_fetch(&m_value
, 1); }
117 volatile int m_value
;
124 { pthread_mutex_init(&m_mutex
, NULL
); }
126 { pthread_mutex_destroy(&m_mutex
); }
128 { pthread_mutex_lock(&m_mutex
); }
130 { pthread_mutex_unlock(&m_mutex
); }
133 pthread_mutex_t m_mutex
;
139 Thread() : m_tid() { }
141 void Create(void* (*pf
)(void*), void* arg
)
144 pthread_attr_init(&attr
);
145 pthread_attr_setstacksize(&attr
, PTHREAD_STACK_MIN
+ 4096);
146 pthread_create(&m_tid
, &attr
, pf
, arg
);
147 pthread_attr_destroy(&attr
);
150 { pthread_join(m_tid
, NULL
); }
155 #endif // !defined(_WIN32)
162 typedef AtomicInt32 counter_t
;
164 template <typename Q
> friend class smart_ptr
;
167 : m_ptr(NULL
), m_count_ptr(NULL
)
170 explicit smart_ptr(T
* const pT
)
171 : m_ptr(NULL
), m_count_ptr(NULL
)
173 set(pT
, pT
? new counter_t(0) : NULL
);
176 template <typename Q
>
177 explicit smart_ptr(Q
* const q
)
178 : m_ptr(NULL
), m_count_ptr(NULL
)
180 set(q
, q
? new counter_t(0) : NULL
);
188 smart_ptr(const smart_ptr
<T
>& sp
)
189 : m_ptr(NULL
), m_count_ptr(NULL
)
191 set(sp
.m_ptr
, sp
.m_count_ptr
);
194 template <typename Q
>
195 smart_ptr(const smart_ptr
<Q
>& sp
)
196 : m_ptr(NULL
), m_count_ptr(NULL
)
198 set(sp
.m_ptr
, sp
.m_count_ptr
);
201 smart_ptr
& operator=(const smart_ptr
<T
>& sp
)
203 set(sp
.m_ptr
, sp
.m_count_ptr
);
207 smart_ptr
& operator=(T
* const p
)
209 set(p
, p
? new counter_t(0) : NULL
);
213 template <typename Q
>
214 smart_ptr
& operator=(Q
* const q
)
216 set(q
, q
? new counter_t(0) : NULL
);
220 T
* operator->() const
233 void set(T
* const pT
, counter_t
* const count_ptr
)
239 if (s_enable_annotations
)
240 U_ANNOTATE_HAPPENS_BEFORE(m_count_ptr
);
241 if (--(*m_count_ptr
) == 0)
243 if (s_enable_annotations
)
244 U_ANNOTATE_HAPPENS_AFTER(m_count_ptr
);
252 m_count_ptr
= count_ptr
;
259 counter_t
* m_count_ptr
;
266 : m_mutex(), m_count()
270 // Data race detection tools that do not recognize the
271 // ANNOTATE_HAPPENS_BEFORE() / ANNOTATE_HAPPENS_AFTER() annotations in the
272 // smart_ptr<> implementation will report that the assignment below
273 // triggers a data race.
294 mutable Mutex m_mutex
;
298 static void* thread_func(void* arg
)
300 smart_ptr
<counter
>* pp
= reinterpret_cast<smart_ptr
<counter
>*>(arg
);
301 (*pp
)->post_increment();
307 int main(int argc
, char** argv
)
309 const int nthreads
= std::max(argc
> 1 ? atoi(argv
[1]) : 1, 1);
310 const int iterations
= std::max(argc
> 2 ? atoi(argv
[2]) : 1, 1);
311 s_enable_annotations
= argc
> 3 ? !!atoi(argv
[3]) : true;
313 for (int j
= 0; j
< iterations
; ++j
)
315 std::vector
<Thread
> T(nthreads
);
316 smart_ptr
<counter
> p(new counter
);
318 for (std::vector
<Thread
>::iterator q
= T
.begin(); q
!= T
.end(); q
++)
319 q
->Create(thread_func
, new smart_ptr
<counter
>(p
));
321 // Avoid that counter.m_mutex introduces a false ordering on the
322 // counter.m_count accesses.
323 const timespec delay
= { 0, 100 * 1000 * 1000 };
324 nanosleep(&delay
, 0);
327 for (std::vector
<Thread
>::iterator q
= T
.begin(); q
!= T
.end(); q
++)
330 std::cerr
<< "Done.\n";