//  lock-free queue from
//  Michael, M. M. and Scott, M. L.,
//  "Simple, fast and practical non-blocking and blocking concurrent queue algorithms"
//
//  Copyright (C) 2008, 2009, 2010, 2011 Tim Blechmann
//
//  Distributed under the Boost Software License, Version 1.0. (See
//  accompanying file LICENSE_1_0.txt or copy at
//  http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_LOCKFREE_FIFO_HPP_INCLUDED
#define BOOST_LOCKFREE_FIFO_HPP_INCLUDED

#include <memory>               /* std::auto_ptr */

#include <boost/noncopyable.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/has_trivial_assign.hpp>
#include <boost/type_traits/has_trivial_destructor.hpp>

#include <boost/lockfree/detail/atomic.hpp>
#include <boost/lockfree/detail/copy_payload.hpp>
#include <boost/lockfree/detail/freelist.hpp>
#include <boost/lockfree/detail/parameter.hpp>
#include <boost/lockfree/detail/tagged_ptr.hpp>

namespace boost {
namespace lockfree {
namespace detail {

typedef parameter::parameters<boost::parameter::optional<tag::allocator>,
                              boost::parameter::optional<tag::capacity>
                             > queue_signature;

} /* namespace detail */

/** The queue class provides a multi-writer/multi-reader queue, pushing and popping is lock-free,
 *  construction/destruction has to be synchronized. It uses a freelist for memory management,
 *  freed nodes are pushed to the freelist and not returned to the OS before the queue is destroyed.
 *
 *  \b Policies:
 *  - \ref boost::lockfree::fixed_sized, defaults to \c boost::lockfree::fixed_sized<false> \n
 *    Can be used to completely disable dynamic memory allocations during push in order to ensure lockfree behavior. \n
 *    If the data structure is configured as fixed-sized, the internal nodes are stored inside an array and they are addressed
 *    by array indexing. This limits the possible size of the queue to the number of elements that can be addressed by the index
 *    type (usually 2**16-2), but on platforms that lack double-width compare-and-exchange instructions, this is the best way
 *    to achieve lock-freedom.
 *
 *  - \ref boost::lockfree::capacity, optional \n
 *    If this template argument is passed to the options, the size of the queue is set at compile-time. \n
 *    This option implies \c fixed_sized<true>.
 *
 *  - \ref boost::lockfree::allocator, defaults to \c boost::lockfree::allocator<std::allocator<void>> \n
 *    Specifies the allocator that is used for the internal freelist.
 *
 *  \b Requirements:
 *   - T must have a copy constructor
 *   - T must have a trivial assignment operator
 *   - T must have a trivial destructor
 * */

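/* A minimal usage sketch (illustrative only, not part of this header), assuming a
 * hypothetical consume() handler: one producer and one consumer thread share a
 * compile-time sized queue.
 *
 *     #include <boost/lockfree/queue.hpp>
 *
 *     boost::lockfree::queue<int, boost::lockfree::capacity<1024> > q;
 *
 *     void producer(void)
 *     {
 *         for (int i = 0; i != 1000; ++i)
 *             while (!q.push(i))
 *                 ;                    // fixed capacity: push fails while the queue is full
 *     }
 *
 *     void consumer(void)
 *     {
 *         int value;
 *         while (q.pop(value))
 *             consume(value);          // consume() is a hypothetical handler
 *     }
 */
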
#ifndef BOOST_DOXYGEN_INVOKED
template <typename T,
          class A0 = boost::parameter::void_,
          class A1 = boost::parameter::void_,
          class A2 = boost::parameter::void_>
#else
template <typename T, ...Options>
#endif
class queue:
    boost::noncopyable
{
private:
#ifndef BOOST_DOXYGEN_INVOKED
    typedef typename detail::queue_signature::bind<A0, A1, A2>::type bound_args;

    static const bool has_capacity = detail::extract_capacity<bound_args>::has_capacity;
    static const size_t capacity = detail::extract_capacity<bound_args>::capacity;
    static const bool fixed_sized = detail::extract_fixed_sized<bound_args>::value;
    static const bool node_based = !(has_capacity || fixed_sized);
    static const bool compile_time_sized = has_capacity;

    struct BOOST_LOCKFREE_CACHELINE_ALIGNMENT node
    {
        typedef typename detail::select_tagged_handle<node, node_based>::tagged_handle_type tagged_node_handle;
        typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;

        node(T const & v, handle_type null_handle):
            data(v)//, next(tagged_node_handle(0, 0))
        {
            /* increment tag to avoid ABA problem */
            tagged_node_handle old_next = next.load(memory_order_relaxed);
            tagged_node_handle new_next(null_handle, old_next.get_tag() + 1);
            next.store(new_next, memory_order_release);
        }

        node(handle_type null_handle):
            next(tagged_node_handle(null_handle, 0))
        {}

        node(void)
        {}

        atomic<tagged_node_handle> next;
        T data;
    };

    typedef typename detail::extract_allocator<bound_args, node>::type node_allocator;
    typedef typename detail::select_freelist<node, node_allocator, compile_time_sized, fixed_sized, capacity>::type pool_t;
    typedef typename pool_t::tagged_node_handle tagged_node_handle;
    typedef typename detail::select_tagged_handle<node, node_based>::handle_type handle_type;

    void initialize(void)
    {
        /* the queue always contains one dummy node: initially, head_ and tail_ both point to it */
        node * n = pool.template construct<true, false>(pool.null_handle());
        tagged_node_handle dummy_node(pool.get_handle(n), 0);
        head_.store(dummy_node, memory_order_relaxed);
        tail_.store(dummy_node, memory_order_release);
    }

    struct implementation_defined
    {
        typedef node_allocator allocator;
        typedef std::size_t size_type;
    };

#endif

public:
    typedef T value_type;
    typedef typename implementation_defined::allocator allocator;
    typedef typename implementation_defined::size_type size_type;

    /**
     * \return true, if implementation is lock-free.
     *
     * \warning It only checks whether the queue head and tail nodes and the freelist can be modified in a lock-free manner.
     *          On most platforms, the whole implementation is lock-free, if this is true. Using C++0x-style atomics, there is
     *          no possibility to provide a completely accurate implementation, because one would need to test every internal
     *          node, which is impossible if further nodes will be allocated from the operating system.
     * */
    bool is_lock_free (void) const
    {
        return head_.is_lock_free() && tail_.is_lock_free() && pool.is_lock_free();
    }

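    /* Caller-side sketch (illustrative, not part of this class): verify lock-freedom
     * once at startup rather than in program logic, e.g.
     *
     *     boost::lockfree::queue<int> q(128);
     *     assert(q.is_lock_free());
     */
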
    //! Construct queue
    // @{
    queue(void):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(node_allocator(), has_capacity ? capacity : 0)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }

    template <typename U>
    explicit queue(typename node_allocator::template rebind<U>::other const & alloc):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(alloc, has_capacity ? capacity : 0)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }

    explicit queue(allocator const & alloc):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(alloc, has_capacity ? capacity : 0)
    {
        BOOST_STATIC_ASSERT(has_capacity);
        initialize();
    }
    // @}

    //! Construct queue, allocate n nodes for the freelist.
    // @{
    explicit queue(size_type n):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(node_allocator(), n + 1) /* n + 1: the freelist also provides the initial dummy node */
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }

    template <typename U>
    queue(size_type n, typename node_allocator::template rebind<U>::other const & alloc):
        head_(tagged_node_handle(0, 0)),
        tail_(tagged_node_handle(0, 0)),
        pool(alloc, n + 1)
    {
        BOOST_STATIC_ASSERT(!has_capacity);
        initialize();
    }
    // @}

    /** \copydoc boost::lockfree::stack::reserve
     * */
    void reserve(size_type n)
    {
        pool.template reserve<true>(n);
    }

    /** \copydoc boost::lockfree::stack::reserve_unsafe
     * */
    void reserve_unsafe(size_type n)
    {
        pool.template reserve<false>(n);
    }

    /** Destroys the queue and frees all nodes from the freelist.
     * */
    ~queue(void)
    {
        T dummy;
        while(unsynchronized_pop(dummy))
        {}

        pool.template destruct<false>(head_.load(memory_order_relaxed));
    }

    /** Check if the queue is empty
     *
     * \return true, if the queue is empty, false otherwise
     * \note The result is only accurate if no other thread modifies the queue. Therefore it is rarely practical to use this
     *       value in program logic.
     * */
    bool empty(void)
    {
        return pool.get_handle(head_.load()) == pool.get_handle(tail_.load());
    }

    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be
     *       allocated from the OS. This may not be lock-free.
     * */
    bool push(T const & t)
    {
        return do_push<false>(t);
    }

    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Thread-safe and non-blocking. If the internal memory pool is exhausted, the operation will fail.
     * \throws if memory allocator throws
     * */
    bool bounded_push(T const & t)
    {
        return do_push<true>(t);
    }

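    /* Caller-side sketch (illustrative, not part of this class): bounded_push() fails
     * instead of allocating when the pool is exhausted, so real-time producers
     * typically retry or drop the element, e.g.
     *
     *     while (!q.bounded_push(t))
     *         ;   // spin; alternatively yield, or drop the element
     */
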
private:
#ifndef BOOST_DOXYGEN_INVOKED
    template <bool Bounded>
    bool do_push(T const & t)
    {
        using detail::likely;

        node * n = pool.template construct<true, Bounded>(t, pool.null_handle());
        handle_type node_handle = pool.get_handle(n);

        if (n == NULL)
            return false;

        for (;;) {
            tagged_node_handle tail = tail_.load(memory_order_acquire);
            node * tail_node = pool.get_pointer(tail);
            tagged_node_handle next = tail_node->next.load(memory_order_acquire);
            node * next_ptr = pool.get_pointer(next);

            tagged_node_handle tail2 = tail_.load(memory_order_acquire);
            if (likely(tail == tail2)) {
                if (next_ptr == 0) {
                    /* tail points to the last node: try to link the new node behind it */
                    tagged_node_handle new_tail_next(node_handle, next.get_tag() + 1);
                    if (tail_node->next.compare_exchange_weak(next, new_tail_next)) {
                        /* link succeeded, try to swing tail to the new node. if this CAS
                         * fails, another thread has already advanced tail for us */
                        tagged_node_handle new_tail(node_handle, tail.get_tag() + 1);
                        tail_.compare_exchange_strong(tail, new_tail);
                        return true;
                    }
                }
                else {
                    /* tail is lagging behind the last node: help to advance it */
                    tagged_node_handle new_tail(pool.get_handle(next_ptr), tail.get_tag() + 1);
                    tail_.compare_exchange_strong(tail, new_tail);
                }
            }
        }
    }
#endif

public:

    /** Pushes object t to the queue.
     *
     * \post object will be pushed to the queue, if an internal node can be allocated
     * \returns true, if the push operation is successful.
     *
     * \note Not thread-safe. If the internal memory pool is exhausted and the memory pool is not fixed-sized, a new node will be
     *       allocated from the OS. This may not be lock-free.
     * \throws if memory allocator throws
     * */
    bool unsynchronized_push(T const & t)
    {
        node * n = pool.template construct<false, false>(t, pool.null_handle());

        if (n == NULL)
            return false;

        for (;;) {
            tagged_node_handle tail = tail_.load(memory_order_relaxed);
            node * tail_node = pool.get_pointer(tail);
            tagged_node_handle next = tail_node->next.load(memory_order_relaxed);
            node * next_ptr = pool.get_pointer(next);

            if (next_ptr == 0) {
                tail_node->next.store(tagged_node_handle(pool.get_handle(n), next.get_tag() + 1), memory_order_relaxed);
                tail_.store(tagged_node_handle(pool.get_handle(n), tail.get_tag() + 1), memory_order_relaxed);
                return true;
            }
            else
                tail_.store(tagged_node_handle(pool.get_handle(next_ptr), tail.get_tag() + 1), memory_order_relaxed);
        }
    }

    /** Pops object from queue.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Thread-safe and non-blocking
     * */
    bool pop (T & ret)
    {
        return pop<T>(ret);
    }

    /** Pops object from queue.
     *
     * \pre type U must be constructible by T and copyable, or T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Thread-safe and non-blocking
     * */
    template <typename U>
    bool pop (U & ret)
    {
        using detail::likely;
        for (;;) {
            tagged_node_handle head = head_.load(memory_order_acquire);
            node * head_ptr = pool.get_pointer(head);

            tagged_node_handle tail = tail_.load(memory_order_acquire);
            tagged_node_handle next = head_ptr->next.load(memory_order_acquire);
            node * next_ptr = pool.get_pointer(next);

            tagged_node_handle head2 = head_.load(memory_order_acquire);
            if (likely(head == head2)) {
                if (pool.get_handle(head) == pool.get_handle(tail)) {
                    if (next_ptr == 0)
                        return false; /* only the dummy node is left: queue is empty */

                    /* tail is lagging behind: help the pushing thread to advance it */
                    tagged_node_handle new_tail(pool.get_handle(next), tail.get_tag() + 1);
                    tail_.compare_exchange_strong(tail, new_tail);

                } else {
                    if (next_ptr == 0)
                        /* this check is not part of the original algorithm as published by michael and scott
                         *
                         * however we reuse the tagged_ptr part for the freelist and clear the next part during node
                         * allocation. we can observe a null-pointer here.
                         * */
                        continue;
                    detail::copy_payload(next_ptr->data, ret);

                    tagged_node_handle new_head(pool.get_handle(next), head.get_tag() + 1);
                    if (head_.compare_exchange_weak(head, new_head)) {
                        pool.template destruct<true>(head);
                        return true;
                    }
                }
            }
        }
    }

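    /* Caller-side sketch (illustrative, not part of this class): a typical consumer
     * loop drains everything that is currently available, assuming a hypothetical
     * handle() function:
     *
     *     T value;
     *     while (q.pop(value))
     *         handle(value);
     */
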
    /** Pops object from queue.
     *
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Not thread-safe, but non-blocking
     * */
    bool unsynchronized_pop (T & ret)
    {
        return unsynchronized_pop<T>(ret);
    }

    /** Pops object from queue.
     *
     * \pre type U must be constructible by T and copyable, or T must be convertible to U
     * \post if pop operation is successful, object will be copied to ret.
     * \returns true, if the pop operation is successful, false if queue was empty.
     *
     * \note Not thread-safe, but non-blocking
     * */
    template <typename U>
    bool unsynchronized_pop (U & ret)
    {
        for (;;) {
            tagged_node_handle head = head_.load(memory_order_relaxed);
            node * head_ptr = pool.get_pointer(head);
            tagged_node_handle tail = tail_.load(memory_order_relaxed);
            tagged_node_handle next = head_ptr->next.load(memory_order_relaxed);
            node * next_ptr = pool.get_pointer(next);

            if (pool.get_handle(head) == pool.get_handle(tail)) {
                if (next_ptr == 0)
                    return false; /* only the dummy node is left: queue is empty */

                /* tail is lagging behind: advance it */
                tagged_node_handle new_tail(pool.get_handle(next), tail.get_tag() + 1);
                tail_.store(new_tail);
            } else {
                if (next_ptr == 0)
                    /* this check is not part of the original algorithm as published by michael and scott
                     *
                     * however we reuse the tagged_ptr part for the freelist and clear the next part during node
                     * allocation. we can observe a null-pointer here.
                     * */
                    continue;
                detail::copy_payload(next_ptr->data, ret);

                tagged_node_handle new_head(pool.get_handle(next), head.get_tag() + 1);
                head_.store(new_head);
                pool.template destruct<false>(head);
                return true;
            }
        }
    }

private:
#ifndef BOOST_DOXYGEN_INVOKED
    atomic<tagged_node_handle> head_;
    static const int padding_size = BOOST_LOCKFREE_CACHELINE_BYTES - sizeof(tagged_node_handle);
    char padding1[padding_size]; /* keep head_ and tail_ on separate cache lines to avoid false sharing */
    atomic<tagged_node_handle> tail_;
    char padding2[padding_size];

    pool_t pool;
#endif
};

} /* namespace lockfree */
} /* namespace boost */

#endif /* BOOST_LOCKFREE_FIFO_HPP_INCLUDED */