//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H
#define _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H

#include <__cxx03/__assert>
#include <__cxx03/__config>
#include <__cxx03/__stop_token/atomic_unique_lock.h>
#include <__cxx03/__stop_token/intrusive_list_view.h>
#include <__cxx03/__thread/id.h>
#include <__cxx03/atomic>
#include <__cxx03/cstdint>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#  pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)

struct __stop_callback_base : __intrusive_node_base<__stop_callback_base> {
  using __callback_fn_t = void(__stop_callback_base*) noexcept;
  _LIBCPP_HIDE_FROM_ABI explicit __stop_callback_base(__callback_fn_t* __callback_fn) : __callback_fn_(__callback_fn) {}

  _LIBCPP_HIDE_FROM_ABI void __invoke() noexcept { __callback_fn_(this); }

  __callback_fn_t* __callback_fn_;
  atomic<bool> __completed_ = false;
  bool* __destroyed_        = nullptr;
};
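
// Illustrative sketch (not part of the original header): a concrete callback
// type derives from __stop_callback_base and passes a captureless trampoline
// that downcasts the base pointer to recover its own state. __my_callback and
// _Fn below are hypothetical names used only for illustration:
//
//   template <class _Fn>
//   struct __my_callback : __stop_callback_base {
//     _Fn __fn_;
//     explicit __my_callback(_Fn __fn)
//         : __stop_callback_base([](__stop_callback_base* __base) noexcept {
//             static_cast<__my_callback*>(__base)->__fn_();
//           }),
//           __fn_(std::move(__fn)) {}
//   };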

class __stop_state {
  static constexpr uint32_t __stop_requested_bit        = 1;
  static constexpr uint32_t __callback_list_locked_bit  = 1 << 1;
  static constexpr uint32_t __stop_source_counter_shift = 2;

  // The "stop_source counter" is not used for lifetime reference counting.
  // When the number of stop_source objects reaches 0, stop_possible on the
  // remaining stop_tokens will return false. We need this counter to track that.
  //
  // The "callback list locked" bit implements the atomic_unique_lock that
  // guards the operations on the callback list.
  //
  //        31 - 2         |          1           |       0        |
  //  stop_source counter  | callback list locked | stop_requested |
  atomic<uint32_t> __state_ = 0;
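
  // Worked example of the packed encoding (illustrative, derived from the
  // constants above): with two live stop_source objects, the list unlocked,
  // and no stop requested, __state_ == 0b1000 (i.e. 2 << __stop_source_counter_shift).
  // After a successful request_stop releases the lock, __state_ == 0b1001.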

  // Reference count for stop_token + stop_callback + stop_source.
  // When the counter reaches zero, the state is destroyed.
  // It is used by __intrusive_shared_ptr, but it is stored here for better layout.
  atomic<uint32_t> __ref_count_ = 0;

  using __state_t            = uint32_t;
  using __callback_list_lock = __atomic_unique_lock<__state_t, __callback_list_locked_bit>;
  using __callback_list      = __intrusive_list_view<__stop_callback_base>;

  __callback_list __callback_list_;
  __thread_id __requesting_thread_;

public:
  _LIBCPP_HIDE_FROM_ABI __stop_state() noexcept = default;

  _LIBCPP_HIDE_FROM_ABI void __increment_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) <= static_cast<__state_t>(~(1 << __stop_source_counter_shift)),
        "stop_source's counter has reached the maximum; incrementing it would overflow");
    __state_.fetch_add(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  // We do not destroy the object when the counter reaches zero, nor do we have
  // operations that depend on the ordering of the decrement, so relaxed is enough.
  _LIBCPP_HIDE_FROM_ABI void __decrement_stop_source_counter() noexcept {
    _LIBCPP_ASSERT_UNCATEGORIZED(
        __state_.load(std::memory_order_relaxed) >= static_cast<__state_t>(1 << __stop_source_counter_shift),
        "stop_source's counter is 0; decrementing it would underflow");
    __state_.fetch_sub(1 << __stop_source_counter_shift, std::memory_order_relaxed);
  }

  _LIBCPP_HIDE_FROM_ABI bool __stop_requested() const noexcept {
    // acquire because [thread.stoptoken.intro]: "A call to request_stop that returns true
    // synchronizes with a call to stop_requested on an associated stop_token or stop_source
    // object that returns true."
    // request_stop's compare_exchange_weak has release semantics, which syncs with this acquire.
    return (__state_.load(std::memory_order_acquire) & __stop_requested_bit) != 0;
  }

  _LIBCPP_HIDE_FROM_ABI bool __stop_possible_for_stop_token() const noexcept {
    // [stoptoken.mem] stop_possible is false if "a stop request was not made and there are
    // no associated stop_source objects".
    // TODO: Can this be std::memory_order_relaxed, given that the standard requires nothing
    // here beyond not introducing data races?
    __state_t __current_state = __state_.load(std::memory_order_acquire);
    return ((__current_state & __stop_requested_bit) != 0) || ((__current_state >> __stop_source_counter_shift) != 0);
  }
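
  // Illustrative example (not in the original header): with __state_ == 0b1000
  // (two stop_sources, no stop requested) this returns true via the counter test.
  // After the last stop_source is destroyed without a stop ever being requested,
  // __state_ == 0 and this returns false.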

  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __request_stop() noexcept {
    auto __cb_list_lock = __try_lock_for_request_stop();
    if (!__cb_list_lock.__owns_lock()) {
      return false;
    }
    __requesting_thread_ = this_thread::get_id();

    while (!__callback_list_.__empty()) {
      auto __cb = __callback_list_.__pop_front();

      // allow other callbacks to be removed while invoking the current callback
      __cb_list_lock.__unlock();

      bool __destroyed   = false;
      __cb->__destroyed_ = &__destroyed;

      __cb->__invoke();

      // __cb's invoke function could potentially delete itself. We need to check before accessing __cb's members.
      if (!__destroyed) {
        // We need to reset the __destroyed_ pointer to nullptr; otherwise it would point to a local
        // variable that is destroyed at the end of this loop iteration.
        __cb->__destroyed_ = nullptr;

        // [stopcallback.cons] "If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed."
        // This release syncs with the acquire in __remove_callback.
        __cb->__completed_.store(true, std::memory_order_release);
        __cb->__completed_.notify_all();
      }

      __cb_list_lock.__lock();
    }

    return true;
  }
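
  // Illustrative scenario (not in the original header): a callback whose body
  // destroys its own stop_callback. __invoke() above runs the destructor, which
  // calls __remove_callback on this same thread; that path sets *__destroyed_
  // to true, so the loop skips writing to the already-destroyed node:
  //
  //   std::stop_source __src;
  //   std::optional<std::stop_callback<std::function<void()>>> __cb;
  //   __cb.emplace(__src.get_token(), [&] { __cb.reset(); });
  //   __src.request_stop(); // safe: the node is not touched after destruction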

  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __add_callback(__stop_callback_base* __cb) noexcept {
    // If a stop has already been requested, do not try to lock the list.
    const auto __give_up_trying_to_lock_condition = [__cb](__state_t __state) {
      if ((__state & __stop_requested_bit) != 0) {
        // Stop has already been requested: synchronously run the callback; there is no need to lock the list.
        __cb->__invoke();
        return true;
      }
      // No stop_source left: no need to lock the list to add the callback, as it can never be invoked.
      return (__state >> __stop_source_counter_shift) == 0;
    };

    __callback_list_lock __cb_list_lock(__state_, __give_up_trying_to_lock_condition);

    if (!__cb_list_lock.__owns_lock()) {
      return false;
    }

    __callback_list_.__push_front(__cb);

    return true;
    // The lock is released here: [thread.stoptoken.intro] "Registration of a callback synchronizes with
    // the invocation of that callback."
    // Note: this release syncs with the acquire in request_stop's __try_lock_for_request_stop.
  }
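
  // Illustrative behavior (not in the original header): registering a callback
  // on a token whose state already has the stop_requested bit set runs it
  // immediately on the registering thread, without ever touching the list:
  //
  //   std::stop_source __src;
  //   __src.request_stop();
  //   std::stop_callback __cb(__src.get_token(), [] { /* runs right here */ });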

  // Called by the destructor of stop_callback.
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __remove_callback(__stop_callback_base* __cb) noexcept {
    __callback_list_lock __cb_list_lock(__state_);

    // Under the condition below, the request_stop call has just popped __cb from the list and could be
    // executing it right now.
    bool __potentially_executing_now = __cb->__prev_ == nullptr && !__callback_list_.__is_head(__cb);

    if (__potentially_executing_now) {
      auto __requested_thread = __requesting_thread_;
      __cb_list_lock.__unlock();

      if (std::this_thread::get_id() != __requested_thread) {
        // [stopcallback.cons] "If callback is concurrently executing on another thread, then the return
        // from the invocation of callback strongly happens before ([intro.races]) callback is destroyed."
        __cb->__completed_.wait(false, std::memory_order_acquire);
      } else {
        // The destructor of stop_callback runs on the same thread as the thread that invokes the callback.
        // The callback is potentially invoking its own destructor. Set the flag to avoid accessing destroyed
        // members on the invoking side.
        if (__cb->__destroyed_) {
          *__cb->__destroyed_ = true;
        }
      }
    } else {
      __callback_list_.__remove(__cb);
    }
  }
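
  // Note on the detection above (illustrative, based on how the intrusive list
  // is used here): a node still linked in the list either is the head or has a
  // non-null __prev_. request_stop's __pop_front removes the head, which leaves
  // the popped node with __prev_ == nullptr while it is no longer the head;
  // that is exactly the __potentially_executing_now condition.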

private:
  _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI __callback_list_lock __try_lock_for_request_stop() noexcept {
    // If stop has already been requested, do not try to request stop or lock the list again.
    const auto __lock_fail_condition = [](__state_t __state) { return (__state & __stop_requested_bit) != 0; };

    // Set the locked and requested bits at the same time.
    const auto __after_lock_state = [](__state_t __state) {
      return __state | __callback_list_locked_bit | __stop_requested_bit;
    };

    // acq because [thread.stoptoken.intro] "Registration of a callback synchronizes with the invocation of
    // that callback." We are going to invoke the callbacks after getting the lock; acquire ensures that we
    // see the registration of a callback (and all other writes that happen-before the add_callback).
    // Note: the rel (unlock) in add_callback syncs with this acq.
    //
    // rel because [thread.stoptoken.intro] "A call to request_stop that returns true synchronizes with a
    // call to stop_requested on an associated stop_token or stop_source object that returns true."
    // We need to make sure that all writes (including user code) before request_stop are made visible to
    // the threads that are waiting for `stop_requested == true`.
    // Note: this rel syncs with the acq in `stop_requested`.
    const auto __locked_ordering = std::memory_order_acq_rel;

    return __callback_list_lock(__state_, __lock_fail_condition, __after_lock_state, __locked_ordering);
  }

  template <class _Tp>
  friend struct __intrusive_shared_ptr_traits;
};

template <class _Tp>
struct __intrusive_shared_ptr_traits;

template <>
struct __intrusive_shared_ptr_traits<__stop_state> {
  _LIBCPP_HIDE_FROM_ABI static atomic<uint32_t>& __get_atomic_ref_count(__stop_state& __state) {
    return __state.__ref_count_;
  }
};
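
// Illustrative note (not in the original header): __intrusive_shared_ptr is
// assumed to look up this trait to adjust the reference count stored inside
// the pointee itself, along the lines of:
//
//   __intrusive_shared_ptr_traits<__stop_state>::__get_atomic_ref_count(*__ptr)
//       .fetch_add(1, std::memory_order_relaxed);
//
// Keeping __ref_count_ inside __stop_state avoids a separate control block.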

#endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_THREADS)

_LIBCPP_END_NAMESPACE_STD

#endif // _LIBCPP___CXX03___STOP_TOKEN_STOP_STATE_H