//===------------------------ memory.cpp ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define _LIBCPP_BUILDING_MEMORY
#include "memory"
#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#endif
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}

__shared_count::~__shared_count()
{
}

__shared_weak_count::~__shared_weak_count()
{
}

#if defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)
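
// These out-of-line definitions are only compiled when the deprecated legacy
// ABI is requested, so that binaries built against older releases (which
// expected these symbols in the library rather than as inline functions)
// keep linking.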

void
__shared_count::__add_shared() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_owners_);
}
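
// __shared_owners_ stores the owner count minus one, so a decrement that
// reaches -1 means the last owner has just released the object.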
bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

#endif // _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS

void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic stores like XADD and STREX
    // in a common case.  Those instructions are slow and do nasty
    // things to caches.
    //
    // IS THIS SAFE?  Yes.  During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us.  If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once.  The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}
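
// Backs weak_ptr<T>::lock(): atomically increment the shared count, but only
// while the object is still alive (owner count != -1, i.e. not yet expired);
// the compare-exchange loop retries until we succeed or observe expiry.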
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return nullptr;
}

#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)
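
// Base-class fallback for shared_ptr's get_deleter<D>(): control blocks that
// actually store a deleter override this and compare the requested type_info.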
const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return nullptr;
}

#endif  // _LIBCPP_NO_RTTI

#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
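
// A fixed pool of 16 mutexes serializes the atomic_load/atomic_store style
// free functions on shared_ptr; __get_sp_mut below hashes each shared_ptr
// address onto one of them.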
_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
   : __lx(p)
{
}
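
// Spin briefly with trylock/yield before falling back to a blocking lock, so
// the common uncontended case avoids a full mutex sleep/wake round trip.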
void
__sp_mut::lock() _NOEXCEPT
{
    auto m = static_cast<__libcpp_mutex_t*>(__lx);
    unsigned count = 0;
    while (!__libcpp_mutex_trylock(m))
    {
        if (++count > 16)
        {
            __libcpp_mutex_lock(m);
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() _NOEXCEPT
{
    __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}
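
// Map a shared_ptr's address to one of the pool mutexes.  __sp_mut_count is a
// power of two, so masking the hash with (__sp_mut_count - 1) selects a valid
// index without a division.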
__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}

#endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)
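
// libc++ does not implement garbage-collection support, so the C++11 pointer
// safety API is a set of no-ops and get_pointer_safety() always reports
// pointer_safety::relaxed.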
void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

#if !defined(_LIBCPP_ABI_POINTER_SAFETY_ENUM_TYPE)
pointer_safety get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}
#endif

void*
__undeclare_reachable(void* p)
{
    return p;
}
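
// std::align: round ptr up to the requested alignment if an aligned block of
// `size` bytes still fits within `space`; on success ptr and space are
// updated, otherwise both are left untouched and nullptr is returned.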
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}
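
// Illustrative use (not part of this file): align 32 bytes to a 16-byte
// boundary inside a caller-provided buffer.
//
//   char buf[64];
//   void*  p     = buf;
//   size_t space = sizeof(buf);
//   if (void* a = std::align(16, 32, p, space))
//       ;  // a == p, now 16-byte aligned; space reduced by the adjustment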

_LIBCPP_END_NAMESPACE_STD