/* Spin locks for communication between threads and signal handlers.
   Copyright (C) 2020-2024 Free Software Foundation, Inc.

   This file is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   This file is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <https://www.gnu.org/licenses/>.  */

/* Written by Bruno Haible <bruno@clisp.org>, 2020.  */

#include <config.h>

/* Specification.  */
#include "asyncsafe-spin.h"

#include <stdbool.h>
#include <stdlib.h>

#if defined _AIX
# include <sys/atomic_op.h>
#endif

#if 0x590 <= __SUNPRO_C && __STDC__
# define asm __asm
#endif

#if defined _WIN32 && ! defined __CYGWIN__
/* Use Windows threads.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_init (lock);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_lock (lock);
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  if (glwthread_spin_unlock (lock))
    abort ();
}

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_destroy (lock);
}

#else

# if HAVE_PTHREAD_H
/* Use POSIX threads.  */

/* We don't use semaphores (although sem_post() is allowed in signal handlers),
   because it would require linking with -lrt on HP-UX 11, OSF/1, Solaris 10,
   and also because on macOS only named semaphores work.

   We don't use C11 <stdatomic.h> (available in GCC >= 4.9) because it would
   require linking with -latomic.  */

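/* For illustration only (an assumption, not part of this module): if linking
   with -latomic were acceptable, an equivalent spin lock could be written
   with the standard C11 <stdatomic.h> interface, roughly like this:

     #include <stdatomic.h>

     static atomic_flag lock = ATOMIC_FLAG_INIT;

     static void
     example_lock (void)
     {
       while (atomic_flag_test_and_set_explicit (&lock, memory_order_seq_cst))
         ;
     }

     static void
     example_unlock (void)
     {
       atomic_flag_clear_explicit (&lock, memory_order_seq_cst);
     }
*/
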
#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7) \
       || __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 1))
/* Use GCC built-ins (available in GCC >= 4.7 and clang >= 3.1) that operate on
   the first byte of the lock.
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>  */

#   if 1
/* An implementation that verifies the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  On failure,
     __atomic_compare_exchange_n stores the observed value in ZERO, so it
     must be reset to 0 on every iteration.  */
  asyncsafe_spinlock_t zero;
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  asyncsafe_spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
}

#   else
/* An implementation that is a little bit more optimized, but does not verify
   the unlocks.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}

#   endif

#  elif (((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) \
          || __clang_major__ >= 3) \
         && HAVE_ATOMIC_COMPARE_AND_SWAP_GCC41)
/* Use GCC built-ins (available on many platforms with GCC >= 4.1 or
   clang >= 3.0).
   Documentation:
   <https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html>  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  __sync_synchronize ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  /* Wait until *lock becomes 0, then replace it with 1.  */
  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0.  */
  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
    abort ();
}

#  elif defined _AIX
/* AIX  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  _clear_lock (vp, 0);
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  while (_check_lock (vp, 0, 1))
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  if (_check_lock (vp, 1, 0))
    abort ();
}

#  elif ((defined __GNUC__ || defined __clang__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)) || (defined __TINYC__ && (defined __i386 || defined __x86_64__))
/* For older versions of GCC or clang, use inline assembly.
   GCC, clang, and the Oracle Studio C 12 compiler understand GCC's extended
   asm syntax, but the plain Oracle Studio C 11 compiler understands only
   simple asm.  */
/* An implementation that verifies the unlocks.  */

static void
memory_barrier (void)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
#    if defined __i386 || defined __x86_64__
#     if defined __TINYC__ && defined __i386
  /* Cannot use the SSE instruction "mfence" with this compiler.  */
  asm volatile ("lock orl $0,(%esp)");
#     else
  asm volatile ("mfence");
#     endif
#    endif
#    if defined __sparc
  asm volatile ("membar 2");
#    endif
#   else
#    if defined __i386 || defined __x86_64__
  asm ("mfence");
#    endif
#    if defined __sparc
  asm ("membar 2");
#    endif
#   endif
}

/* Store NEWVAL in *VP if the old value *VP is == CMP.
   Return the old value.  */
static unsigned int
atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                         unsigned int newval)
{
#   if defined __GNUC__ || defined __clang__ || __SUNPRO_C >= 0x590 || defined __TINYC__
  unsigned int oldval;
#    if defined __i386 || defined __x86_64__
  asm volatile (" lock\n cmpxchgl %3,(%1)"
                : "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
#    endif
#    if defined __sparc
  asm volatile (" cas [%1],%2,%3\n"
                " mov %3,%0"
                : "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
#    endif
  return oldval;
#   else /* __SUNPRO_C */
#    if defined __x86_64__
  asm (" movl %esi,%eax\n"
       " lock\n cmpxchgl %edx,(%rdi)");
#    elif defined __i386
  asm (" movl 16(%ebp),%ecx\n"
       " movl 12(%ebp),%eax\n"
       " movl 8(%ebp),%edx\n"
       " lock\n cmpxchgl %ecx,(%edx)");
#    endif
#    if defined __sparc
  asm (" cas [%i0],%i1,%i2\n"
       " mov %i2,%i0");
#    endif
#   endif
}

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  memory_barrier ();
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (atomic_compare_and_swap (vp, 0, 1) != 0)
    ;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  if (atomic_compare_and_swap (vp, 1, 0) != 1)
    abort ();
}

#  else
/* Fallback code.  It has some race conditions.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  while (*vp)
    ;
  *vp = 1;
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}

#  endif

# else
/* Provide a dummy implementation for single-threaded applications.  */

void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_lock (asyncsafe_spinlock_t *lock)
{
}

static inline void
do_unlock (asyncsafe_spinlock_t *lock)
{
}

# endif

void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
}

#endif

void
asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
                     const sigset_t *mask, sigset_t *saved_mask)
{
  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
  do_lock (lock);
}

void
asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
{
  do_unlock (lock);
  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}

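/* Illustrative usage sketch; this is an assumption for documentation
   purposes, not part of this module (SIGALRM and `counter' are invented for
   the example).  Both ordinary thread code and a SIGALRM handler update a
   shared counter; each caller blocks SIGALRM while holding the lock, so the
   handler cannot interrupt a lock holder on its own thread and deadlock:

     static asyncsafe_spinlock_t lock;       set up once with asyncsafe_spin_init (&lock);
     static volatile unsigned int counter;

     static void
     bump_counter (void)
     {
       sigset_t mask;
       sigset_t saved_mask;
       sigemptyset (&mask);
       sigaddset (&mask, SIGALRM);
       asyncsafe_spin_lock (&lock, &mask, &saved_mask);
       counter++;
       asyncsafe_spin_unlock (&lock, &saved_mask);
     }

   bump_counter can be called both from normal thread code and from the
   SIGALRM handler itself.  */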