/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */
/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64bit operations on platforms with 32bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.  Never
 * read or assign directly from or to a value -- always use this API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
 **/

/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
 **/

/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other lowlevel parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions can not call back into any part of GLib
 * without risking recursion.
 */
91 #ifdef G_ATOMIC_LOCK_FREE
93 /* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
94 * implement the atomic operations in a lock-free manner.
97 #if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
100 * @atomic: a pointer to a #gint or #guint
102 * Gets the current value of @atomic.
104 * This call acts as a full compiler and hardware
105 * memory barrier (before the get).
107 * Returns: the value of the integer
112 (g_atomic_int_get
) (const volatile gint
*atomic
)
114 return g_atomic_int_get (atomic
);
119 * @atomic: a pointer to a #gint or #guint
120 * @newval: a new value to store
122 * Sets the value of @atomic to @newval.
124 * This call acts as a full compiler and hardware
125 * memory barrier (after the set).
130 (g_atomic_int_set
) (volatile gint
*atomic
,
133 g_atomic_int_set (atomic
, newval
);
138 * @atomic: a pointer to a #gint or #guint
140 * Increments the value of @atomic by 1.
142 * Think of this operation as an atomic version of
143 * <literal>{ *@atomic += 1; }</literal>
145 * This call acts as a full compiler and hardware memory barrier.
150 (g_atomic_int_inc
) (volatile gint
*atomic
)
152 g_atomic_int_inc (atomic
);
156 * g_atomic_int_dec_and_test:
157 * @atomic: a pointer to a #gint or #guint
159 * Decrements the value of @atomic by 1.
161 * Think of this operation as an atomic version of
162 * <literal>{ *@atomic -= 1; return (*@atomic == 0); }</literal>
164 * This call acts as a full compiler and hardware memory barrier.
166 * Returns: %TRUE if the resultant value is zero
171 (g_atomic_int_dec_and_test
) (volatile gint
*atomic
)
173 return g_atomic_int_dec_and_test (atomic
);
177 * g_atomic_int_compare_and_exchange:
178 * @atomic: a pointer to a #gint or #guint
179 * @oldval: the value to compare with
180 * @newval: the value to conditionally replace with
182 * Compares @atomic to @oldval and, if equal, sets it to @newval.
183 * If @atomic was not equal to @oldval then no change occurs.
185 * This compare and exchange is done atomically.
187 * Think of this operation as an atomic version of
188 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
190 * This call acts as a full compiler and hardware memory barrier.
192 * Returns: %TRUE if the exchange took place
197 (g_atomic_int_compare_and_exchange
) (volatile gint
*atomic
,
201 return g_atomic_int_compare_and_exchange (atomic
, oldval
, newval
);
206 * @atomic: a pointer to a #gint or #guint
207 * @val: the value to add
209 * Atomically adds @val to the value of @atomic.
211 * Think of this operation as an atomic version of
212 * <literal>{ tmp = *atomic; *@atomic += @val; return tmp; }</literal>
214 * This call acts as a full compiler and hardware memory barrier.
216 * Before version 2.30, this function did not return a value
217 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
219 * Returns: the value of @atomic before the add, signed
224 (g_atomic_int_add
) (volatile gint
*atomic
,
227 return g_atomic_int_add (atomic
, val
);
232 * @atomic: a pointer to a #gint or #guint
233 * @val: the value to 'and'
235 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
236 * storing the result back in @atomic.
238 * This call acts as a full compiler and hardware memory barrier.
240 * Think of this operation as an atomic version of
241 * <literal>{ tmp = *atomic; *@atomic &= @val; return tmp; }</literal>
243 * Returns: the value of @atomic before the operation, unsigned
248 (g_atomic_int_and
) (volatile guint
*atomic
,
251 return g_atomic_int_and (atomic
, val
);
256 * @atomic: a pointer to a #gint or #guint
257 * @val: the value to 'or'
259 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
260 * storing the result back in @atomic.
262 * Think of this operation as an atomic version of
263 * <literal>{ tmp = *atomic; *@atomic |= @val; return tmp; }</literal>
265 * This call acts as a full compiler and hardware memory barrier.
267 * Returns: the value of @atomic before the operation, unsigned
272 (g_atomic_int_or
) (volatile guint
*atomic
,
275 return g_atomic_int_or (atomic
, val
);
280 * @atomic: a pointer to a #gint or #guint
281 * @val: the value to 'xor'
283 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
284 * storing the result back in @atomic.
286 * Think of this operation as an atomic version of
287 * <literal>{ tmp = *atomic; *@atomic ^= @val; return tmp; }</literal>
289 * This call acts as a full compiler and hardware memory barrier.
291 * Returns: the value of @atomic before the operation, unsigned
296 (g_atomic_int_xor
) (volatile guint
*atomic
,
299 return g_atomic_int_xor (atomic
, val
);
304 * g_atomic_pointer_get:
305 * @atomic: a pointer to a #gpointer-sized value
307 * Gets the current value of @atomic.
309 * This call acts as a full compiler and hardware
310 * memory barrier (before the get).
312 * Returns: the value of the pointer
317 (g_atomic_pointer_get
) (const volatile void *atomic
)
319 return g_atomic_pointer_get ((const volatile gpointer
*) atomic
);
323 * g_atomic_pointer_set:
324 * @atomic: a pointer to a #gpointer-sized value
325 * @newval: a new value to store
327 * Sets the value of @atomic to @newval.
329 * This call acts as a full compiler and hardware
330 * memory barrier (after the set).
335 (g_atomic_pointer_set
) (volatile void *atomic
,
338 g_atomic_pointer_set ((volatile gpointer
*) atomic
, newval
);
342 * g_atomic_pointer_compare_and_exchange:
343 * @atomic: a pointer to a #gpointer-sized value
344 * @oldval: the value to compare with
345 * @newval: the value to conditionally replace with
347 * Compares @atomic to @oldval and, if equal, sets it to @newval.
348 * If @atomic was not equal to @oldval then no change occurs.
350 * This compare and exchange is done atomically.
352 * Think of this operation as an atomic version of
353 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
355 * This call acts as a full compiler and hardware memory barrier.
357 * Returns: %TRUE if the exchange took place
362 (g_atomic_pointer_compare_and_exchange
) (volatile void *atomic
,
366 return g_atomic_pointer_compare_and_exchange ((volatile gpointer
*) atomic
,
371 * g_atomic_pointer_add:
372 * @atomic: a pointer to a #gpointer-sized value
373 * @val: the value to add
375 * Atomically adds @val to the value of @atomic.
377 * Think of this operation as an atomic version of
378 * <literal>{ tmp = *atomic; *@atomic += @val; return tmp; }</literal>
380 * This call acts as a full compiler and hardware memory barrier.
382 * Returns: the value of @atomic before the add, signed
387 (g_atomic_pointer_add
) (volatile void *atomic
,
390 return g_atomic_pointer_add ((volatile gpointer
*) atomic
, val
);
394 * g_atomic_pointer_and:
395 * @atomic: a pointer to a #gpointer-sized value
396 * @val: the value to 'and'
398 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
399 * storing the result back in @atomic.
401 * Think of this operation as an atomic version of
402 * <literal>{ tmp = *atomic; *@atomic &= @val; return tmp; }</literal>
404 * This call acts as a full compiler and hardware memory barrier.
406 * Returns: the value of @atomic before the operation, unsigned
411 (g_atomic_pointer_and
) (volatile void *atomic
,
414 return g_atomic_pointer_and ((volatile gpointer
*) atomic
, val
);
418 * g_atomic_pointer_or:
419 * @atomic: a pointer to a #gpointer-sized value
420 * @val: the value to 'or'
422 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
423 * storing the result back in @atomic.
425 * Think of this operation as an atomic version of
426 * <literal>{ tmp = *atomic; *@atomic |= @val; return tmp; }</literal>
428 * This call acts as a full compiler and hardware memory barrier.
430 * Returns: the value of @atomic before the operation, unsigned
435 (g_atomic_pointer_or
) (volatile void *atomic
,
438 return g_atomic_pointer_or ((volatile gpointer
*) atomic
, val
);
442 * g_atomic_pointer_xor:
443 * @atomic: a pointer to a #gpointer-sized value
444 * @val: the value to 'xor'
446 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
447 * storing the result back in @atomic.
449 * Think of this operation as an atomic version of
450 * <literal>{ tmp = *atomic; *@atomic ^= @val; return tmp; }</literal>
452 * This call acts as a full compiler and hardware memory barrier.
454 * Returns: the value of @atomic before the operation, unsigned
459 (g_atomic_pointer_xor
) (volatile void *atomic
,
462 return g_atomic_pointer_xor ((volatile gpointer
*) atomic
, val
);
465 #elif defined (G_PLATFORM_WIN32)
468 #if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
469 #define InterlockedAnd _InterlockedAnd
470 #define InterlockedOr _InterlockedOr
471 #define InterlockedXor _InterlockedXor
474 #if !defined (_MSC_VER) || _MSC_VER <= 1200
475 #include "gmessages.h"
476 /* Inlined versions for older compiler */
478 _gInterlockedAnd (volatile guint
*atomic
,
486 j
= InterlockedCompareExchange(atomic
, i
& val
, i
);
491 #define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
493 _gInterlockedOr (volatile guint
*atomic
,
501 j
= InterlockedCompareExchange(atomic
, i
| val
, i
);
506 #define InterlockedOr(a,b) _gInterlockedOr(a,b)
508 _gInterlockedXor (volatile guint
*atomic
,
516 j
= InterlockedCompareExchange(atomic
, i
^ val
, i
);
521 #define InterlockedXor(a,b) _gInterlockedXor(a,b)
525 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
528 (g_atomic_int_get
) (const volatile gint
*atomic
)
535 (g_atomic_int_set
) (volatile gint
*atomic
,
543 (g_atomic_int_inc
) (volatile gint
*atomic
)
545 InterlockedIncrement (atomic
);
549 (g_atomic_int_dec_and_test
) (volatile gint
*atomic
)
551 return InterlockedDecrement (atomic
) == 0;
555 (g_atomic_int_compare_and_exchange
) (volatile gint
*atomic
,
559 return InterlockedCompareExchange (atomic
, newval
, oldval
) == oldval
;
563 (g_atomic_int_add
) (volatile gint
*atomic
,
566 return InterlockedExchangeAdd (atomic
, val
);
570 (g_atomic_int_and
) (volatile guint
*atomic
,
573 return InterlockedAnd (atomic
, val
);
577 (g_atomic_int_or
) (volatile guint
*atomic
,
580 return InterlockedOr (atomic
, val
);
584 (g_atomic_int_xor
) (volatile guint
*atomic
,
587 return InterlockedXor (atomic
, val
);
592 (g_atomic_pointer_get
) (const volatile void *atomic
)
594 const volatile gpointer
*ptr
= atomic
;
601 (g_atomic_pointer_set
) (volatile void *atomic
,
604 volatile gpointer
*ptr
= atomic
;
611 (g_atomic_pointer_compare_and_exchange
) (volatile void *atomic
,
615 return InterlockedCompareExchangePointer (atomic
, newval
, oldval
) == oldval
;
619 (g_atomic_pointer_add
) (volatile void *atomic
,
622 #if GLIB_SIZEOF_VOID_P == 8
623 return InterlockedExchangeAdd64 (atomic
, val
);
625 return InterlockedExchangeAdd (atomic
, val
);
630 (g_atomic_pointer_and
) (volatile void *atomic
,
633 #if GLIB_SIZEOF_VOID_P == 8
634 return InterlockedAnd64 (atomic
, val
);
636 return InterlockedAnd (atomic
, val
);
641 (g_atomic_pointer_or
) (volatile void *atomic
,
644 #if GLIB_SIZEOF_VOID_P == 8
645 return InterlockedOr64 (atomic
, val
);
647 return InterlockedOr (atomic
, val
);
652 (g_atomic_pointer_xor
) (volatile void *atomic
,
655 #if GLIB_SIZEOF_VOID_P == 8
656 return InterlockedXor64 (atomic
, val
);
658 return InterlockedXor (atomic
, val
);
663 /* This error occurs when ./configure decided that we should be capable
664 * of lock-free atomics but we find at compile-time that we are not.
666 #error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.
668 #endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */
670 #else /* G_ATOMIC_LOCK_FREE */
672 /* We are not permitted to call into any GLib functions from here, so we
673 * can not use GMutex.
675 * Fortunately, we already take care of the Windows case above, and all
676 * non-Windows platforms on which glib runs have pthreads. Use those.
680 static pthread_mutex_t g_atomic_lock
= PTHREAD_MUTEX_INITIALIZER
;
683 (g_atomic_int_get
) (const volatile gint
*atomic
)
687 pthread_mutex_lock (&g_atomic_lock
);
689 pthread_mutex_unlock (&g_atomic_lock
);
695 (g_atomic_int_set
) (volatile gint
*atomic
,
698 pthread_mutex_lock (&g_atomic_lock
);
700 pthread_mutex_unlock (&g_atomic_lock
);
704 (g_atomic_int_inc
) (volatile gint
*atomic
)
706 pthread_mutex_lock (&g_atomic_lock
);
708 pthread_mutex_unlock (&g_atomic_lock
);
712 (g_atomic_int_dec_and_test
) (volatile gint
*atomic
)
716 pthread_mutex_lock (&g_atomic_lock
);
717 is_zero
= --(*atomic
) == 0;
718 pthread_mutex_unlock (&g_atomic_lock
);
724 (g_atomic_int_compare_and_exchange
) (volatile gint
*atomic
,
730 pthread_mutex_lock (&g_atomic_lock
);
732 if ((success
= (*atomic
== oldval
)))
735 pthread_mutex_unlock (&g_atomic_lock
);
741 (g_atomic_int_add
) (volatile gint
*atomic
,
746 pthread_mutex_lock (&g_atomic_lock
);
748 *atomic
= oldval
+ val
;
749 pthread_mutex_unlock (&g_atomic_lock
);
755 (g_atomic_int_and
) (volatile guint
*atomic
,
760 pthread_mutex_lock (&g_atomic_lock
);
762 *atomic
= oldval
& val
;
763 pthread_mutex_unlock (&g_atomic_lock
);
769 (g_atomic_int_or
) (volatile guint
*atomic
,
774 pthread_mutex_lock (&g_atomic_lock
);
776 *atomic
= oldval
| val
;
777 pthread_mutex_unlock (&g_atomic_lock
);
783 (g_atomic_int_xor
) (volatile guint
*atomic
,
788 pthread_mutex_lock (&g_atomic_lock
);
790 *atomic
= oldval
^ val
;
791 pthread_mutex_unlock (&g_atomic_lock
);
798 (g_atomic_pointer_get
) (const volatile void *atomic
)
800 const volatile gpointer
*ptr
= atomic
;
803 pthread_mutex_lock (&g_atomic_lock
);
805 pthread_mutex_unlock (&g_atomic_lock
);
811 (g_atomic_pointer_set
) (volatile void *atomic
,
814 volatile gpointer
*ptr
= atomic
;
816 pthread_mutex_lock (&g_atomic_lock
);
818 pthread_mutex_unlock (&g_atomic_lock
);
822 (g_atomic_pointer_compare_and_exchange
) (volatile void *atomic
,
826 volatile gpointer
*ptr
= atomic
;
829 pthread_mutex_lock (&g_atomic_lock
);
831 if ((success
= (*ptr
== oldval
)))
834 pthread_mutex_unlock (&g_atomic_lock
);
840 (g_atomic_pointer_add
) (volatile void *atomic
,
843 volatile gssize
*ptr
= atomic
;
846 pthread_mutex_lock (&g_atomic_lock
);
849 pthread_mutex_unlock (&g_atomic_lock
);
855 (g_atomic_pointer_and
) (volatile void *atomic
,
858 volatile gsize
*ptr
= atomic
;
861 pthread_mutex_lock (&g_atomic_lock
);
864 pthread_mutex_unlock (&g_atomic_lock
);
870 (g_atomic_pointer_or
) (volatile void *atomic
,
873 volatile gsize
*ptr
= atomic
;
876 pthread_mutex_lock (&g_atomic_lock
);
879 pthread_mutex_unlock (&g_atomic_lock
);
885 (g_atomic_pointer_xor
) (volatile void *atomic
,
888 volatile gsize
*ptr
= atomic
;
891 pthread_mutex_lock (&g_atomic_lock
);
894 pthread_mutex_unlock (&g_atomic_lock
);
902 * g_atomic_int_exchange_and_add:
903 * @atomic: a pointer to a #gint
904 * @val: the value to add
906 * This function existed before g_atomic_int_add() returned the prior
907 * value of the integer (which it now does). It is retained only for
908 * compatibility reasons. Don't use this function in new code.
910 * Returns: the value of @atomic before the add, signed
912 * Deprecated: 2.30: Use g_atomic_int_add() instead.
915 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
918 return (g_atomic_int_add
) (atomic
, val
);