/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"

/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.  Never
 * read or assign directly from or to a value -- always use this API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test(); a sketch of that
 * pattern follows this comment block.  Other uses that fall outside of
 * simple reference counting patterns are prone to subtle bugs and
 * occasionally undefined behaviour.  It is also worth noting that since
 * all of these operations require global synchronisation of the entire
 * machine, they can be quite slow.  In the case of performing multiple
 * atomic operations it can often be faster to simply acquire a mutex
 * lock around the critical area, perform the operations normally and
 * then release the lock.
 **/

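/* A minimal sketch of the reference counting pattern recommended
 * above.  'Buffer', 'buffer_ref' and 'buffer_unref' are hypothetical
 * names used for illustration only; they are not part of GLib.
 *
 * |[
 * typedef struct
 * {
 *   gint  ref_count;
 *   char *data;
 * } Buffer;
 *
 * static Buffer *
 * buffer_ref (Buffer *buffer)
 * {
 *   g_atomic_int_inc (&buffer->ref_count);
 *   return buffer;
 * }
 *
 * static void
 * buffer_unref (Buffer *buffer)
 * {
 *   if (g_atomic_int_dec_and_test (&buffer->ref_count))
 *     {
 *       g_free (buffer->data);
 *       g_free (buffer);
 *     }
 * }
 * ]|
 */
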
/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
 **/

/* This file is the lowest-level part of GLib.
 *
 * Other lowlevel parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions can not call back into any part of GLib
 * without risking recursion.
 */

#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)

#if defined(__ATOMIC_SEQ_CST) && !defined(__clang__)
/* The implementation used in this code path in gatomic.h assumes
 * a 4-byte int */
G_STATIC_ASSERT (sizeof (gint) == 4);

/* The implementations in gatomic.h assume 4- or 8-byte pointers */
G_STATIC_ASSERT (sizeof (void *) == 4 || sizeof (void *) == 8);
#endif

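/* The parenthesised function definitions below provide real, linkable
 * symbols for the same-named macros in gatomic.h: wrapping a function
 * name in parentheses suppresses expansion of a function-like macro,
 * so each function here can simply call its macro version.
 */
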
/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 **/
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of `{ *atomic += 1; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}

/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * `{ *atomic -= 1; return (*atomic == 0); }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}

/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}

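/* A sketch of the usual retry loop built on compare-and-exchange, here
 * raising a shared maximum; 'atomic_store_max' is a hypothetical
 * helper, not part of GLib.
 *
 * |[
 * static void
 * atomic_store_max (volatile gint *atomic,
 *                   gint           val)
 * {
 *   gint cur;
 *
 *   do
 *     cur = g_atomic_int_get (atomic);
 *   while (cur < val &&
 *          !g_atomic_int_compare_and_exchange (atomic, cur, val));
 * }
 * ]|
 */
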
/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
 *
 * Returns: the value of @atomic before the add, signed
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}

/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}

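/* A sketch of using the bitwise operations on a shared flag word; the
 * names here are hypothetical.  The returned prior value lets exactly
 * one caller observe the transition.
 *
 * |[
 * #define FLAG_CLOSED 1u
 *
 * static volatile guint shared_flags;
 *
 * static gboolean
 * mark_closed (void)
 * {
 *   guint before = g_atomic_int_or (&shared_flags, FLAG_CLOSED);
 *
 *   return (before & FLAG_CLOSED) == 0;
 * }
 * ]|
 */
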
/**
 * g_atomic_pointer_get:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}

/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}

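/* A sketch of the common once-only initialisation pattern built on the
 * pointer compare-and-exchange; 'get_table', 'create_table' and
 * 'destroy_table' are hypothetical names.
 *
 * |[
 * static volatile gpointer table_ptr;
 *
 * static gpointer
 * get_table (void)
 * {
 *   gpointer table = g_atomic_pointer_get (&table_ptr);
 *
 *   if (table == NULL)
 *     {
 *       table = create_table ();
 *
 *       if (!g_atomic_pointer_compare_and_exchange (&table_ptr,
 *                                                   NULL, table))
 *         {
 *           // another thread won the race: discard ours, take theirs
 *           destroy_table (table);
 *           table = g_atomic_pointer_get (&table_ptr);
 *         }
 *     }
 *
 *   return table;
 * }
 * ]|
 */
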
/**
 * g_atomic_pointer_add:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}

#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compiler */
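/* Each emulation below is a standard compare-and-swap loop: it retries
 * InterlockedCompareExchange() until the value it computed from was
 * still the value actually in memory.
 */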
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)
static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif

/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}

#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#else /* G_ATOMIC_LOCK_FREE */

/* We are not permitted to call into any GLib functions from here, so we
 * can not use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which glib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;

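/* Every emulated operation below serialises on this single
 * process-wide mutex, which is why (as the G_ATOMIC_LOCK_FREE
 * documentation above notes) these operations are only atomic relative
 * to one another and within one process.
 */
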
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

#endif

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}
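
/* Migration sketch for the deprecation above: a call such as
 *
 * |[
 * old = g_atomic_int_exchange_and_add (&counter, 1);
 * ]|
 *
 * can be replaced one-for-one in new code by
 *
 * |[
 * old = g_atomic_int_add (&counter, 1);
 * ]|
 */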