/* glib/gatomic.c */
/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"

/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 * @see_also: #GMutex
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.
 * Never read or assign directly from or to a value -- always use this
 * API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
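 *
 * As a minimal sketch of the recommended reference counting pattern
 * (the Foo type and the foo_ref()/foo_unref() helpers here are
 * hypothetical, not part of GLib):
 * |[
 * typedef struct { gint ref_count; gchar *data; } Foo;
 *
 * static Foo *
 * foo_ref (Foo *foo)
 * {
 *   g_atomic_int_inc (&foo->ref_count);
 *   return foo;
 * }
 *
 * static void
 * foo_unref (Foo *foo)
 * {
 *   if (g_atomic_int_dec_and_test (&foo->ref_count))
 *     {
 *       g_free (foo->data);
 *       g_free (foo);
 *     }
 * }
 * ]|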
 **/

/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations
 * are only atomic relative to themselves and within a single process.
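 *
 * As a hedged sketch, code that requires true lock-freedom can branch
 * on this macro at compile time (use_lockfree_path() and
 * use_locked_path() are hypothetical placeholders, not GLib API):
 * |[
 * #ifdef G_ATOMIC_LOCK_FREE
 *   use_lockfree_path ();
 * #else
 *   use_locked_path ();
 * #endif
 * ]|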
 **/

/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other lowlevel parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions can not call back into any part of GLib
 * without risking recursion.
 */

#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)

/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
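 *
 * A hedged sketch of a flag published from one thread and polled from
 * another, relying on the barriers described above (the initialized
 * variable and the writer()/is_ready()/setup_everything() helpers are
 * hypothetical, not GLib API):
 * |[
 * static volatile gint initialized;
 *
 * static void
 * writer (void)
 * {
 *   setup_everything ();
 *   g_atomic_int_set (&initialized, 1);
 * }
 *
 * static gboolean
 * is_ready (void)
 * {
 *   return g_atomic_int_get (&initialized) != 0;
 * }
 * ]|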
 *
 * Since: 2.4
 **/
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic += 1; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Since: 2.4
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}

/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic -= 1; return (*@atomic == 0); }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}

/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
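 *
 * A hedged sketch of the usual compare-and-exchange retry loop, here
 * raising a shared maximum (atomic_store_max() is a hypothetical
 * helper, not GLib API):
 * |[
 * static void
 * atomic_store_max (volatile gint *atomic,
 *                   gint           val)
 * {
 *   gint old;
 *
 *   do
 *     old = g_atomic_int_get (atomic);
 *   while (old < val &&
 *          !g_atomic_int_compare_and_exchange (atomic, old, val));
 * }
 * ]|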
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}

/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
 *
 * Returns: the value of @atomic before the add, signed
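 *
 * A hedged sketch of fetch-and-add used to hand out unique ids
 * (next_id and allocate_id() are hypothetical, not GLib API):
 * |[
 * static volatile gint next_id;
 *
 * static gint
 * allocate_id (void)
 * {
 *   return g_atomic_int_add (&next_id, 1);
 * }
 * ]|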
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}

/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
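 *
 * A hedged sketch of atomically setting a flag bit and learning
 * whether this call was the one that set it (FLAG_CLOSED, the flags
 * variable and close_once() are hypothetical):
 * |[
 * #define FLAG_CLOSED (1u << 0)
 *
 * static volatile guint flags;
 *
 * static gboolean
 * close_once (void)
 * {
 *   return (g_atomic_int_or (&flags, FLAG_CLOSED) & FLAG_CLOSED) == 0;
 * }
 * ]|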
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}

/**
 * g_atomic_pointer_get:
 * @atomic: a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 *
 * Since: 2.4
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}

/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
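 *
 * A hedged sketch of racy-but-safe one-time initialisation, where a
 * losing thread discards its candidate (create_object(),
 * destroy_object() and the singleton variable are hypothetical):
 * |[
 * static volatile gpointer singleton;
 *
 * static gpointer
 * get_singleton (void)
 * {
 *   if (g_atomic_pointer_get (&singleton) == NULL)
 *     {
 *       gpointer candidate = create_object ();
 *
 *       if (!g_atomic_pointer_compare_and_exchange (&singleton,
 *                                                   NULL, candidate))
 *         destroy_object (candidate);
 *     }
 *
 *   return g_atomic_pointer_get (&singleton);
 * }
 * ]|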
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}

/**
 * g_atomic_pointer_add:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
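 *
 * A hedged sketch of a bump allocator that carves cells out of a
 * shared buffer, exploiting the fact that this call also works on a
 * #gsize (bump_alloc() and its variables are hypothetical; overflow
 * checking is omitted):
 * |[
 * static char buffer[4096];
 * static volatile gsize offset;
 *
 * static void *
 * bump_alloc (gsize size)
 * {
 *   gssize old = g_atomic_pointer_add (&offset, (gssize) size);
 *
 *   return buffer + old;
 * }
 * ]|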
 *
 * Since: 2.30
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}

#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}

#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#else /* G_ATOMIC_LOCK_FREE */

/* We are not permitted to call into any GLib functions from here, so we
 * can not use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which glib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;

gint
(g_atomic_int_get) (volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           value)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = value;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gpointer
(g_atomic_pointer_get) (volatile void *atomic)
{
  volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

#endif

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Since: 2.4
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}