/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"

/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 * @see_also: #GMutex
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint. The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize. There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively. Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier. Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions. Never
 * read or assign directly from or to a value -- always use this API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test(). Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour. It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow. In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
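 *
 * As a sketch of the reference counting pattern (the Blob type and
 * functions here are hypothetical, for illustration only):
 * |[
 * typedef struct { gint ref_count; gchar *data; } Blob;
 *
 * static Blob *
 * blob_ref (Blob *blob)
 * {
 *   g_atomic_int_inc (&blob->ref_count);
 *   return blob;
 * }
 *
 * static void
 * blob_unref (Blob *blob)
 * {
 *   if (g_atomic_int_dec_and_test (&blob->ref_count))
 *     {
 *       g_free (blob->data);
 *       g_free (blob);
 *     }
 * }
 * ]|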
 **/

/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations. This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex. In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
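 *
 * For example, code that wants to place an atomically-updated counter
 * in memory shared between processes could guard on this macro (a
 * minimal sketch, not a complete recipe):
 * |[
 * #ifndef G_ATOMIC_LOCK_FREE
 * #error shared-memory counters need real hardware atomic operations
 * #endif
 * ]|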
 **/

/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other low-level parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions can not call back into any part of GLib
 * without risking recursion.
 */

#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)

/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic += 1; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Since: 2.4
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}

/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic -= 1; return (*@atomic == 0); }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}

/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}
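
/* As a usage sketch (not part of GLib): a bounded increment that never
 * pushes a counter above a limit, implemented with a retry loop around
 * the compare-and-exchange. The function name is hypothetical.
 *
 *   static gboolean
 *   bounded_inc (volatile gint *counter, gint limit)
 *   {
 *     gint old;
 *
 *     do
 *       {
 *         old = g_atomic_int_get (counter);
 *         if (old >= limit)
 *           return FALSE;
 *       }
 *     while (!g_atomic_int_compare_and_exchange (counter, old, old + 1));
 *
 *     return TRUE;
 *   }
 */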

/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}
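
/* A sketch of how the returned prior value can be used (illustrative
 * only, names hypothetical): handing out sequential ticket numbers to
 * multiple threads, each thread receiving a distinct number.
 *
 *   static volatile gint next_ticket = 0;
 *
 *   static gint
 *   take_ticket (void)
 *   {
 *     return g_atomic_int_add (&next_ticket, 1);
 *   }
 */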

/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}
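
/* A sketch of the bitwise operations used as an atomic flags word.
 * The flag names and fragments below are hypothetical, for
 * illustration only.
 *
 *   #define FLAG_READY  (1u << 0)
 *   #define FLAG_CLOSED (1u << 1)
 *
 *   static volatile guint flags = 0;
 *
 *   // set FLAG_READY; TRUE only for the one thread that set it first
 *   gboolean we_set_it =
 *     (g_atomic_int_or (&flags, FLAG_READY) & FLAG_READY) == 0;
 *
 *   // clear FLAG_CLOSED again
 *   g_atomic_int_and (&flags, ~FLAG_CLOSED);
 */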

/**
 * g_atomic_pointer_get:
 * @atomic: a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 *
 * Since: 2.4
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}

/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}
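
/* As a usage sketch (not part of GLib): lazily publishing a singleton
 * with compare-and-exchange, where the loser of the race frees its
 * copy. MyObject, my_object_new() and my_object_free() are
 * hypothetical.
 *
 *   static volatile gpointer singleton = NULL;
 *
 *   static MyObject *
 *   get_singleton (void)
 *   {
 *     if (g_atomic_pointer_get (&singleton) == NULL)
 *       {
 *         MyObject *candidate = my_object_new ();
 *
 *         if (!g_atomic_pointer_compare_and_exchange (&singleton,
 *                                                     NULL, candidate))
 *           my_object_free (candidate);  // another thread won the race
 *       }
 *
 *     return g_atomic_pointer_get (&singleton);
 *   }
 */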

/**
 * g_atomic_pointer_add:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.30
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}
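
/* A sketch of one use for the pointer-sized bitwise operations
 * (illustrative only): atomically tagging the low bit of a suitably
 * aligned pointer-sized value as a "marked" flag.
 *
 *   static volatile gsize node = 0;   // holds a pointer-sized value
 *
 *   // mark: set the low bit, returning the previous value
 *   gsize prev = g_atomic_pointer_or (&node, 1);
 *
 *   // unmark: clear the low bit again
 *   g_atomic_pointer_and (&node, ~(gsize) 1);
 */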

#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
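
/* On 32-bit builds the plain InterlockedAnd/Or/Xor names may not be
 * available, so map them onto the underscore-prefixed compiler
 * intrinsics; MSVC 6 and older lack the intrinsics entirely and use
 * the inlined fallbacks below instead.
 */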
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compiler */
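/* Each of these helpers emulates the operation with a
 * compare-and-exchange retry loop: read the current value, compute the
 * new value, and retry until no other thread modified the value in
 * between.
 */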
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)
static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)
static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif

/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}

#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#else /* G_ATOMIC_LOCK_FREE */

/* We are not permitted to call into any GLib functions from here, so we
 * can not use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which glib runs have pthreads. Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;

gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           value)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = value;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

#endif /* G_ATOMIC_LOCK_FREE */

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does). It is retained only for
 * compatibility reasons. Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Since: 2.4
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
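 *
 * As a porting sketch (variable names illustrative), old code of the
 * form
 * |[
 * old = g_atomic_int_exchange_and_add (&i, 1);
 * ]|
 * can be rewritten as
 * |[
 * old = g_atomic_int_add (&i, 1);
 * ]|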
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}