/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */

#include "config.h"

#include "gatomic.h"

/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 * @see_also: #GMutex
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.
 * Never read or assign directly from or to a value -- always use this
 * API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
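 *
 * As a rough sketch of the simple reference counting case (the
 * `MyObject` type and its fields here are purely illustrative, not
 * part of GLib):
 *
 * |[<!-- language="C" -->
 * typedef struct
 * {
 *   gint  ref_count;
 *   char *data;
 * } MyObject;
 *
 * static MyObject *
 * my_object_ref (MyObject *object)
 * {
 *   g_atomic_int_inc (&object->ref_count);
 *   return object;
 * }
 *
 * static void
 * my_object_unref (MyObject *object)
 * {
 *   if (g_atomic_int_dec_and_test (&object->ref_count))
 *     {
 *       g_free (object->data);
 *       g_free (object);
 *     }
 * }
 * ]|
 *
 * Only the thread that drops the last reference enters the cleanup
 * branch, so the fields may be freed without further locking.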
 **/

/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
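 *
 * As a minimal sketch, code that relies on sharing GLib atomics with
 * another process (a hypothetical requirement, not taken from GLib)
 * can refuse to build without hardware atomics:
 *
 * |[<!-- language="C" -->
 * #ifndef G_ATOMIC_LOCK_FREE
 * #error This code shares atomic counters across processes
 * #endif
 * ]|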
 **/

/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other low-level parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions cannot call back into any part of GLib
 * without risking recursion.
 */

#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)

#if defined(__ATOMIC_SEQ_CST) && !defined(__clang__)
/* The implementation used in this code path in gatomic.h assumes
 * 4-byte int */
G_STATIC_ASSERT (sizeof (gint) == 4);

/* The implementations in gatomic.h assume 4- or 8-byte pointers */
G_STATIC_ASSERT (sizeof (void *) == 4 || sizeof (void *) == 8);
#endif

/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of `{ *atomic += 1; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Since: 2.4
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}

/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * `{ *atomic -= 1; return (*atomic == 0); }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}

/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
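 *
 * As an illustrative sketch (the `counter` variable and
 * increment_to_limit() are hypothetical, not part of GLib), a typical
 * use is a retry loop that increments a counter only up to a limit:
 *
 * |[<!-- language="C" -->
 * static volatile gint counter;
 *
 * static gboolean
 * increment_to_limit (gint limit)
 * {
 *   gint old;
 *
 *   do
 *     {
 *       old = g_atomic_int_get (&counter);
 *       if (old >= limit)
 *         return FALSE;
 *     }
 *   while (!g_atomic_int_compare_and_exchange (&counter, old, old + 1));
 *
 *   return TRUE;
 * }
 * ]|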
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}

/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
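 *
 * Because the return value is the value before the add, this is a
 * fetch-and-add; for example, a sketch of handing out unique sequence
 * numbers (the `next_id` variable is illustrative):
 *
 * |[<!-- language="C" -->
 * static volatile gint next_id;
 *
 * static gint
 * allocate_id (void)
 * {
 *   return g_atomic_int_add (&next_id, 1);
 * }
 * ]|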
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}

/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
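 *
 * Together with g_atomic_int_and(), this can maintain a set of flag
 * bits; a minimal sketch (the FLAG_* constants and helpers are
 * illustrative, not part of GLib):
 *
 * |[<!-- language="C" -->
 * #define FLAG_READY   (1u << 0)
 * #define FLAG_CLOSING (1u << 1)
 *
 * static volatile guint flags;
 *
 * static void
 * mark_ready (void)
 * {
 *   g_atomic_int_or (&flags, FLAG_READY);
 * }
 *
 * static void
 * clear_closing (void)
 * {
 *   g_atomic_int_and (&flags, ~FLAG_CLOSING);
 * }
 * ]|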
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}

/**
 * g_atomic_pointer_get:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 *
 * Since: 2.4
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}

/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * `{ if (*atomic == oldval) { *atomic = newval; return TRUE; } else return FALSE; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
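 *
 * A common use is one-time initialisation of a shared pointer; a
 * sketch under the assumption that create_object() and
 * destroy_object() are your own allocation helpers (they are not part
 * of GLib):
 *
 * |[<!-- language="C" -->
 * static volatile gpointer instance;
 *
 * static gpointer
 * get_instance (void)
 * {
 *   gpointer object = g_atomic_pointer_get (&instance);
 *
 *   if (object == NULL)
 *     {
 *       object = create_object ();
 *
 *       if (!g_atomic_pointer_compare_and_exchange (&instance, NULL, object))
 *         {
 *           destroy_object (object);
 *           object = g_atomic_pointer_get (&instance);
 *         }
 *     }
 *
 *   return object;
 * }
 * ]|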
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}

/**
 * g_atomic_pointer_add:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic += val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.30
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic &= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic |= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: (not nullable): a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * `{ tmp = *atomic; *atomic ^= val; return tmp; }`.
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}

#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compilers */
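/* Each helper below emulates the missing Interlocked bit operation with
 * an InterlockedCompareExchange() loop: re-read the current value until
 * the exchange succeeds, then return the value observed before the
 * update. */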
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange (atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)

static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange (atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)

static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange (atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif

/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}

#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#else /* G_ATOMIC_LOCK_FREE */

/* We are not permitted to call into any GLib functions from here, so we
 * cannot use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which GLib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;

gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           value)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = value;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

#endif /* G_ATOMIC_LOCK_FREE */

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Since: 2.4
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}