/*
 * Copyright © 2011 Ryan Lortie
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * licence, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Ryan Lortie <desrt@desrt.ca>
 */
#include "config.h"

#include "gatomic.h"
/**
 * SECTION:atomic_operations
 * @title: Atomic Operations
 * @short_description: basic atomic integer and pointer operations
 * @see_also: #GMutex
 *
 * The following is a collection of compiler macros to provide atomic
 * access to integer and pointer-sized values.
 *
 * The macros that have 'int' in the name will operate on pointers to
 * #gint and #guint.  The macros with 'pointer' in the name will operate
 * on pointers to any pointer-sized value, including #gsize.  There is
 * no support for 64-bit operations on platforms with 32-bit pointers
 * because it is not generally possible to perform these operations
 * atomically.
 *
 * The get, set and exchange operations for integers and pointers
 * nominally operate on #gint and #gpointer, respectively.  Of the
 * arithmetic operations, the 'add' operation operates on (and returns)
 * signed integer values (#gint and #gssize) and the 'and', 'or', and
 * 'xor' operations operate on (and return) unsigned integer values
 * (#guint and #gsize).
 *
 * All of the operations act as a full compiler and (where appropriate)
 * hardware memory barrier.  Acquire and release or producer and
 * consumer barrier semantics are not available through this API.
 *
 * It is very important that all accesses to a particular integer or
 * pointer be performed using only this API and that different sizes of
 * operation are not mixed or used on overlapping memory regions.
 * Never read or assign directly from or to a value -- always use this
 * API.
 *
 * For simple reference counting purposes you should use
 * g_atomic_int_inc() and g_atomic_int_dec_and_test().  Other uses that
 * fall outside of simple reference counting patterns are prone to
 * subtle bugs and occasionally undefined behaviour.  It is also worth
 * noting that since all of these operations require global
 * synchronisation of the entire machine, they can be quite slow.  In
 * the case of performing multiple atomic operations it can often be
 * faster to simply acquire a mutex lock around the critical area,
 * perform the operations normally and then release the lock.
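 *
 * For illustration, a minimal sketch of the recommended
 * reference-counting pattern (the Object type and the object_free()
 * function are hypothetical, shown for illustration only):
 * |[
 * typedef struct
 * {
 *   gint ref_count;
 * } Object;
 *
 * static Object *
 * object_ref (Object *object)
 * {
 *   g_atomic_int_inc (&object->ref_count);
 *   return object;
 * }
 *
 * static void
 * object_unref (Object *object)
 * {
 *   if (g_atomic_int_dec_and_test (&object->ref_count))
 *     object_free (object);
 * }
 * ]|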
 **/

/**
 * G_ATOMIC_LOCK_FREE:
 *
 * This macro is defined if the atomic operations of GLib are
 * implemented using real hardware atomic operations.  This means that
 * the GLib atomic API can be used between processes and safely mixed
 * with other (hardware) atomic APIs.
 *
 * If this macro is not defined, the atomic operations may be
 * emulated using a mutex.  In that case, the GLib atomic operations are
 * only atomic relative to themselves and within a single process.
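 *
 * For example, a program that wants to keep a counter in memory shared
 * between processes could check the macro before choosing that design.
 * A sketch (both allocation helpers are hypothetical and elided):
 * |[
 * #ifdef G_ATOMIC_LOCK_FREE
 *   gint *counter = map_counter_in_shared_memory ();
 * #else
 *   gint *counter = allocate_private_counter ();
 * #endif
 * ]|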
 **/

/* NOTE CAREFULLY:
 *
 * This file is the lowest-level part of GLib.
 *
 * Other low-level parts of GLib (threads, slice allocator, g_malloc,
 * messages, etc) call into these functions and macros to get work done.
 *
 * As such, these functions cannot call back into any part of GLib
 * without risking recursion.
 */

#ifdef G_ATOMIC_LOCK_FREE

/* if G_ATOMIC_LOCK_FREE was defined by ./configure then we MUST
 * implement the atomic operations in a lock-free manner.
 */

#if defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
/**
 * g_atomic_int_get:
 * @atomic: a pointer to a #gint or #guint
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the integer
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

/**
 * g_atomic_int_set:
 * @atomic: a pointer to a #gint or #guint
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

/**
 * g_atomic_int_inc:
 * @atomic: a pointer to a #gint or #guint
 *
 * Increments the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic += 1; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Since: 2.4
 **/
void
(g_atomic_int_inc) (volatile gint *atomic)
{
  g_atomic_int_inc (atomic);
}

/**
 * g_atomic_int_dec_and_test:
 * @atomic: a pointer to a #gint or #guint
 *
 * Decrements the value of @atomic by 1.
 *
 * Think of this operation as an atomic version of
 * <literal>{ *@atomic -= 1; return (*@atomic == 0); }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: %TRUE if the resultant value is zero
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return g_atomic_int_dec_and_test (atomic);
}

/**
 * g_atomic_int_compare_and_exchange:
 * @atomic: a pointer to a #gint or #guint
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
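 *
 * Compare-and-exchange loops are a common way to build other
 * read-modify-write operations on top of this call.  As an
 * illustrative sketch (not part of this API), here is a helper that
 * atomically stores the maximum of the current and a candidate value:
 * |[
 * static void
 * atomic_int_store_max (volatile gint *atomic,
 *                       gint           val)
 * {
 *   gint old;
 *
 *   do
 *     old = g_atomic_int_get (atomic);
 *   while (old < val &&
 *          !g_atomic_int_compare_and_exchange (atomic, old, val));
 * }
 * ]|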
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return g_atomic_int_compare_and_exchange (atomic, oldval, newval);
}

/**
 * g_atomic_int_add:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Before version 2.30, this function did not return a value
 * (but g_atomic_int_exchange_and_add() did, and had the same meaning).
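 *
 * Because the old value is returned, this operation can hand out
 * unique sequence numbers to multiple threads.  A minimal sketch
 * (claim_slot() and next_slot are hypothetical, for illustration
 * only):
 * |[
 * static volatile gint next_slot = 0;
 *
 * static gint
 * claim_slot (void)
 * {
 *   return g_atomic_int_add (&next_slot, 1);
 * }
 * ]|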
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.4
 **/
gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return g_atomic_int_add (atomic, val);
}

/**
 * g_atomic_int_and:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_and (atomic, val);
}

/**
 * g_atomic_int_or:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
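 *
 * A typical use is publishing a flag bit that other threads poll.  A
 * minimal sketch (the flag constant and flags word are hypothetical,
 * for illustration only):
 * |[
 * #define OBJECT_FLAG_READY (1u << 0)
 *
 * static volatile guint object_flags = 0;
 *
 * static void
 * mark_ready (void)
 * {
 *   g_atomic_int_or (&object_flags, OBJECT_FLAG_READY);
 * }
 * ]|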
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return g_atomic_int_or (atomic, val);
}

/**
 * g_atomic_int_xor:
 * @atomic: a pointer to a #gint or #guint
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return g_atomic_int_xor (atomic, val);
}

/**
 * g_atomic_pointer_get:
 * @atomic: a pointer to a #gpointer-sized value
 *
 * Gets the current value of @atomic.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (before the get).
 *
 * Returns: the value of the pointer
 *
 * Since: 2.4
 **/
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  return g_atomic_pointer_get ((const volatile gpointer *) atomic);
}

/**
 * g_atomic_pointer_set:
 * @atomic: a pointer to a #gpointer-sized value
 * @newval: a new value to store
 *
 * Sets the value of @atomic to @newval.
 *
 * This call acts as a full compiler and hardware
 * memory barrier (after the set).
 *
 * Since: 2.4
 **/
void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  g_atomic_pointer_set ((volatile gpointer *) atomic, newval);
}

/**
 * g_atomic_pointer_compare_and_exchange:
 * @atomic: a pointer to a #gpointer-sized value
 * @oldval: the value to compare with
 * @newval: the value to conditionally replace with
 *
 * Compares @atomic to @oldval and, if equal, sets it to @newval.
 * If @atomic was not equal to @oldval then no change occurs.
 *
 * This compare and exchange is done atomically.
 *
 * Think of this operation as an atomic version of
 * <literal>{ if (*@atomic == @oldval) { *@atomic = @newval; return TRUE; } else return FALSE; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
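 *
 * One common pattern built on this operation is one-time lazy
 * initialisation of a shared pointer.  A minimal sketch follows
 * (create_data() and free_data() are hypothetical helpers; the
 * pattern assumes a thread that loses the race can safely discard
 * its extra allocation):
 * |[
 * static volatile gpointer cached_data = NULL;
 *
 * static gpointer
 * get_data (void)
 * {
 *   gpointer data = g_atomic_pointer_get (&cached_data);
 *
 *   if (data == NULL)
 *     {
 *       data = create_data ();
 *       if (!g_atomic_pointer_compare_and_exchange (&cached_data,
 *                                                   NULL, data))
 *         {
 *           free_data (data);
 *           data = g_atomic_pointer_get (&cached_data);
 *         }
 *     }
 *
 *   return data;
 * }
 * ]|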
 *
 * Returns: %TRUE if the exchange took place
 *
 * Since: 2.4
 **/
gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return g_atomic_pointer_compare_and_exchange ((volatile gpointer *) atomic,
                                                oldval, newval);
}

/**
 * g_atomic_pointer_add:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to add
 *
 * Atomically adds @val to the value of @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic += @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the add, signed
 *
 * Since: 2.30
 **/
gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  return g_atomic_pointer_add ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_and:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'and'
 *
 * Performs an atomic bitwise 'and' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic &= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_and ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_or:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'or'
 *
 * Performs an atomic bitwise 'or' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic |= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  return g_atomic_pointer_or ((volatile gpointer *) atomic, val);
}

/**
 * g_atomic_pointer_xor:
 * @atomic: a pointer to a #gpointer-sized value
 * @val: the value to 'xor'
 *
 * Performs an atomic bitwise 'xor' of the value of @atomic and @val,
 * storing the result back in @atomic.
 *
 * Think of this operation as an atomic version of
 * <literal>{ tmp = *@atomic; *@atomic ^= @val; return tmp; }</literal>
 *
 * This call acts as a full compiler and hardware memory barrier.
 *
 * Returns: the value of @atomic before the operation, unsigned
 *
 * Since: 2.30
 **/
gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  return g_atomic_pointer_xor ((volatile gpointer *) atomic, val);
}
#elif defined (G_PLATFORM_WIN32)

#include <windows.h>
#if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64) && !(defined _MSC_VER && _MSC_VER <= 1200)
#define InterlockedAnd _InterlockedAnd
#define InterlockedOr _InterlockedOr
#define InterlockedXor _InterlockedXor
#endif

#if !defined (_MSC_VER) || _MSC_VER <= 1200
#include "gmessages.h"
/* Inlined versions for older compilers that lack the bitwise
 * Interlocked intrinsics: emulate each one with an
 * InterlockedCompareExchange() retry loop that re-reads the value
 * until the exchange succeeds, then returns the old value.
 */
static LONG
_gInterlockedAnd (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i & val, i);
  } while (i != j);

  return j;
}
#define InterlockedAnd(a,b) _gInterlockedAnd(a,b)

static LONG
_gInterlockedOr (volatile guint *atomic,
                 guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i | val, i);
  } while (i != j);

  return j;
}
#define InterlockedOr(a,b) _gInterlockedOr(a,b)

static LONG
_gInterlockedXor (volatile guint *atomic,
                  guint           val)
{
  LONG i, j;

  j = *atomic;
  do {
    i = j;
    j = InterlockedCompareExchange(atomic, i ^ val, i);
  } while (i != j);

  return j;
}
#define InterlockedXor(a,b) _gInterlockedXor(a,b)
#endif
/*
 * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
 */
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  MemoryBarrier ();
  return *atomic;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           newval)
{
  *atomic = newval;
  MemoryBarrier ();
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  InterlockedIncrement (atomic);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  return InterlockedDecrement (atomic) == 0;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedAnd (atomic, val);
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  return InterlockedOr (atomic, val);
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  return InterlockedXor (atomic, val);
}

gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;

  MemoryBarrier ();
  return *ptr;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  *ptr = newval;
  MemoryBarrier ();
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedExchangeAdd64 (atomic, val);
#else
  return InterlockedExchangeAdd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedAnd64 (atomic, val);
#else
  return InterlockedAnd (atomic, val);
#endif
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedOr64 (atomic, val);
#else
  return InterlockedOr (atomic, val);
#endif
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
#if GLIB_SIZEOF_VOID_P == 8
  return InterlockedXor64 (atomic, val);
#else
  return InterlockedXor (atomic, val);
#endif
}
#else

/* This error occurs when ./configure decided that we should be capable
 * of lock-free atomics but we find at compile-time that we are not.
 */
#error G_ATOMIC_LOCK_FREE defined, but incapable of lock-free atomics.

#endif /* defined (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) */

#else /* G_ATOMIC_LOCK_FREE */

/* We are not permitted to call into any GLib functions from here, so we
 * cannot use GMutex.
 *
 * Fortunately, we already take care of the Windows case above, and all
 * non-Windows platforms on which GLib runs have pthreads.  Use those.
 */
#include <pthread.h>

static pthread_mutex_t g_atomic_lock = PTHREAD_MUTEX_INITIALIZER;
gint
(g_atomic_int_get) (const volatile gint *atomic)
{
  gint value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *atomic;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_int_set) (volatile gint *atomic,
                    gint           value)
{
  pthread_mutex_lock (&g_atomic_lock);
  *atomic = value;
  pthread_mutex_unlock (&g_atomic_lock);
}

void
(g_atomic_int_inc) (volatile gint *atomic)
{
  pthread_mutex_lock (&g_atomic_lock);
  (*atomic)++;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_int_dec_and_test) (volatile gint *atomic)
{
  gboolean is_zero;

  pthread_mutex_lock (&g_atomic_lock);
  is_zero = --(*atomic) == 0;
  pthread_mutex_unlock (&g_atomic_lock);

  return is_zero;
}

gboolean
(g_atomic_int_compare_and_exchange) (volatile gint *atomic,
                                     gint           oldval,
                                     gint           newval)
{
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*atomic == oldval)))
    *atomic = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gint
(g_atomic_int_add) (volatile gint *atomic,
                    gint           val)
{
  gint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_and) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_or) (volatile guint *atomic,
                   guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

guint
(g_atomic_int_xor) (volatile guint *atomic,
                    guint           val)
{
  guint oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *atomic;
  *atomic = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
gpointer
(g_atomic_pointer_get) (const volatile void *atomic)
{
  const volatile gpointer *ptr = atomic;
  gpointer value;

  pthread_mutex_lock (&g_atomic_lock);
  value = *ptr;
  pthread_mutex_unlock (&g_atomic_lock);

  return value;
}

void
(g_atomic_pointer_set) (volatile void *atomic,
                        gpointer       newval)
{
  volatile gpointer *ptr = atomic;

  pthread_mutex_lock (&g_atomic_lock);
  *ptr = newval;
  pthread_mutex_unlock (&g_atomic_lock);
}

gboolean
(g_atomic_pointer_compare_and_exchange) (volatile void *atomic,
                                         gpointer       oldval,
                                         gpointer       newval)
{
  volatile gpointer *ptr = atomic;
  gboolean success;

  pthread_mutex_lock (&g_atomic_lock);

  if ((success = (*ptr == oldval)))
    *ptr = newval;

  pthread_mutex_unlock (&g_atomic_lock);

  return success;
}

gssize
(g_atomic_pointer_add) (volatile void *atomic,
                        gssize         val)
{
  volatile gssize *ptr = atomic;
  gssize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval + val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_and) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval & val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_or) (volatile void *atomic,
                       gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval | val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}

gsize
(g_atomic_pointer_xor) (volatile void *atomic,
                        gsize          val)
{
  volatile gsize *ptr = atomic;
  gsize oldval;

  pthread_mutex_lock (&g_atomic_lock);
  oldval = *ptr;
  *ptr = oldval ^ val;
  pthread_mutex_unlock (&g_atomic_lock);

  return oldval;
}
#endif /* G_ATOMIC_LOCK_FREE */

/**
 * g_atomic_int_exchange_and_add:
 * @atomic: a pointer to a #gint
 * @val: the value to add
 *
 * This function existed before g_atomic_int_add() returned the prior
 * value of the integer (which it now does).  It is retained only for
 * compatibility reasons.  Don't use this function in new code.
 *
 * Returns: the value of @atomic before the add, signed
 * Since: 2.4
 * Deprecated: 2.30: Use g_atomic_int_add() instead.
 **/
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
                               gint           val)
{
  return (g_atomic_int_add) (atomic, val);
}