1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * g_atomic_*: atomic operations.
5 * Copyright (C) 2003 Sebastian Wilhelmi
6 * Copyright (C) 2007 Nokia Corporation
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
26 #if defined (G_ATOMIC_ARM)
31 #include "gthreadprivate.h"
34 #if defined (__GNUC__)
35 # if defined (G_ATOMIC_I486)
/* x86 (i486 and newer) implementations, using GCC extended inline asm
 * with the "lock" prefix for bus-locked read-modify-write instructions.
 * NOTE(review): this extract is heavily truncated -- parameter-list
 * tails, braces and local declarations (e.g. 'result') are missing
 * between the visible fragments; restore from upstream GLib gatomic.c
 * before compiling. */
36 /* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
/* Atomically adds 'val' to '*atomic' and returns the prior value:
 * "lock; xaddl" exchanges the addend register with the old contents
 * while adding. */
39 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
44 __asm__
__volatile__ ("lock; xaddl %0,%1"
45 : "=r" (result
), "=m" (*atomic
)
46 : "0" (val
), "m" (*atomic
));
/* Atomically adds 'val' to '*atomic'; no result is needed, so the
 * cheaper "lock; addl" is used instead of xaddl. */
51 g_atomic_int_add (volatile gint
*atomic
,
54 __asm__
__volatile__ ("lock; addl %1,%0"
56 : "ir" (val
), "m" (*atomic
));
/* Compare-and-swap on a gint: stores 'newval' only if '*atomic' still
 * equals 'oldval'.  "lock; cmpxchgl" leaves the previous value in EAX
 * (the "=a" output), so success is reported as result == oldval. */
60 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
66 __asm__
__volatile__ ("lock; cmpxchgl %2, %1"
67 : "=a" (result
), "=m" (*atomic
)
68 : "r" (newval
), "m" (*atomic
), "0" (oldval
));
70 return result
== oldval
;
73 /* The same code as above, as on i386 gpointer is 32 bit as well.
74 * Duplicating the code here seems more natural than casting the
75 * arguments and calling the former function */
/* Pointer-sized CAS; byte-for-byte the same asm as the gint variant
 * because pointers are 32 bit on i386. */
78 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
84 __asm__
__volatile__ ("lock; cmpxchgl %2, %1"
85 : "=a" (result
), "=m" (*atomic
)
86 : "r" (newval
), "m" (*atomic
), "0" (oldval
));
88 return result
== oldval
;
91 # elif defined (G_ATOMIC_SPARCV9)
/* SPARC V9 implementations built on the "cas" (32-bit) and "casx"
 * (64-bit) compare-and-swap instructions.
 * NOTE(review): extract is truncated -- several macro/asm lines and
 * function bodies are missing; verify against upstream gatomic.c. */
92 /* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
/* CAS macro on a gint; evaluates to TRUE when the swap succeeded
 * (the value read back, __result, still equals 'oldval').  Do not
 * insert lines below -- the trailing backslashes splice the macro. */
94 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
97 __asm__ __volatile__ ("cas [%4], %2, %0" \
98 : "=r" (__result), "=m" (*(atomic)) \
99 : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
101 __result == oldval; \
104 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer CAS: pointers fit the 32-bit "cas" instruction. */
106 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
111 __asm__
__volatile__ ("cas [%4], %2, %0"
112 : "=r" (result
), "=m" (*atomic
)
113 : "r" (oldval
), "m" (*atomic
), "r" (atomic
),
115 return result
== oldval
;
117 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer CAS via "casx".  'a' is a local alias for 'atomic'
 * -- presumably to strip the volatile qualifier for the "m"
 * constraint; TODO confirm against upstream. */
119 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
124 gpointer
*a
= atomic
;
125 __asm__
__volatile__ ("casx [%4], %2, %0"
126 : "=r" (result
), "=m" (*a
)
127 : "r" (oldval
), "m" (*a
), "r" (a
),
129 return result
== oldval
;
131 # else /* What's that */
132 # error "Your system has an unsupported pointer size"
133 # endif /* GLIB_SIZEOF_VOID_P */
/* Full barrier: membar covering all four load/store orderings. */
134 # define G_ATOMIC_MEMORY_BARRIER \
135 __asm__ __volatile__ ("membar #LoadLoad | #LoadStore" \
136 " | #StoreLoad | #StoreStore" : : : "memory")
138 # elif defined (G_ATOMIC_ALPHA)
/* Alpha implementations.  NOTE(review): the asm bodies are almost
 * entirely missing from this extract -- only the "cmpeq" comparison
 * line of the CAS macro survives; the pointer CAS functions below
 * have empty-looking asm statements because their instruction strings
 * were lost.  Restore from upstream GLib gatomic.c (presumably
 * ldl_l/stl_c and ldq_l/stq_c load-locked/store-conditional loops --
 * TODO confirm). */
139 /* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
141 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
145 __asm__ __volatile__ ( \
148 " cmpeq %0,%3,%1\n" \
163 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer compare-and-exchange (asm body lost in extract). */
165 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
171 __asm__
__volatile__ (
189 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer compare-and-exchange (asm body lost in extract). */
191 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
197 __asm__
__volatile__ (
215 # else /* What's that */
216 # error "Your system has an unsupported pointer size"
217 # endif /* GLIB_SIZEOF_VOID_P */
/* "mb" is the Alpha full memory barrier instruction. */
218 # define G_ATOMIC_MEMORY_BARRIER __asm__ ("mb" : : : "memory")
219 # elif defined (G_ATOMIC_X86_64)
/* x86-64 implementations.  Integer operations are identical to the
 * i486 versions (32-bit lock-prefixed instructions); only the pointer
 * CAS differs, using the 64-bit "cmpxchgq".
 * NOTE(review): extract is truncated -- parameter tails, braces and
 * 'result' declarations are missing; verify against upstream. */
220 /* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
/* Atomic fetch-and-add returning the prior value ("lock; xaddl"). */
223 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
228 __asm__
__volatile__ ("lock; xaddl %0,%1"
229 : "=r" (result
), "=m" (*atomic
)
230 : "0" (val
), "m" (*atomic
));
/* Atomic add with no return value ("lock; addl"). */
235 g_atomic_int_add (volatile gint
*atomic
,
238 __asm__
__volatile__ ("lock; addl %1,%0"
240 : "ir" (val
), "m" (*atomic
));
/* 32-bit CAS; previous value comes back in EAX ("=a"). */
244 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
250 __asm__
__volatile__ ("lock; cmpxchgl %2, %1"
251 : "=a" (result
), "=m" (*atomic
)
252 : "r" (newval
), "m" (*atomic
), "0" (oldval
));
254 return result
== oldval
;
/* 64-bit pointer CAS: "%q2" forces the 64-bit register name so
 * "cmpxchgq" operates on the full pointer. */
258 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
264 __asm__
__volatile__ ("lock; cmpxchgq %q2, %1"
265 : "=a" (result
), "=m" (*atomic
)
266 : "r" (newval
), "m" (*atomic
), "0" (oldval
));
268 return result
== oldval
;
271 # elif defined (G_ATOMIC_POWERPC)
/* PowerPC add/exchange-and-add, using lwarx/stwcx. reservation loops.
 * Two asm spellings exist: numeric local labels ("1:") versus
 * compiler-unique ".L...%=" labels, selected by ASM_NUMERIC_LABELS.
 * NOTE(review): extract is truncated -- loop bodies (stwcx., branch)
 * and function scaffolding are missing; verify against upstream. */
272 /* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
273 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
274 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
277 /* Non-optimizing compile bails on the following two asm statements
278 * for reasons unknown to the author */
/* Fetch-and-add returning the prior value: lwarx loads with
 * reservation; 'temp' holds the incremented value (stwcx. retry loop
 * lost in this extract). */
280 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
284 #if ASM_NUMERIC_LABELS
285 __asm__
__volatile__ ("1: lwarx %0,0,%3\n"
289 : "=&b" (result
), "=&r" (temp
), "=m" (*atomic
)
290 : "b" (atomic
), "r" (val
), "m" (*atomic
)
293 __asm__
__volatile__ (".Lieaa%=: lwarx %0,0,%3\n"
297 : "=&b" (result
), "=&r" (temp
), "=m" (*atomic
)
298 : "b" (atomic
), "r" (val
), "m" (*atomic
)
304 /* The same as above, to save a function call repeated here */
/* Atomic add, same reservation loop, result discarded. */
306 g_atomic_int_add (volatile gint
*atomic
,
310 #if ASM_NUMERIC_LABELS
311 __asm__
__volatile__ ("1: lwarx %0,0,%3\n"
315 : "=&b" (result
), "=&r" (temp
), "=m" (*atomic
)
316 : "b" (atomic
), "r" (val
), "m" (*atomic
)
319 __asm__
__volatile__ (".Lia%=: lwarx %0,0,%3\n"
323 : "=&b" (result
), "=&r" (temp
), "=m" (*atomic
)
324 : "b" (atomic
), "r" (val
), "m" (*atomic
)
/* Non-optimized builds: implement both operations as CAS retry loops
 * over g_atomic_int_compare_and_exchange instead of raw asm. */
328 # else /* !__OPTIMIZE__ */
330 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
336 while (!g_atomic_int_compare_and_exchange (atomic
, result
, result
+ val
));
342 g_atomic_int_add (volatile gint
*atomic
,
348 while (!g_atomic_int_compare_and_exchange (atomic
, result
, result
+ val
));
350 # endif /* !__OPTIMIZE__ */
352 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* PowerPC compare-and-exchange.  Each variant begins with "sync" as
 * the entry barrier; the reservation loop (lwarx/stwcx. for 32-bit,
 * ldarx/stdcx. for 64-bit quantities) follows, but most of its lines
 * are lost in this extract -- verify against upstream gatomic.c. */
354 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
359 #if ASM_NUMERIC_LABELS
360 __asm__
__volatile__ ("sync\n"
368 : "b" (atomic
), "r" (oldval
), "r" (newval
)
371 __asm__
__volatile__ ("sync\n"
372 ".L1icae%=: lwarx %0,0,%1\n"
379 : "b" (atomic
), "r" (oldval
), "r" (newval
)
/* 32-bit pointer CAS: pointers are word-sized here, so the same
 * lwarx-based sequence applies. */
386 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
391 #if ASM_NUMERIC_LABELS
392 __asm__
__volatile__ ("sync\n"
400 : "b" (atomic
), "r" (oldval
), "r" (newval
)
403 __asm__
__volatile__ ("sync\n"
404 ".L1pcae%=: lwarx %0,0,%1\n"
411 : "b" (atomic
), "r" (oldval
), "r" (newval
)
416 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit system: gint stays 32-bit (lwarx), pointers need the
 * doubleword ldarx variant below. */
418 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
423 #if ASM_NUMERIC_LABELS
424 __asm__
__volatile__ ("sync\n"
433 : "b" (atomic
), "r" (oldval
), "r" (newval
)
436 __asm__
__volatile__ ("sync\n"
437 ".L1icae%=: lwarx %0,0,%1\n"
445 : "b" (atomic
), "r" (oldval
), "r" (newval
)
/* 64-bit pointer CAS: "ldarx" reserves a doubleword. */
452 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
457 #if ASM_NUMERIC_LABELS
458 __asm__
__volatile__ ("sync\n"
466 : "b" (atomic
), "r" (oldval
), "r" (newval
)
469 __asm__
__volatile__ ("sync\n"
470 ".L1pcae%=: ldarx %0,0,%1\n"
477 : "b" (atomic
), "r" (oldval
), "r" (newval
)
482 # else /* What's that */
483 # error "Your system has an unsupported pointer size"
484 # endif /* GLIB_SIZEOF_VOID_P */
/* "sync" is the PowerPC heavyweight full barrier. */
486 # define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")
488 # elif defined (G_ATOMIC_IA64)
/* IA-64: no hand-written asm needed -- the compiler's __sync_*
 * builtins map directly onto the architecture's atomic instructions.
 * NOTE(review): function scaffolding (return types, braces) is
 * missing from this extract. */
489 /* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
/* Fetch-and-add returning the prior value. */
492 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
495 return __sync_fetch_and_add (atomic
, val
);
/* Atomic add; the builtin's return value is deliberately ignored. */
499 g_atomic_int_add (volatile gint
*atomic
,
502 __sync_fetch_and_add (atomic
, val
);
/* gint CAS via the boolean builtin. */
506 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
510 return __sync_bool_compare_and_swap (atomic
, oldval
, newval
);
/* Pointer CAS: cast through 'long' (pointer-sized on IA-64) to give
 * the builtin an integer operand type. */
514 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
518 return __sync_bool_compare_and_swap ((long *)atomic
,
519 (long)oldval
, (long)newval
);
522 # define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
523 # elif defined (G_ATOMIC_S390)
/* s390: "cs" (compare-and-swap, 32-bit) and "csg" (64-bit).  The "cs"
 * family updates the first operand in place with the value found, so
 * the "+d" read-write constraint is used and success is detected by
 * comparing the survivor against 'oldval'.  Condition code clobbered
 * ("cc").  NOTE(review): truncated extract; verify upstream. */
524 /* Adapted from glibc's sysdeps/s390/bits/atomic.h
526 # define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
528 gint __result = oldval; \
529 __asm__ __volatile__ ("cs %0, %2, %1" \
530 : "+d" (__result), "=Q" (*(atomic)) \
531 : "d" (newval), "m" (*(atomic)) : "cc" ); \
532 __result == oldval; \
535 # if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* 32-bit pointer CAS: same "cs" instruction, pointer operands. */
537 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
541 gpointer result
= oldval
;
542 __asm__
__volatile__ ("cs %0, %2, %1"
543 : "+d" (result
), "=Q" (*(atomic
))
544 : "d" (newval
), "m" (*(atomic
)) : "cc" );
545 return result
== oldval
;
547 # elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit pointer CAS via "csg"; 'a' aliases 'atomic' -- presumably to
 * drop the volatile qualifier for the memory constraint (TODO
 * confirm against upstream). */
549 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
553 gpointer result
= oldval
;
554 gpointer
*a
= atomic
;
555 __asm__
__volatile__ ("csg %0, %2, %1"
556 : "+d" (result
), "=Q" (*a
)
557 : "d" ((long)(newval
)), "m" (*a
) : "cc" );
558 return result
== oldval
;
560 # else /* What's that */
561 # error "Your system has an unsupported pointer size"
562 # endif /* GLIB_SIZEOF_VOID_P */
563 # elif defined (G_ATOMIC_ARM)
/* Pre-ARMv6 fallback: a single global spinlock ('atomic_spin')
 * serializes every atomic operation.  Each operation locks, performs
 * plain C reads/writes, and unlocks.  NOTE(review): the lock/unlock
 * asm (presumably "swp"-based -- TODO confirm) and most function
 * bodies are missing from this extract. */
/* The one global spinlock word: 0 = free, 1 = held. */
564 static volatile int atomic_spin
= 0;
/* Try to take the lock once; the visible constraints show the
 * constant 1 being swapped into &atomic_spin. */
566 static int atomic_spin_trylock (void)
573 : "r,0" (1), "r,r" (&atomic_spin
)
/* Spin (body lost) until trylock succeeds. */
581 static void atomic_spin_lock (void)
583 while (atomic_spin_trylock())
587 static void atomic_spin_unlock (void)
/* Fetch-and-add under the spinlock. */
593 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
601 atomic_spin_unlock();
/* Plain add under the spinlock. */
607 g_atomic_int_add (volatile gint
*atomic
,
612 atomic_spin_unlock();
/* gint CAS under the spinlock: compare, conditionally store. */
616 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
623 if (*atomic
== oldval
)
630 atomic_spin_unlock();
/* Pointer CAS under the spinlock, same shape as the gint variant. */
636 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
643 if (*atomic
== oldval
)
650 atomic_spin_unlock();
654 # elif defined (G_ATOMIC_CRIS) || defined (G_ATOMIC_CRISV32)
/* CRIS / CRISv32 CAS macro, two variants selected by preprocessor
 * (the #else and surrounding lines are lost in this extract; the two
 * visible definitions are textually near-identical -- presumably they
 * differ in the interrupt-disable instructions stripped from view;
 * TODO confirm against upstream).  Each compares [atomic] with
 * oldval, conditionally stores newval, and sets Result via "seq".
 * The dummy "g" (*(gpointer*)(atomic)) input tells GCC the pointed-to
 * memory is read.  Do not insert lines inside the macros: trailing
 * backslashes splice them. */
655 # ifdef G_ATOMIC_CRIS
656 # define CRIS_ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
659 __asm__ __volatile__ ("\n" \
661 "cmp.d [%[Atomic]], %[OldVal]\n\t" \
664 "move.d %[NewVal], [%[Atomic]]\n\t" \
666 "1:\tseq %[Result]" \
667 : [Result] "=&r" (__result), \
669 : [Atomic] "r" (atomic), \
670 [OldVal] "r" (oldval), \
671 [NewVal] "r" (newval), \
672 "g" (*(gpointer*) (atomic)) \
677 # define CRIS_ATOMIC_INT_CMP_XCHG(atomic, oldval, newval) \
680 __asm__ __volatile__ ("\n" \
682 "cmp.d [%[Atomic]], %[OldVal]\n\t" \
685 "move.d %[NewVal], [%[Atomic]]\n\t" \
687 "1:\tseq %[Result]" \
688 : [Result] "=&r" (__result), \
690 : [Atomic] "r" (atomic), \
691 [OldVal] "r" (oldval), \
692 [NewVal] "r" (newval), \
693 "g" (*(gpointer*) (atomic)) \
/* TRUE when 'atomic' sits close enough to the end of its 32-byte
 * cache line that the object crosses the boundary -- the asm CAS
 * cannot be used there (see the mutex fallback below). */
699 #define CRIS_CACHELINE_SIZE 32
700 #define CRIS_ATOMIC_BREAKS_CACHELINE(atomic) \
701 (((gulong)(atomic) & (CRIS_CACHELINE_SIZE - 1)) > (CRIS_CACHELINE_SIZE - sizeof (atomic)))
/* Forward declarations of the mutex-protected fallbacks (defined
 * later via the DEFINE_WITH_MUTEXES machinery and the #define
 * renames below); used only when the variable straddles a cache
 * line.  NOTE(review): parameter tails are truncated in this
 * extract. */
703 gint
__g_atomic_int_exchange_and_add (volatile gint
*atomic
,
705 void __g_atomic_int_add (volatile gint
*atomic
,
707 gboolean
__g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
710 gboolean
__g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
/* Pointer CAS: route cacheline-straddling addresses to the mutex
 * fallback, otherwise use the inline asm CAS macro. */
715 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
719 if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic
)))
720 return __g_atomic_pointer_compare_and_exchange (atomic
, oldval
, newval
);
722 return CRIS_ATOMIC_INT_CMP_XCHG (atomic
, oldval
, newval
);
/* gint CAS with the same cacheline-split dispatch. */
726 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
730 if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic
)))
731 return __g_atomic_int_compare_and_exchange (atomic
, oldval
, newval
);
733 return CRIS_ATOMIC_INT_CMP_XCHG (atomic
, oldval
, newval
);
/* Fetch-and-add: mutex fallback when split, else a CAS retry loop. */
737 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
742 if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic
)))
743 return __g_atomic_int_exchange_and_add (atomic
, val
);
747 while (!CRIS_ATOMIC_INT_CMP_XCHG (atomic
, result
, result
+ val
));
/* Atomic add, same dispatch, result discarded. */
753 g_atomic_int_add (volatile gint
*atomic
,
758 if (G_UNLIKELY (CRIS_ATOMIC_BREAKS_CACHELINE (atomic
)))
759 return __g_atomic_int_add (atomic
, val
);
763 while (!CRIS_ATOMIC_INT_CMP_XCHG (atomic
, result
, result
+ val
));
766 /* We need the atomic mutex for atomic operations where the atomic variable
767 * breaks the 32 byte cache line since the CRIS architecture does not support
768 * atomic operations on such variables. Fortunately this should be rare.
/* Trick: request the mutex-based implementations, but rename them to
 * the double-underscore names so they become the private fallbacks
 * called by the CRIS wrappers above, not the public entry points. */
770 # define DEFINE_WITH_MUTEXES
771 # define g_atomic_int_exchange_and_add __g_atomic_int_exchange_and_add
772 # define g_atomic_int_add __g_atomic_int_add
773 # define g_atomic_int_compare_and_exchange __g_atomic_int_compare_and_exchange
774 # define g_atomic_pointer_compare_and_exchange __g_atomic_pointer_compare_and_exchange
/* No recognized G_ATOMIC_* architecture: fall back to mutexes. */
776 # else /* !G_ATOMIC_* */
777 # define DEFINE_WITH_MUTEXES
778 # endif /* G_ATOMIC_* */
/* Non-GCC compilers: Win32 Interlocked API where available,
 * otherwise the mutex fallback. */
779 #else /* !__GNUC__ */
780 # ifdef G_PLATFORM_WIN32
781 # define DEFINE_WITH_WIN32_INTERLOCKED
783 # define DEFINE_WITH_MUTEXES
785 #endif /* __GNUC__ */
787 #ifdef DEFINE_WITH_WIN32_INTERLOCKED
/* Win32 implementations on top of the Interlocked* API.
 * NOTE(review): truncated extract -- function scaffolding and some
 * argument lines are missing; verify against upstream gatomic.c. */
788 # include <windows.h>
789 /* Following indicates that InterlockedCompareExchangePointer is
790 * declared in winbase.h (included by windows.h) and needs to be
791 * commented out if not true. It is defined iff WINVER > 0x0400,
792 * which is usually correct but can be wrong if WINVER is set before
793 * windows.h is included.
796 # define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
/* Fetch-and-add: InterlockedExchangeAdd returns the prior value. */
800 g_atomic_int_exchange_and_add (volatile gint32
*atomic
,
803 return InterlockedExchangeAdd (atomic
, val
);
/* Atomic add; the returned prior value is discarded. */
807 g_atomic_int_add (volatile gint32
*atomic
,
810 InterlockedExchangeAdd (atomic
, val
);
/* gint CAS.  Old SDKs lacking the typed 32-bit entry point go through
 * the PVOID-based InterlockedCompareExchange with casts; newer ones
 * call it directly.  Returns TRUE iff the prior value was 'oldval'. */
814 g_atomic_int_compare_and_exchange (volatile gint32
*atomic
,
818 #ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
819 return (guint32
) InterlockedCompareExchange ((PVOID
*)atomic
,
821 (PVOID
)oldval
) == oldval
;
823 return InterlockedCompareExchange (atomic
,
/* Pointer CAS: use InterlockedCompareExchangePointer when declared;
 * otherwise only a 32-bit system can fall back to the integer
 * version (hence the #error on other pointer sizes). */
830 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
834 # ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
835 return InterlockedCompareExchangePointer (atomic
, newval
, oldval
) == oldval
;
837 # if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
838 # error "InterlockedCompareExchangePointer needed"
840 return InterlockedCompareExchange (atomic
, newval
, oldval
) == oldval
;
844 #endif /* DEFINE_WITH_WIN32_INTERLOCKED */
846 #ifdef DEFINE_WITH_MUTEXES
847 /* We have to use the slow, but safe locking method */
/* One process-wide mutex serializes every atomic operation; it is
 * created in _g_atomic_thread_init(). */
848 static GMutex
*g_atomic_mutex
;
/* Fetch-and-add under the global mutex (body partly lost in this
 * extract: the read/add between lock and unlock is not visible). */
851 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
856 g_mutex_lock (g_atomic_mutex
);
859 g_mutex_unlock (g_atomic_mutex
);
/* Plain add under the mutex. */
866 g_atomic_int_add (volatile gint
*atomic
,
869 g_mutex_lock (g_atomic_mutex
);
871 g_mutex_unlock (g_atomic_mutex
);
/* gint CAS under the mutex: compare, conditionally store, report. */
875 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
881 g_mutex_lock (g_atomic_mutex
);
882 if (*atomic
== oldval
)
889 g_mutex_unlock (g_atomic_mutex
);
/* Pointer CAS under the mutex, same shape as the gint variant. */
895 g_atomic_pointer_compare_and_exchange (volatile gpointer
*atomic
,
901 g_mutex_lock (g_atomic_mutex
);
902 if (*atomic
== oldval
)
909 g_mutex_unlock (g_atomic_mutex
);
/* When the platform needs memory barriers, even plain get/set must
 * take the mutex so they order correctly against the operations
 * above. */
914 #ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
916 g_atomic_int_get (volatile gint
*atomic
)
920 g_mutex_lock (g_atomic_mutex
);
922 g_mutex_unlock (g_atomic_mutex
);
928 g_atomic_int_set (volatile gint
*atomic
,
931 g_mutex_lock (g_atomic_mutex
);
933 g_mutex_unlock (g_atomic_mutex
);
937 g_atomic_pointer_get (volatile gpointer
*atomic
)
941 g_mutex_lock (g_atomic_mutex
);
943 g_mutex_unlock (g_atomic_mutex
);
949 g_atomic_pointer_set (volatile gpointer
*atomic
,
952 g_mutex_lock (g_atomic_mutex
);
954 g_mutex_unlock (g_atomic_mutex
);
956 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
957 #elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
/* Lock-free platforms that still need explicit ordering: get/set are
 * a plain access paired with the architecture's
 * G_ATOMIC_MEMORY_BARRIER (defined in the sections above).
 * NOTE(review): the plain load/store lines are lost in this extract;
 * only the barrier statements remain visible. */
959 g_atomic_int_get (volatile gint
*atomic
)
961 G_ATOMIC_MEMORY_BARRIER
;
966 g_atomic_int_set (volatile gint
*atomic
,
970 G_ATOMIC_MEMORY_BARRIER
;
974 g_atomic_pointer_get (volatile gpointer
*atomic
)
976 G_ATOMIC_MEMORY_BARRIER
;
981 g_atomic_pointer_set (volatile gpointer
*atomic
,
985 G_ATOMIC_MEMORY_BARRIER
;
987 #endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
989 #ifdef ATOMIC_INT_CMP_XCHG
/* Architectures that only supplied the ATOMIC_INT_CMP_XCHG macro
 * (SPARC, Alpha, s390) get the remaining gint operations derived
 * from it here. */
/* CAS is the macro itself. */
991 g_atomic_int_compare_and_exchange (volatile gint
*atomic
,
995 return ATOMIC_INT_CMP_XCHG (atomic
, oldval
, newval
);
/* Fetch-and-add as a CAS retry loop (re-read of 'result' between
 * iterations is lost in this extract). */
999 g_atomic_int_exchange_and_add (volatile gint
*atomic
,
1005 while (!ATOMIC_INT_CMP_XCHG (atomic
, result
, result
+ val
));
/* Atomic add via the same retry loop, result discarded. */
1011 g_atomic_int_add (volatile gint
*atomic
,
1017 while (!ATOMIC_INT_CMP_XCHG (atomic
, result
, result
+ val
));
1019 #endif /* ATOMIC_INT_CMP_XCHG */
/* One-time initialization hook called by the threading system: the
 * mutex-based fallback needs its global lock allocated before any
 * atomic operation runs.  A no-op for lock-free builds. */
1022 _g_atomic_thread_init (void)
1024 #ifdef DEFINE_WITH_MUTEXES
1025 g_atomic_mutex
= g_mutex_new ();
1026 #endif /* DEFINE_WITH_MUTEXES */
1029 #ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
/* On platforms without barrier requirements, g_atomic_*_get/set are
 * macros in the public header; the parenthesized names below suppress
 * macro expansion so real out-of-line functions also exist for
 * callers that take their address.  Each simply invokes the macro. */
1031 (g_atomic_int_get
) (volatile gint
*atomic
)
1033 return g_atomic_int_get (atomic
);
1037 (g_atomic_int_set
) (volatile gint
*atomic
,
1040 g_atomic_int_set (atomic
, newval
);
1044 (g_atomic_pointer_get
) (volatile gpointer
*atomic
)
1046 return g_atomic_pointer_get (atomic
);
1050 (g_atomic_pointer_set
) (volatile gpointer
*atomic
,
1053 g_atomic_pointer_set (atomic
, newval
);
1055 #endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
1057 #define __G_ATOMIC_C__
1058 #include "galiasdef.c"