glib/gatomic.c — GLib atomic operations (git blob 3e9ee43eb0c1fcdd7ed18b5d24ca04674b9e12a1)
1 /* GLIB - Library of useful routines for C programming
2 * Copyright (C) 1995-1997 Peter Mattis, Spencer Kimball and Josh MacDonald
4 * g_atomic_*: atomic operations.
5 * Copyright (C) 2003 Sebastian Wilhelmi
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the
19 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
20 * Boston, MA 02111-1307, USA.
23 #include "config.h"
25 #include "glib.h"
26 #include "gthreadprivate.h"
27 #include "galias.h"
#if defined (__GNUC__)
# if defined (G_ATOMIC_I486)
/* Adapted from CVS version 1.10 of glibc's sysdeps/i386/i486/bits/atomic.h
 */

/* Atomically adds 'val' to '*atomic' and returns the value '*atomic'
 * held immediately before the addition (fetch-and-add). */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
			       gint           val)
{
  gint result;

  /* xadd exchanges the register with *atomic and stores the sum back;
   * the 'lock' prefix makes the read-modify-write atomic.  'result'
   * ("0"/"=r") enters holding val and leaves holding the old value. */
  __asm__ __volatile__ ("lock; xaddl %0,%1"
			: "=r" (result), "=m" (*atomic)
			: "0" (val), "m" (*atomic));
  return result;
}

/* Atomically adds 'val' to '*atomic'; no return value is needed, so a
 * plain lock-prefixed add suffices. */
void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
			: "=m" (*atomic)
			: "ir" (val), "m" (*atomic));
}

/* If '*atomic' equals 'oldval', atomically replaces it with 'newval'
 * and returns TRUE; otherwise leaves it unchanged and returns FALSE. */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval,
				   gint           newval)
{
  gint result;

  /* cmpxchg compares %eax (tied to 'oldval' via "0"/"=a") with *atomic
   * and stores newval on equality; %eax ends up holding the previous
   * value of *atomic either way, so result == oldval means success. */
  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

/* The same code as above, as on i386 gpointer is 32 bit as well.
 * Duplicating the code here seems more natural than casting the
 * arguments and calling the former function */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}
# elif defined (G_ATOMIC_SPARCV9)
/* Adapted from CVS version 1.3 of glibc's sysdeps/sparc/sparc64/bits/atomic.h
 */

/* 32-bit compare-and-swap via the 'cas' instruction: compares __result
 * (loaded with newval through "0") against... NOTE(review): per the
 * SPARC 'cas' semantics, the register pair here swaps on match with
 * 'oldval' in %2; evaluates to TRUE iff the swap took place.  Used
 * below to synthesize the remaining integer operations. */
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)			\
  ({ 									\
     gint __result;							\
     __asm__ __volatile__ ("cas [%4], %2, %0"				\
			   : "=r" (__result), "=m" (*(atomic))		\
			   : "r" (oldval), "m" (*(atomic)), "r" (atomic),\
			     "0" (newval));				\
     __result == oldval;						\
  })

# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Pointer-sized CAS; on 32-bit SPARC 'cas' covers a gpointer. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("cas [%4], %2, %0"
			: "=r" (result), "=m" (*atomic)
			: "r" (oldval), "m" (*atomic), "r" (atomic),
			  "0" (newval));
  return result == oldval;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result;
  gpointer *a = atomic; /* NOTE(review): drops volatile from the asm operands, as in the original */
  /* 'casx' is the 64-bit (extended) compare-and-swap. */
  __asm__ __volatile__ ("casx [%4], %2, %0"
			: "=r" (result), "=m" (*a)
			: "r" (oldval), "m" (*a), "r" (a),
			  "0" (newval));
  return result == oldval;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */

/* Full fence: orders all four load/store combinations. */
# define G_ATOMIC_MEMORY_BARRIER					\
  __asm__ __volatile__ ("membar #LoadLoad | #LoadStore"			\
			" | #StoreLoad | #StoreStore" : : : "memory")
# elif defined (G_ATOMIC_ALPHA)
/* Adapted from CVS version 1.3 of glibc's sysdeps/alpha/bits/atomic.h
 */

/* Compare-and-swap built from Alpha's load-locked/store-conditional
 * pair (ldl_l/stl_c), with 'mb' memory barriers on both sides.
 * __result is the stl_c success flag (non-zero on success); a failed
 * conditional store branches back to retry.  Evaluates to TRUE iff
 * '*atomic' held 'oldval' and was replaced by 'newval'. */
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)		\
  ({ 								\
     gint __result;						\
     gint __prev;						\
     __asm__ __volatile__ (					\
	" mb\n"							\
	"1: ldl_l %0,%2\n"					\
	" cmpeq %0,%3,%1\n"					\
	" beq %1,2f\n"						\
	" mov %4,%1\n"						\
	" stl_c %1,%2\n"					\
	" beq %1,1b\n"						\
	" mb\n"							\
	"2:"							\
	: "=&r" (__prev), 					\
	  "=&r" (__result)					\
	: "m" (*(atomic)),					\
	  "Ir" (oldval),					\
	  "Ir" (newval)						\
	: "memory");						\
     __result != 0;						\
  })

# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Pointer CAS; 32-bit pointers fit the longword (ldl_l/stl_c) forms. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
	" mb\n"
	"1: ldl_l %0,%2\n"
	" cmpeq %0,%3,%1\n"
	" beq %1,2f\n"
	" mov %4,%1\n"
	" stl_c %1,%2\n"
	" beq %1,1b\n"
	" mb\n"
	"2:"
	: "=&r" (prev),
	  "=&r" (result)
	: "m" (*atomic),
	  "Ir" (oldval),
	  "Ir" (newval)
	: "memory");
  return result != 0;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit variant: ldq_l/stq_c operate on quadwords. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gint result;
  gpointer prev;
  __asm__ __volatile__ (
	" mb\n"
	"1: ldq_l %0,%2\n"
	" cmpeq %0,%3,%1\n"
	" beq %1,2f\n"
	" mov %4,%1\n"
	" stq_c %1,%2\n"
	" beq %1,1b\n"
	" mb\n"
	"2:"
	: "=&r" (prev),
	  "=&r" (result)
	: "m" (*atomic),
	  "Ir" (oldval),
	  "Ir" (newval)
	: "memory");
  return result != 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */

# define G_ATOMIC_MEMORY_BARRIER __asm__ ("mb" : : : "memory")
# elif defined (G_ATOMIC_X86_64)
/* Adapted from CVS version 1.9 of glibc's sysdeps/x86_64/bits/atomic.h
 */

/* Atomic fetch-and-add: returns the pre-addition value of '*atomic'. */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
			       gint           val)
{
  gint result;

  /* Same lock-prefixed xadd as the i486 code; gint stays 32-bit. */
  __asm__ __volatile__ ("lock; xaddl %0,%1"
			: "=r" (result), "=m" (*atomic)
			: "0" (val), "m" (*atomic));
  return result;
}

/* Atomic add without a return value. */
void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  __asm__ __volatile__ ("lock; addl %1,%0"
			: "=m" (*atomic)
			: "ir" (val), "m" (*atomic));
}

/* Atomic 32-bit compare-and-exchange; TRUE on success. */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval,
				   gint           newval)
{
  gint result;

  __asm__ __volatile__ ("lock; cmpxchgl %2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}

/* Atomic pointer compare-and-exchange: pointers are 64-bit here, so
 * use cmpxchgq; the %q2 modifier forces the 64-bit register name. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result;

  __asm__ __volatile__ ("lock; cmpxchgq %q2, %1"
			: "=a" (result), "=m" (*atomic)
			: "r" (newval), "m" (*atomic), "0" (oldval));

  return result == oldval;
}
# elif defined (G_ATOMIC_POWERPC)
/* Adapted from CVS version 1.16 of glibc's sysdeps/powerpc/bits/atomic.h
 * and CVS version 1.4 of glibc's sysdeps/powerpc/powerpc32/bits/atomic.h
 * and CVS version 1.7 of glibc's sysdeps/powerpc/powerpc64/bits/atomic.h
 */
# ifdef __OPTIMIZE__
/* Non-optimizing compile bails on the following two asm statements
 * for reasons unknown to the author */

/* Atomic fetch-and-add via a load-reserve/store-conditional
 * (lwarx/stwcx.) retry loop: the conditional store fails if the
 * reservation was lost to another CPU, and bne- loops back until the
 * addition commits atomically.  Returns the pre-addition value. */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
			       gint           val)
{
  gint result, temp;
  __asm__ __volatile__ ("1: lwarx %0,0,%3\n"
			" add %1,%0,%4\n"
			" stwcx. %1,0,%3\n"
			" bne- 1b"
			: "=&b" (result), "=&r" (temp), "=m" (*atomic)
			: "b" (atomic), "r" (val), "m" (*atomic)
			: "cr0", "memory");
  return result;
}

/* The same as above, to save a function call repeated here */
void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  gint result, temp;
  __asm__ __volatile__ ("1: lwarx %0,0,%3\n"
			" add %1,%0,%4\n"
			" stwcx. %1,0,%3\n"
			" bne- 1b"
			: "=&b" (result), "=&r" (temp), "=m" (*atomic)
			: "b" (atomic), "r" (val), "m" (*atomic)
			: "cr0", "memory");
}
303 # else /* !__OPTIMIZE__ */
304 gint
305 g_atomic_int_exchange_and_add (volatile gint *atomic,
306 gint val)
308 gint result;
310 result = *atomic;
311 while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
313 return result;
316 void
317 g_atomic_int_add (volatile gint *atomic,
318 gint val)
320 gint result;
322 result = *atomic;
323 while (!g_atomic_int_compare_and_exchange (atomic, result, result + val));
325 # endif /* !__OPTIMIZE__ */
# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Atomic compare-and-exchange: 'subf.' subtracts oldval from the
 * loaded value so 'result' is 0 exactly on a match; a mismatch
 * branches past the conditional store.  sync/isync fence the
 * operation on entry and exit.  TRUE on success. */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval,
				   gint           newval)
{
  gint result;
  __asm__ __volatile__ ("sync\n"
			"1: lwarx %0,0,%1\n"
			" subf. %0,%2,%0\n"
			" bne 2f\n"
			" stwcx. %3,0,%1\n"
			" bne- 1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory");
  return result == 0;
}

/* Pointer variant; on 32-bit PowerPC a gpointer fits the word forms. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
			"1: lwarx %0,0,%1\n"
			" subf. %0,%2,%0\n"
			" bne 2f\n"
			" stwcx. %3,0,%1\n"
			" bne- 1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory");
  return result == 0;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit build, 32-bit int CAS: lwarx loads a word which 'extsw'
 * sign-extends to a doubleword before the comparison. */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval,
				   gint           newval)
{
  /* NOTE(review): pointer-sized local so the sign-extended 64-bit
   * value fits a full register — matches the glibc-derived original;
   * confirm before changing to gint. */
  gpointer result;
  __asm__ __volatile__ ("sync\n"
			"1: lwarx %0,0,%1\n"
			" extsw %0,%0\n"
			" subf. %0,%2,%0\n"
			" bne 2f\n"
			" stwcx. %3,0,%1\n"
			" bne- 1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory");
  return result == 0;
}

/* 64-bit pointer variant: ldarx/stdcx. operate on doublewords. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result;
  __asm__ __volatile__ ("sync\n"
			"1: ldarx %0,0,%1\n"
			" subf. %0,%2,%0\n"
			" bne 2f\n"
			" stdcx. %3,0,%1\n"
			" bne- 1b\n"
			"2: isync"
			: "=&r" (result)
			: "b" (atomic), "r" (oldval), "r" (newval)
			: "cr0", "memory");
  return result == 0;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */

# define G_ATOMIC_MEMORY_BARRIER __asm__ ("sync" : : : "memory")
# elif defined (G_ATOMIC_IA64)
/* Adapted from CVS version 1.8 of glibc's sysdeps/ia64/bits/atomic.h
 */

/* On IA64 the compiler's __sync builtins supply the atomic
 * primitives directly, so no hand-written assembly is needed. */
gint
g_atomic_int_exchange_and_add (volatile gint *atomic,
			       gint           val)
{
  return __sync_fetch_and_add (atomic, val);
}

void
g_atomic_int_add (volatile gint *atomic,
		  gint           val)
{
  __sync_fetch_and_add (atomic, val);
}

gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval,
				   gint           newval)
{
  return __sync_bool_compare_and_swap (atomic, oldval, newval);
}

/* Pointer CAS goes through 'long', which is pointer-sized on IA64. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  return __sync_bool_compare_and_swap ((long *)atomic,
				       (long)oldval, (long)newval);
}

# define G_ATOMIC_MEMORY_BARRIER __sync_synchronize ()
# elif defined (G_ATOMIC_S390)
/* Adapted from glibc's sysdeps/s390/bits/atomic.h
 */

/* 'cs' (compare-and-swap) compares __result (preloaded with oldval)
 * against '*atomic' and stores newval on equality; either way
 * __result ends up holding the value '*atomic' had before, so the
 * expression evaluates to TRUE exactly on success. */
# define ATOMIC_INT_CMP_XCHG(atomic, oldval, newval)			\
  ({									\
     gint __result = oldval;						\
     __asm__ __volatile__ ("cs %0, %2, %1"				\
			   : "+d" (__result), "=Q" (*(atomic))		\
			   : "d" (newval), "m" (*(atomic)) : "cc" );	\
     __result == oldval;						\
  })

# if GLIB_SIZEOF_VOID_P == 4 /* 32-bit system */
/* Pointer variant of the same 32-bit 'cs'. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result = oldval;
  __asm__ __volatile__ ("cs %0, %2, %1"
			: "+d" (result), "=Q" (*(atomic))
			: "d" (newval), "m" (*(atomic)) : "cc" );
  return result == oldval;
}
# elif GLIB_SIZEOF_VOID_P == 8 /* 64-bit system */
/* 64-bit variant using 'csg' (compare-and-swap grande/64-bit). */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
  gpointer result = oldval;
  gpointer *a = atomic; /* NOTE(review): drops volatile for the asm operands, as in the original */
  __asm__ __volatile__ ("csg %0, %2, %1"
			: "+d" (result), "=Q" (*a)
			: "d" ((long)(newval)), "m" (*a) : "cc" );
  return result == oldval;
}
# else /* What's that */
# error "Your system has an unsupported pointer size"
# endif /* GLIB_SIZEOF_VOID_P */
# else /* !G_ATOMIC_S390: no known native implementation */
# define DEFINE_WITH_MUTEXES
# endif /* G_ATOMIC_S390 */
#else /* !__GNUC__ */
# ifdef G_PLATFORM_WIN32
# define DEFINE_WITH_WIN32_INTERLOCKED
# else
# define DEFINE_WITH_MUTEXES
# endif
#endif /* __GNUC__ */
#ifdef DEFINE_WITH_WIN32_INTERLOCKED
# include <windows.h>
/* Following indicates that InterlockedCompareExchangePointer is
 * declared in winbase.h (included by windows.h) and needs to be
 * commented out if not true. It is defined iff WINVER > 0x0400,
 * which is usually correct but can be wrong if WINVER is set before
 * windows.h is included.
 */
# if WINVER > 0x0400
# define HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
# endif

/* Atomic fetch-and-add via the Win32 Interlocked API. */
gint32
g_atomic_int_exchange_and_add (volatile gint32 *atomic,
			       gint32           val)
{
  return InterlockedExchangeAdd (atomic, val);
}

/* Atomic add; the returned old value is simply discarded. */
void
g_atomic_int_add (volatile gint32 *atomic,
		  gint32           val)
{
  InterlockedExchangeAdd (atomic, val);
}

/* Atomic 32-bit compare-and-exchange; TRUE on success. */
gboolean
g_atomic_int_compare_and_exchange (volatile gint32 *atomic,
				   gint32           oldval,
				   gint32           newval)
{
#ifndef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  /* Very old headers declare only the PVOID-based
   * InterlockedCompareExchange, hence the casts; the call returns the
   * previous value, so equality with oldval signals success. */
  return (guint32) InterlockedCompareExchange ((PVOID*)atomic,
					       (PVOID)newval,
					       (PVOID)oldval) == oldval;
#else
  return InterlockedCompareExchange (atomic,
				     newval,
				     oldval) == oldval;
#endif
}

/* Atomic pointer compare-and-exchange. */
gboolean
g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
				       gpointer           oldval,
				       gpointer           newval)
{
# ifdef HAVE_INTERLOCKED_COMPARE_EXCHANGE_POINTER
  return InterlockedCompareExchangePointer (atomic, newval, oldval) == oldval;
# else
#  if GLIB_SIZEOF_VOID_P != 4 /* no 32-bit system */
#   error "InterlockedCompareExchangePointer needed"
#  else
  /* 32-bit only: pointers fit the integer Interlocked variant. */
  return InterlockedCompareExchange (atomic, newval, oldval) == oldval;
#  endif
# endif
}
#endif /* DEFINE_WITH_WIN32_INTERLOCKED */
#ifdef DEFINE_WITH_MUTEXES
/* We have to use the slow, but safe locking method */
/* Single global lock serializing every atomic operation below;
 * created in _g_atomic_thread_init(). */
static GMutex *g_atomic_mutex;
559 gint
560 g_atomic_int_exchange_and_add (volatile gint *atomic,
561 gint val)
563 gint result;
565 g_mutex_lock (g_atomic_mutex);
566 result = *atomic;
567 *atomic += val;
568 g_mutex_unlock (g_atomic_mutex);
570 return result;
574 void
575 g_atomic_int_add (volatile gint *atomic,
576 gint val)
578 g_mutex_lock (g_atomic_mutex);
579 *atomic += val;
580 g_mutex_unlock (g_atomic_mutex);
583 gboolean
584 g_atomic_int_compare_and_exchange (volatile gint *atomic,
585 gint oldval,
586 gint newval)
588 gboolean result;
590 g_mutex_lock (g_atomic_mutex);
591 if (*atomic == oldval)
593 result = TRUE;
594 *atomic = newval;
596 else
597 result = FALSE;
598 g_mutex_unlock (g_atomic_mutex);
600 return result;
603 gboolean
604 g_atomic_pointer_compare_and_exchange (volatile gpointer *atomic,
605 gpointer oldval,
606 gpointer newval)
608 gboolean result;
610 g_mutex_lock (g_atomic_mutex);
611 if (*atomic == oldval)
613 result = TRUE;
614 *atomic = newval;
616 else
617 result = FALSE;
618 g_mutex_unlock (g_atomic_mutex);
620 return result;
#ifdef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
/* When the platform needs barriers even for plain loads/stores, route
 * get/set through the same mutex so they are ordered with respect to
 * the read-modify-write operations above. */
gint
g_atomic_int_get (volatile gint *atomic)
{
  gint result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_int_set (volatile gint *atomic,
		  gint           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  gpointer result;

  g_mutex_lock (g_atomic_mutex);
  result = *atomic;
  g_mutex_unlock (g_atomic_mutex);

  return result;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
		      gpointer           newval)
{
  g_mutex_lock (g_atomic_mutex);
  *atomic = newval;
  g_mutex_unlock (g_atomic_mutex);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#elif defined (G_ATOMIC_OP_MEMORY_BARRIER_NEEDED)
/* Lock-free builds that still need ordering: a barrier precedes each
 * load and follows each store (using the per-arch
 * G_ATOMIC_MEMORY_BARRIER defined above). */
gint
g_atomic_int_get (volatile gint *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_int_set (volatile gint *atomic,
		  gint           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}

gpointer
g_atomic_pointer_get (volatile gpointer *atomic)
{
  G_ATOMIC_MEMORY_BARRIER;
  return *atomic;
}

void
g_atomic_pointer_set (volatile gpointer *atomic,
		      gpointer           newval)
{
  *atomic = newval;
  G_ATOMIC_MEMORY_BARRIER;
}
#endif /* DEFINE_WITH_MUTEXES || G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
#ifdef ATOMIC_INT_CMP_XCHG
/* For the architectures that only supplied the ATOMIC_INT_CMP_XCHG
 * macro (SPARCv9, Alpha, S390), expose it as the public function. */
gboolean
g_atomic_int_compare_and_exchange (volatile gint *atomic,
				   gint           oldval,
				   gint           newval)
{
  return ATOMIC_INT_CMP_XCHG (atomic, oldval, newval);
}
707 gint
708 g_atomic_int_exchange_and_add (volatile gint *atomic,
709 gint val)
711 gint result;
713 result = *atomic;
714 while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
716 return result;
719 void
720 g_atomic_int_add (volatile gint *atomic,
721 gint val)
723 gint result;
725 result = *atomic;
726 while (!ATOMIC_INT_CMP_XCHG (atomic, result, result + val));
728 #endif /* ATOMIC_INT_CMP_XCHG */
/* One-time setup: creates the fallback mutex when the mutex-based
 * implementation is in use; a no-op for the lock-free backends. */
void
_g_atomic_thread_init (void)
{
#ifdef DEFINE_WITH_MUTEXES
  g_atomic_mutex = g_mutex_new ();
#endif /* DEFINE_WITH_MUTEXES */
}
#ifndef G_ATOMIC_OP_MEMORY_BARRIER_NEEDED
/* Out-of-line versions of the get/set operations.  The parentheses
 * around the names suppress function-like macro expansion, so real
 * linkable symbols exist even when g_atomic_*_get/set are presumably
 * provided as macros in the public header (verify against gatomic.h). */
gint
(g_atomic_int_get) (volatile gint *atomic)
{
  return g_atomic_int_get (atomic);
}

void
(g_atomic_int_set) (volatile gint *atomic,
		    gint           newval)
{
  g_atomic_int_set (atomic, newval);
}

gpointer
(g_atomic_pointer_get) (volatile gpointer *atomic)
{
  return g_atomic_pointer_get (atomic);
}

void
(g_atomic_pointer_set) (volatile gpointer *atomic,
			gpointer           newval)
{
  g_atomic_pointer_set (atomic, newval);
}
#endif /* G_ATOMIC_OP_MEMORY_BARRIER_NEEDED */
766 #define __G_ATOMIC_C__
767 #include "galiasdef.c"