Rename exporter APIs
[glib.git] / glib / gbitlock.c
blob7674f9a3117ef340ae90dde7c7b8ee31958a9b67
1 /*
2 * Copyright © 2008 Ryan Lortie
3 * Copyright © 2010 Codethink Limited
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the licence, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
20 * Author: Ryan Lortie <desrt@desrt.ca>
23 #include "gbitlock.h"
25 #include <glib/gmessages.h>
26 #include <glib/gatomic.h>
27 #include <glib/gslist.h>
28 #include <glib/gthread.h>
29 #include <glib/gslice.h>
31 #include "gthreadprivate.h"
32 #include "config.h"
/* Allow forcing the emulated-futex code path (e.g. for testing) by
 * defining G_BIT_LOCK_FORCE_FUTEX_EMULATION.  The #undef must live
 * INSIDE the conditional: as previously written it was unconditional
 * and disabled the real futex(2) path on every build. */
#ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION
#undef HAVE_FUTEX
#endif
#ifndef HAVE_FUTEX
/* Shared state for the emulated futex (no <linux/futex.h> available):
 * one global mutex serialises all wait/wake traffic, and the list
 * holds one WaitAddress record per address that currently has
 * waiters. */
static GMutex g_futex_mutex;
static GSList *g_futex_address_list = NULL;
#endif
#ifdef HAVE_FUTEX
/*
 * We have headers for futex(2) on the build machine.  This does not
 * imply that every system that ever runs the resulting glib will have
 * kernel support for futex, but you'd have to have a pretty old
 * kernel in order for that not to be the case.
 *
 * If anyone actually gets bit by this, please file a bug. :)
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
56 /* < private >
57 * g_futex_wait:
58 * @address: a pointer to an integer
59 * @value: the value that should be at @address
61 * Atomically checks that the value stored at @address is equal to
62 * @value and then blocks. If the value stored at @address is not
63 * equal to @value then this function returns immediately.
65 * To unblock, call g_futex_wake() on @address.
67 * This call may spuriously unblock (for example, in response to the
68 * process receiving a signal) but this is not guaranteed. Unlike the
69 * Linux system call of a similar name, there is no guarantee that a
70 * waiting process will unblock due to a g_futex_wake() call in a
71 * separate process.
73 static void
74 g_futex_wait (const volatile gint *address,
75 gint value)
77 syscall (__NR_futex, address, (gsize) FUTEX_WAIT, (gsize) value, NULL);
80 /* < private >
81 * g_futex_wake:
82 * @address: a pointer to an integer
84 * Nominally, wakes one thread that is blocked in g_futex_wait() on
85 * @address (if any thread is currently waiting).
87 * As mentioned in the documention for g_futex_wait(), spurious
88 * wakeups may occur. As such, this call may result in more than one
89 * thread being woken up.
91 static void
92 g_futex_wake (const volatile gint *address)
94 syscall (__NR_futex, address, (gsize) FUTEX_WAKE, (gsize) 1, NULL);
97 #else
99 /* emulate futex(2) */
100 typedef struct
102 const volatile gint *address;
103 gint ref_count;
104 GCond wait_queue;
105 } WaitAddress;
107 static WaitAddress *
108 g_futex_find_address (const volatile gint *address)
110 GSList *node;
112 for (node = g_futex_address_list; node; node = node->next)
114 WaitAddress *waiter = node->data;
116 if (waiter->address == address)
117 return waiter;
120 return NULL;
123 static void
124 g_futex_wait (const volatile gint *address,
125 gint value)
127 g_mutex_lock (&g_futex_mutex);
128 if G_LIKELY (g_atomic_int_get (address) == value)
130 WaitAddress *waiter;
132 if ((waiter = g_futex_find_address (address)) == NULL)
134 waiter = g_slice_new (WaitAddress);
135 waiter->address = address;
136 g_cond_init (&waiter->wait_queue);
137 waiter->ref_count = 0;
138 g_futex_address_list =
139 g_slist_prepend (g_futex_address_list, waiter);
142 waiter->ref_count++;
143 g_cond_wait (&waiter->wait_queue, &g_futex_mutex);
145 if (!--waiter->ref_count)
147 g_futex_address_list =
148 g_slist_remove (g_futex_address_list, waiter);
149 g_cond_clear (&waiter->wait_queue);
150 g_slice_free (WaitAddress, waiter);
153 g_mutex_unlock (&g_futex_mutex);
156 static void
157 g_futex_wake (const volatile gint *address)
159 WaitAddress *waiter;
161 /* need to lock here for two reasons:
162 * 1) need to acquire/release lock to ensure waiter is not in
163 * the process of registering a wait
164 * 2) need to -stay- locked until the end to ensure a wake()
165 * in another thread doesn't cause 'waiter' to stop existing
167 g_mutex_lock (&g_futex_mutex);
168 if ((waiter = g_futex_find_address (address)))
169 g_cond_signal (&waiter->wait_queue);
170 g_mutex_unlock (&g_futex_mutex);
172 #endif
/* Counters of threads currently contending on bit locks, hashed by
 * lock address into a small fixed set of buckets.  Lets the unlock
 * paths skip the futex wake entirely when nobody can be waiting. */
#define CONTENTION_CLASSES 11
static volatile gint g_bit_lock_contended[CONTENTION_CLASSES];
/* The hand-written x86 fast paths require GCC >= 4.5 for 'asm goto'.
 * Also test __i386__: the bare 'i386' macro is not defined when
 * compiling in strict ISO mode (-std=c*), which would silently drop
 * the assembly paths on 32-bit x86. */
#if (defined (i386) || defined (__i386__) || defined (__amd64__))
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
#define USE_ASM_GOTO 1
#endif
#endif
184 * g_bit_lock:
185 * @address: a pointer to an integer
186 * @lock_bit: a bit value between 0 and 31
188 * Sets the indicated @lock_bit in @address. If the bit is already
189 * set, this call will block until g_bit_unlock() unsets the
190 * corresponding bit.
192 * Attempting to lock on two different bits within the same integer is
193 * not supported and will very probably cause deadlocks.
195 * The value of the bit that is set is (1u << @bit). If @bit is not
196 * between 0 and 31 then the result is undefined.
198 * This function accesses @address atomically. All other accesses to
199 * @address must be atomic in order for this function to work
200 * reliably.
202 * Since: 2.24
204 void
205 g_bit_lock (volatile gint *address,
206 gint lock_bit)
208 #ifdef USE_ASM_GOTO
209 retry:
210 asm volatile goto ("lock bts %1, (%0)\n"
211 "jc %l[contended]"
212 : /* no output */
213 : "r" (address), "r" (lock_bit)
214 : "cc", "memory"
215 : contended);
216 return;
218 contended:
220 guint mask = 1u << lock_bit;
221 guint v;
223 v = g_atomic_int_get (address);
224 if (v & mask)
226 guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
228 g_atomic_int_add (&g_bit_lock_contended[class], +1);
229 g_futex_wait (address, v);
230 g_atomic_int_add (&g_bit_lock_contended[class], -1);
233 goto retry;
234 #else
235 guint mask = 1u << lock_bit;
236 guint v;
238 retry:
239 v = g_atomic_int_or (address, mask);
240 if (v & mask)
241 /* already locked */
243 guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
245 g_atomic_int_add (&g_bit_lock_contended[class], +1);
246 g_futex_wait (address, v);
247 g_atomic_int_add (&g_bit_lock_contended[class], -1);
249 goto retry;
251 #endif
255 * g_bit_trylock:
256 * @address: a pointer to an integer
257 * @lock_bit: a bit value between 0 and 31
259 * Sets the indicated @lock_bit in @address, returning %TRUE if
260 * successful. If the bit is already set, returns %FALSE immediately.
262 * Attempting to lock on two different bits within the same integer is
263 * not supported.
265 * The value of the bit that is set is (1u << @bit). If @bit is not
266 * between 0 and 31 then the result is undefined.
268 * This function accesses @address atomically. All other accesses to
269 * @address must be atomic in order for this function to work
270 * reliably.
272 * Returns: %TRUE if the lock was acquired
274 * Since: 2.24
276 gboolean
277 g_bit_trylock (volatile gint *address,
278 gint lock_bit)
280 #ifdef USE_ASM_GOTO
281 gboolean result;
283 asm volatile ("lock bts %2, (%1)\n"
284 "setnc %%al\n"
285 "movzx %%al, %0"
286 : "=r" (result)
287 : "r" (address), "r" (lock_bit)
288 : "cc", "memory");
290 return result;
291 #else
292 guint mask = 1u << lock_bit;
293 guint v;
295 v = g_atomic_int_or (address, mask);
297 return ~v & mask;
298 #endif
302 * g_bit_unlock:
303 * @address: a pointer to an integer
304 * @lock_bit: a bit value between 0 and 31
306 * Clears the indicated @lock_bit in @address. If another thread is
307 * currently blocked in g_bit_lock() on this same bit then it will be
308 * woken up.
310 * This function accesses @address atomically. All other accesses to
311 * @address must be atomic in order for this function to work
312 * reliably.
314 * Since: 2.24
316 void
317 g_bit_unlock (volatile gint *address,
318 gint lock_bit)
320 #ifdef USE_ASM_GOTO
321 asm volatile ("lock btr %1, (%0)"
322 : /* no output */
323 : "r" (address), "r" (lock_bit)
324 : "cc", "memory");
325 #else
326 guint mask = 1u << lock_bit;
328 g_atomic_int_and (address, ~mask);
329 #endif
332 guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
334 if (g_atomic_int_get (&g_bit_lock_contended[class]))
335 g_futex_wake (address);
340 /* We emulate pointer-sized futex(2) because the kernel API only
341 * supports integers.
343 * We assume that the 'interesting' part is always the lower order bits.
344 * This assumption holds because pointer bitlocks are restricted to
345 * using the low order bits of the pointer as the lock.
347 * On 32 bits, there is nothing to do since the pointer size is equal to
348 * the integer size. On little endian the lower-order bits don't move,
349 * so do nothing. Only on 64bit big endian do we need to do a bit of
350 * pointer arithmetic: the low order bits are shifted by 4 bytes. We
351 * have a helper function that always does the right thing here.
353 * Since we always consider the low-order bits of the integer value, a
354 * simple cast from (gsize) to (guint) always takes care of that.
356 * After that, pointer-sized futex becomes as simple as:
358 * g_futex_wait (g_futex_int_address (address), (guint) value);
360 * and
362 * g_futex_wake (g_futex_int_address (int_address));
364 static const volatile gint *
365 g_futex_int_address (const volatile void *address)
367 const volatile gint *int_address = address;
369 #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8
370 int_address++;
371 #endif
373 return int_address;
377 * g_pointer_bit_lock:
378 * @address: a pointer to a #gpointer-sized value
379 * @lock_bit: a bit value between 0 and 31
381 * This is equivalent to g_bit_lock, but working on pointers (or other
382 * pointer-sized values).
384 * For portability reasons, you may only lock on the bottom 32 bits of
385 * the pointer.
387 * Since: 2.30
389 void
390 (g_pointer_bit_lock) (volatile void *address,
391 gint lock_bit)
393 g_return_if_fail (lock_bit < 32);
396 #ifdef USE_ASM_GOTO
397 retry:
398 asm volatile goto ("lock bts %1, (%0)\n"
399 "jc %l[contended]"
400 : /* no output */
401 : "r" (address), "r" ((gsize) lock_bit)
402 : "cc", "memory"
403 : contended);
404 return;
406 contended:
408 volatile gsize *pointer_address = address;
409 gsize mask = 1u << lock_bit;
410 gsize v;
412 v = (gsize) g_atomic_pointer_get (pointer_address);
413 if (v & mask)
415 guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
417 g_atomic_int_add (&g_bit_lock_contended[class], +1);
418 g_futex_wait (g_futex_int_address (address), v);
419 g_atomic_int_add (&g_bit_lock_contended[class], -1);
422 goto retry;
423 #else
424 volatile gsize *pointer_address = address;
425 gsize mask = 1u << lock_bit;
426 gsize v;
428 retry:
429 v = g_atomic_pointer_or (pointer_address, mask);
430 if (v & mask)
431 /* already locked */
433 guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
435 g_atomic_int_add (&g_bit_lock_contended[class], +1);
436 g_futex_wait (g_futex_int_address (address), (guint) v);
437 g_atomic_int_add (&g_bit_lock_contended[class], -1);
439 goto retry;
441 #endif
446 * g_pointer_bit_trylock:
447 * @address: a pointer to a #gpointer-sized value
448 * @lock_bit: a bit value between 0 and 31
450 * This is equivalent to g_bit_trylock, but working on pointers (or
451 * other pointer-sized values).
453 * For portability reasons, you may only lock on the bottom 32 bits of
454 * the pointer.
456 * Returns: %TRUE if the lock was acquired
458 * Since: 2.30
460 gboolean
461 (g_pointer_bit_trylock) (volatile void *address,
462 gint lock_bit)
464 g_return_val_if_fail (lock_bit < 32, FALSE);
467 #ifdef USE_ASM_GOTO
468 gboolean result;
470 asm volatile ("lock bts %2, (%1)\n"
471 "setnc %%al\n"
472 "movzx %%al, %0"
473 : "=r" (result)
474 : "r" (address), "r" ((gsize) lock_bit)
475 : "cc", "memory");
477 return result;
478 #else
479 volatile gsize *pointer_address = address;
480 gsize mask = 1u << lock_bit;
481 gsize v;
483 g_return_val_if_fail (lock_bit < 32, FALSE);
485 v = g_atomic_pointer_or (pointer_address, mask);
487 return ~v & mask;
488 #endif
493 * g_pointer_bit_unlock:
494 * @address: a pointer to a #gpointer-sized value
495 * @lock_bit: a bit value between 0 and 31
497 * This is equivalent to g_bit_unlock, but working on pointers (or other
498 * pointer-sized values).
500 * For portability reasons, you may only lock on the bottom 32 bits of
501 * the pointer.
503 * Since: 2.30
505 void
506 (g_pointer_bit_unlock) (volatile void *address,
507 gint lock_bit)
509 g_return_if_fail (lock_bit < 32);
512 #ifdef USE_ASM_GOTO
513 asm volatile ("lock btr %1, (%0)"
514 : /* no output */
515 : "r" (address), "r" ((gsize) lock_bit)
516 : "cc", "memory");
517 #else
518 volatile gsize *pointer_address = address;
519 gsize mask = 1u << lock_bit;
521 g_atomic_pointer_and (pointer_address, ~mask);
522 #endif
525 guint class = ((gsize) address) % G_N_ELEMENTS (g_bit_lock_contended);
526 if (g_atomic_int_get (&g_bit_lock_contended[class]))
527 g_futex_wake (g_futex_int_address (address));