2 * Copyright © 2008 Ryan Lortie
3 * Copyright © 2010 Codethink Limited
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the licence, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
20 * Author: Ryan Lortie <desrt@desrt.ca>
25 #include <glib/gmessages.h>
26 #include <glib/gatomic.h>
27 #include <glib/gslist.h>
28 #include <glib/gthread.h>
29 #include <glib/gslice.h>
31 #include "gthreadprivate.h"
35 #ifdef G_BIT_LOCK_FORCE_FUTEX_EMULATION
/* State for the emulated-futex fallback (mutex + condition variables).
 * g_futex_mutex serializes all access to g_futex_address_list below. */
39 static GMutex g_futex_mutex
;
/* Linked list of WaitAddress records, one per integer address that
 * currently has at least one waiter blocked on it. */
40 static GSList
*g_futex_address_list
= NULL
;
45 * We have headers for futex(2) on the build machine. This does not
46 * imply that every system that ever runs the resulting glib will have
47 * kernel support for futex, but you'd have to have a pretty old
48 * kernel in order for that not to be the case.
50 * If anyone actually gets bit by this, please file a bug. :)
52 #include <linux/futex.h>
53 #include <sys/syscall.h>
58 * @address: a pointer to an integer
59 * @value: the value that should be at @address
61 * Atomically checks that the value stored at @address is equal to
62 * @value and then blocks. If the value stored at @address is not
63 * equal to @value then this function returns immediately.
65 * To unblock, call g_futex_wake() on @address.
67 * This call may spuriously unblock (for example, in response to the
68 * process receiving a signal) but this is not guaranteed. Unlike the
69 * Linux system call of a similar name, there is no guarantee that a
70 * waiting process will unblock due to a g_futex_wake() call in a
/* g_futex_wait (real-futex path): atomically check that *address still
 * equals @value and, if so, block until woken.  Maps directly onto the
 * Linux futex(2) FUTEX_WAIT operation; spurious wakeups are possible.
 * NOTE(review): the extracted text is fragmentary here — the return type,
 * the second parameter (gint value) and the braces appear to have been
 * dropped; verify against upstream gbitlock.c. */
74 g_futex_wait (const volatile gint
*address
,
77 syscall (__NR_futex
, address
, (gsize
) FUTEX_WAIT
, (gsize
) value
, NULL
);
82 * @address: a pointer to an integer
84 * Nominally, wakes one thread that is blocked in g_futex_wait() on
85 * @address (if any thread is currently waiting).
87 * As mentioned in the documentation for g_futex_wait(), spurious
88 * wakeups may occur. As such, this call may result in more than one
89 * thread being woken up.
/* g_futex_wake (real-futex path): wake at most one thread blocked in
 * g_futex_wait() on @address, via futex(2) FUTEX_WAKE with a count of 1.
 * NOTE(review): return type and surrounding braces were dropped by the
 * extraction; verify against upstream gbitlock.c. */
92 g_futex_wake (const volatile gint
*address
)
94 syscall (__NR_futex
, address
, (gsize
) FUTEX_WAKE
, (gsize
) 1, NULL
);
99 /* emulate futex(2) */
/* Per-address wait record used by the futex emulation: 'address' is the
 * integer being waited on.  NOTE(review): the struct header and its other
 * fields (the code below also uses ->wait_queue (GCond) and ->ref_count)
 * are missing from this extract — confirm against upstream. */
102 const volatile gint
*address
;
/* Linear scan of g_futex_address_list for the WaitAddress record whose
 * ->address matches @address.  Caller must hold g_futex_mutex.
 * NOTE(review): return type, braces and the function's return statements
 * were dropped by the extraction; verify against upstream gbitlock.c. */
108 g_futex_find_address (const volatile gint
*address
)
/* Walk every registered waiter record. */
112 for (node
= g_futex_address_list
; node
; node
= node
->next
)
114 WaitAddress
*waiter
= node
->data
;
/* Match on the raw address being waited upon. */
116 if (waiter
->address
== address
)
/* g_futex_wait (emulated path): block on @address using a GCond keyed by
 * address, but only if *address still equals @value when checked under
 * g_futex_mutex — mirroring futex(2) FUTEX_WAIT semantics.
 * NOTE(review): several structural lines (braces, ref_count increment,
 * parameter list tail) are missing from this extract; verify upstream. */
124 g_futex_wait (const volatile gint
*address
,
127 g_mutex_lock (&g_futex_mutex
);
/* Re-check the value under the lock: if it already changed, return
 * immediately rather than sleeping (avoids the lost-wakeup race). */
128 if G_LIKELY (g_atomic_int_get (address
) == value
)
/* Find an existing wait record for this address, or create one. */
132 if ((waiter
= g_futex_find_address (address
)) == NULL
)
134 waiter
= g_slice_new (WaitAddress
);
135 waiter
->address
= address
;
136 g_cond_init (&waiter
->wait_queue
);
/* Fresh record starts at zero references; presumably incremented before
 * the g_cond_wait() below — the increment line is not visible here. */
137 waiter
->ref_count
= 0;
138 g_futex_address_list
=
139 g_slist_prepend (g_futex_address_list
, waiter
);
/* Sleep until g_futex_wake() signals this address's queue; the mutex is
 * released while waiting and re-acquired on wakeup. */
143 g_cond_wait (&waiter
->wait_queue
, &g_futex_mutex
);
/* Last waiter out tears the record down and unlinks it from the list. */
145 if (!--waiter
->ref_count
)
147 g_futex_address_list
=
148 g_slist_remove (g_futex_address_list
, waiter
);
149 g_cond_clear (&waiter
->wait_queue
);
150 g_slice_free (WaitAddress
, waiter
);
153 g_mutex_unlock (&g_futex_mutex
);
/* g_futex_wake (emulated path): signal the condition variable of the
 * wait record registered for @address, if any thread is waiting there.
 * NOTE(review): return type and braces were dropped by the extraction. */
157 g_futex_wake (const volatile gint
*address
)
161 /* need to lock here for two reasons:
162 * 1) need to acquire/release lock to ensure waiter is not in
163 * the process of registering a wait
164 * 2) need to -stay- locked until the end to ensure a wake()
165 * in another thread doesn't cause 'waiter' to stop existing
167 g_mutex_lock (&g_futex_mutex
);
168 if ((waiter
= g_futex_find_address (address
)))
169 g_cond_signal (&waiter
->wait_queue
);
170 g_mutex_unlock (&g_futex_mutex
);
/* Contention bookkeeping: addresses are hashed (modulo) into one of
 * CONTENTION_CLASSES counters; g_bit_unlock()/g_pointer_bit_unlock() only
 * issue a (possibly expensive) futex wake when the counter for the
 * address's class is non-zero.  11 is prime, giving a reasonable spread. */
174 #define CONTENTION_CLASSES 11
175 static volatile gint g_bit_lock_contended
[CONTENTION_CLASSES
];
/* On x86/amd64 with GCC >= 4.5 we can use 'asm goto' to implement the
 * lock fast path as a single lock-prefixed bit-test-and-set. */
177 #if (defined (i386) || defined (__amd64__))
178 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
179 #define USE_ASM_GOTO 1
185 * @address: a pointer to an integer
186 * @lock_bit: a bit value between 0 and 31
188 * Sets the indicated @lock_bit in @address. If the bit is already
189 * set, this call will block until g_bit_unlock() unsets the
192 * Attempting to lock on two different bits within the same integer is
193 * not supported and will very probably cause deadlocks.
195 * The value of the bit that is set is (1u << @bit). If @bit is not
196 * between 0 and 31 then the result is undefined.
198 * This function accesses @address atomically. All other accesses to
199 * @address must be atomic in order for this function to work
/* g_bit_lock: acquire the spin/futex lock represented by bit @lock_bit
 * of *@address, blocking (via g_futex_wait) while it is contended.
 * Two compiled variants are interleaved below: the USE_ASM_GOTO fast path
 * ('lock bts' + branch-on-carry to a contended label) and the portable
 * path built on g_atomic_int_or().  NOTE(review): the #ifdef/#else/#endif
 * lines, labels, braces and retry gotos are missing from this extract —
 * verify against upstream gbitlock.c before editing. */
205 g_bit_lock (volatile gint
*address
,
/* Fast path: atomically set the bit; carry flag reports the old value. */
210 asm volatile goto ("lock bts %1, (%0)\n"
213 : "r" (address
), "r" (lock_bit
)
/* Contended (asm-goto variant): snapshot the word and futex-wait on it. */
220 guint mask
= 1u << lock_bit
;
223 v
= g_atomic_int_get (address
);
/* Bump the contention counter for this address's class around the wait,
 * so that unlock knows a wake is needed. */
226 guint
class = ((gsize
) address
) % G_N_ELEMENTS (g_bit_lock_contended
);
228 g_atomic_int_add (&g_bit_lock_contended
[class], +1);
229 g_futex_wait (address
, v
);
230 g_atomic_int_add (&g_bit_lock_contended
[class], -1);
/* Portable variant: OR the bit in; the returned old value tells us
 * whether the lock was already held. */
235 guint mask
= 1u << lock_bit
;
239 v
= g_atomic_int_or (address
, mask
);
243 guint
class = ((gsize
) address
) % G_N_ELEMENTS (g_bit_lock_contended
);
245 g_atomic_int_add (&g_bit_lock_contended
[class], +1);
246 g_futex_wait (address
, v
);
247 g_atomic_int_add (&g_bit_lock_contended
[class], -1);
256 * @address: a pointer to an integer
257 * @lock_bit: a bit value between 0 and 31
259 * Sets the indicated @lock_bit in @address, returning %TRUE if
260 * successful. If the bit is already set, returns %FALSE immediately.
262 * Attempting to lock on two different bits within the same integer is
265 * The value of the bit that is set is (1u << @bit). If @bit is not
266 * between 0 and 31 then the result is undefined.
268 * This function accesses @address atomically. All other accesses to
269 * @address must be atomic in order for this function to work
272 * Returns: %TRUE if the lock was acquired
/* g_bit_trylock: attempt to set @lock_bit in *@address once, returning
 * TRUE on acquisition and FALSE (without blocking) if already set.
 * Both the inline-asm ('lock bts' reading the carry flag) and the
 * portable g_atomic_int_or() variants appear below; the #ifdef plumbing,
 * outputs of the asm and the return statements are missing from this
 * extract — verify against upstream. */
277 g_bit_trylock (volatile gint
*address
,
283 asm volatile ("lock bts %2, (%1)\n"
287 : "r" (address
), "r" (lock_bit
)
/* Portable variant: success iff the bit was clear in the old value. */
292 guint mask
= 1u << lock_bit
;
295 v
= g_atomic_int_or (address
, mask
);
303 * @address: a pointer to an integer
304 * @lock_bit: a bit value between 0 and 31
306 * Clears the indicated @lock_bit in @address. If another thread is
307 * currently blocked in g_bit_lock() on this same bit then it will be
310 * This function accesses @address atomically. All other accesses to
311 * @address must be atomic in order for this function to work
/* g_bit_unlock: atomically clear @lock_bit in *@address ('lock btr' on
 * x86, g_atomic_int_and with the inverted mask otherwise), then wake one
 * futex waiter — but only if the contention counter for this address's
 * class indicates somebody may be waiting.  NOTE(review): #ifdef/#endif
 * lines and braces are missing from this extract. */
317 g_bit_unlock (volatile gint
*address
,
321 asm volatile ("lock btr %1, (%0)"
323 : "r" (address
), "r" (lock_bit
)
326 guint mask
= 1u << lock_bit
;
328 g_atomic_int_and (address
, ~mask
);
/* Cheap check first: skip the futex syscall when nobody is contended. */
332 guint
class = ((gsize
) address
) % G_N_ELEMENTS (g_bit_lock_contended
);
334 if (g_atomic_int_get (&g_bit_lock_contended
[class]))
335 g_futex_wake (address
);
340 /* We emulate pointer-sized futex(2) because the kernel API only
343 * We assume that the 'interesting' part is always the lower order bits.
344 * This assumption holds because pointer bitlocks are restricted to
345 * using the low order bits of the pointer as the lock.
347 * On 32 bits, there is nothing to do since the pointer size is equal to
348 * the integer size. On little endian the lower-order bits don't move,
349 * so do nothing. Only on 64bit big endian do we need to do a bit of
350 * pointer arithmetic: the low order bits are shifted by 4 bytes. We
351 * have a helper function that always does the right thing here.
353 * Since we always consider the low-order bits of the integer value, a
354 * simple cast from (gsize) to (guint) always takes care of that.
356 * After that, pointer-sized futex becomes as simple as:
358 * g_futex_wait (g_futex_int_address (address), (guint) value);
362 * g_futex_wake (g_futex_int_address (int_address));
/* Map a pointer-sized lock word to the gint the kernel futex should
 * operate on.  The interesting (low-order) bits live in the first gint on
 * little endian; on 64-bit big endian they sit 4 bytes in, hence the
 * adjustment guarded below.  NOTE(review): the body of the big-endian
 * branch and the return statement are missing from this extract. */
364 static const volatile gint
*
365 g_futex_int_address (const volatile void *address
)
367 const volatile gint
*int_address
= address
;
369 #if G_BYTE_ORDER == G_BIG_ENDIAN && GLIB_SIZEOF_VOID_P == 8
377 * g_pointer_bit_lock:
378 * @address: a pointer to a #gpointer-sized value
379 * @lock_bit: a bit value between 0 and 31
381 * This is equivalent to g_bit_lock, but working on pointers (or other
382 * pointer-sized values).
384 * For portability reasons, you may only lock on the bottom 32 bits of
/* g_pointer_bit_lock: pointer-sized analogue of g_bit_lock().  The name is
 * parenthesized to defeat any macro version and emit the real function.
 * Only bits 0..31 may be locked (enforced below) so that the futex — which
 * operates on a 32-bit int — can observe the bit; the wait goes through
 * g_futex_int_address() to find the right half of the word on 64-bit BE.
 * NOTE(review): #ifdef/#else/#endif lines, retry labels/gotos and braces
 * are missing from this extract — verify against upstream gbitlock.c. */
390 (g_pointer_bit_lock
) (volatile void *address
,
393 g_return_if_fail (lock_bit
< 32);
/* asm-goto fast path: single locked bit-test-and-set. */
398 asm volatile goto ("lock bts %1, (%0)\n"
401 : "r" (address
), "r" ((gsize
) lock_bit
)
/* Contended (asm-goto variant): snapshot pointer word, futex-wait on the
 * 32 bits containing the lock bit. */
408 volatile gsize
*pointer_address
= address
;
409 gsize mask
= 1u << lock_bit
;
412 v
= (gsize
) g_atomic_pointer_get (pointer_address
);
415 guint
class = ((gsize
) address
) % G_N_ELEMENTS (g_bit_lock_contended
);
417 g_atomic_int_add (&g_bit_lock_contended
[class], +1);
418 g_futex_wait (g_futex_int_address (address
), v
);
419 g_atomic_int_add (&g_bit_lock_contended
[class], -1);
/* Portable variant: OR the bit into the pointer-sized word; the old
 * value tells us whether the lock was held.  The (guint) cast keeps only
 * the low-order 32 bits, matching what the futex compares. */
424 volatile gsize
*pointer_address
= address
;
425 gsize mask
= 1u << lock_bit
;
429 v
= g_atomic_pointer_or (pointer_address
, mask
);
433 guint
class = ((gsize
) address
) % G_N_ELEMENTS (g_bit_lock_contended
);
435 g_atomic_int_add (&g_bit_lock_contended
[class], +1);
436 g_futex_wait (g_futex_int_address (address
), (guint
) v
);
437 g_atomic_int_add (&g_bit_lock_contended
[class], -1);
446 * g_pointer_bit_trylock:
447 * @address: a pointer to a #gpointer-sized value
448 * @lock_bit: a bit value between 0 and 31
450 * This is equivalent to g_bit_trylock, but working on pointers (or
451 * other pointer-sized values).
453 * For portability reasons, you may only lock on the bottom 32 bits of
456 * Returns: %TRUE if the lock was acquired
/* g_pointer_bit_trylock: single non-blocking attempt to set @lock_bit in
 * the pointer-sized word at @address; TRUE iff the bit was previously
 * clear.  Bits are restricted to 0..31 for portability with the futex
 * path.  NOTE(review): #ifdef plumbing, asm outputs and return statements
 * are missing from this extract — verify against upstream. */
461 (g_pointer_bit_trylock
) (volatile void *address
,
464 g_return_val_if_fail (lock_bit
< 32, FALSE
);
470 asm volatile ("lock bts %2, (%1)\n"
474 : "r" (address
), "r" ((gsize
) lock_bit
)
/* Portable variant. */
479 volatile gsize
*pointer_address
= address
;
480 gsize mask
= 1u << lock_bit
;
/* Duplicate precondition check — presumably this one lives inside the
 * non-asm #else branch, which is why it appears twice; confirm upstream. */
483 g_return_val_if_fail (lock_bit
< 32, FALSE
);
485 v
= g_atomic_pointer_or (pointer_address
, mask
);
493 * g_pointer_bit_unlock:
494 * @address: a pointer to a #gpointer-sized value
495 * @lock_bit: a bit value between 0 and 31
497 * This is equivalent to g_bit_unlock, but working on pointers (or other
498 * pointer-sized values).
500 * For portability reasons, you may only lock on the bottom 32 bits of
/* g_pointer_bit_unlock: clear @lock_bit in the pointer-sized word at
 * @address ('lock btr' on x86, g_atomic_pointer_and otherwise), then wake
 * a waiter via the int-sized alias of the word — but only when the
 * contention counter for this address's class is non-zero.
 * NOTE(review): #ifdef/#endif lines and braces are missing from this
 * extract — verify against upstream gbitlock.c. */
506 (g_pointer_bit_unlock
) (volatile void *address
,
509 g_return_if_fail (lock_bit
< 32);
513 asm volatile ("lock btr %1, (%0)"
515 : "r" (address
), "r" ((gsize
) lock_bit
)
/* Portable variant: AND with the inverted mask clears just our bit. */
518 volatile gsize
*pointer_address
= address
;
519 gsize mask
= 1u << lock_bit
;
521 g_atomic_pointer_and (pointer_address
, ~mask
);
/* Skip the futex syscall entirely when nobody is recorded as waiting. */
525 guint
class = ((gsize
) address
) % G_N_ELEMENTS (g_bit_lock_contended
);
526 if (g_atomic_int_get (&g_bit_lock_contended
[class]))
527 g_futex_wake (g_futex_int_address (address
));