/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/mutex.h>
#else
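
/*
 * For reference only: a minimal sketch of the atomic_xchg() based lock
 * fastpath that the TODO above refers to, modelled on the generic
 * asm-generic/mutex-xchg.h variant (the exact generic code may differ):
 *
 *	static inline void
 *	__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 *	{
 *		if (unlikely(atomic_xchg(count, 0) != 1))
 *			fail_fn(count);
 *	}
 */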

/* Lock: the 1 -> 0 transition; a negative result means contention, so the slowpath fail_fn is taken. */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();
}

/* As above, but return fail_fn()'s result on contention and 0 otherwise. */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		smp_mb();
		return 0;
	}
}

/* Unlock: a result of 0 or less means there may be waiters, so the slowpath fail_fn wakes them up. */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}
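
/*
 * Tell the generic mutex code that, with this fastpath, the unlock
 * slowpath also has to release the lock (set the count back to 1) itself.
 */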
#define __mutex_slowpath_needs_to_unlock()	1

static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * just as efficient (and simpler) as a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}

#endif

#endif /* _ASM_MUTEX_H */
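
/*
 * For illustration only: a simplified sketch of how the generic mutex
 * core (kernel/mutex.c of this era) is expected to consume these
 * fastpath hooks - the real call sites carry additional debug and
 * owner bookkeeping:
 *
 *	void __sched mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 *
 *	int __sched mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count,
 *						__mutex_trylock_slowpath);
 *	}
 */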