/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */
#include <asm-generic/mutex-dec.h>
17 __mutex_fastpath_lock(atomic_t
*count
, void (*fail_fn
)(atomic_t
*))
19 if (unlikely(atomic_dec_return(count
) < 0))
26 __mutex_fastpath_lock_retval(atomic_t
*count
, int (*fail_fn
)(atomic_t
*))
28 if (unlikely(atomic_dec_return(count
) < 0))
29 return fail_fn(count
);
37 __mutex_fastpath_unlock(atomic_t
*count
, void (*fail_fn
)(atomic_t
*))
40 if (unlikely(atomic_inc_return(count
) <= 0))
/*
 * NOTE(review): non-zero means the slowpath still has to release the lock
 * word itself — presumably because the dec/inc-based fastpath leaves the
 * count in a contended (non-1) state; confirm against the mutex core.
 */
#define __mutex_slowpath_needs_to_unlock()	1
47 __mutex_fastpath_trylock(atomic_t
*count
, int (*fail_fn
)(atomic_t
*))
50 * We have two variants here. The cmpxchg based one is the best one
51 * because it never induce a false contention state. It is included
52 * here because architectures using the inc/dec algorithms over the
53 * xchg ones are much more likely to support cmpxchg natively.
55 * If not we fall back to the spinlock based variant - that is
56 * just as efficient (and simpler) as a 'destructive' probing of
57 * the mutex state would be.
59 #ifdef __HAVE_ARCH_CMPXCHG
60 if (likely(atomic_cmpxchg(count
, 1, 0) == 1)) {
66 return fail_fn(count
);