/*
 * Imported from mmotm.git: arch/blackfin/include/asm/mutex.h
 * blob f726e3a80ad0795c4b34b7537e61e99f9db62860
 * (commit subject: "fed up with those stupid warnings")
 */
/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/mutex.h>
#else
20 static inline void
21 __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
23 if (unlikely(atomic_dec_return(count) < 0))
24 fail_fn(count);
25 else
26 smp_mb();
29 static inline int
30 __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
32 if (unlikely(atomic_dec_return(count) < 0))
33 return fail_fn(count);
34 else {
35 smp_mb();
36 return 0;
40 static inline void
41 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
43 smp_mb();
44 if (unlikely(atomic_inc_return(count) <= 0))
45 fail_fn(count);
/*
 * The unlock slowpath must restore the count itself: the fastpath
 * increment above may leave it <= 0 while waiters are still queued.
 */
#define __mutex_slowpath_needs_to_unlock()	1
50 static inline int
51 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
54 * We have two variants here. The cmpxchg based one is the best one
55 * because it never induce a false contention state. It is included
56 * here because architectures using the inc/dec algorithms over the
57 * xchg ones are much more likely to support cmpxchg natively.
59 * If not we fall back to the spinlock based variant - that is
60 * just as efficient (and simpler) as a 'destructive' probing of
61 * the mutex state would be.
63 #ifdef __HAVE_ARCH_CMPXCHG
64 if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
65 smp_mb();
66 return 1;
68 return 0;
69 #else
70 return fail_fn(count);
71 #endif
#endif	/* CONFIG_SMP */

#endif	/* _ASM_MUTEX_H */