cor_2_6_31.git: arch/blackfin/include/asm/mutex.h
/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/mutex.h>
#else
/* Acquire fastpath: a negative result after the decrement means the mutex
 * is already held, so the contended slowpath (fail_fn) must run. */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();
}
/* Same as above, but the slowpath's return value is passed back to the
 * caller (used by the interruptible/killable lock variants). */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		smp_mb();
		return 0;
	}
}
/* Release fastpath: a non-positive result means waiters must be woken. */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}

/* The unlock slowpath is responsible for restoring the count to 1. */
#define __mutex_slowpath_needs_to_unlock()	1
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not we fall back to the spinlock based variant - that is
	 * just as efficient (and simpler) as a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}
#endif /* CONFIG_SMP */
#endif /* _ASM_MUTEX_H */
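
The hooks above all rely on the generic mutex counter protocol: 1 means unlocked, 0 means locked with no waiters, and a negative value means locked with contention, with fail_fn() standing in for the generic slowpath. The standalone C11 sketch below mimics that protocol in userspace to make the decrement-based acquire and the cmpxchg-based trylock easier to follow; the toy_mutex_* names and the spinning "slowpath" are invented for illustration and are not part of this header or of the kernel API.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Toy mutex using the same counter protocol as the fastpath hooks above:
 *   1  -> unlocked
 *   0  -> locked, no waiters
 *  <0  -> locked, contended (the kernel would queue and sleep here)
 */
struct toy_mutex {
	atomic_int count;
};

static void toy_mutex_init(struct toy_mutex *m)
{
	atomic_init(&m->count, 1);
}

/* Acquire: decrement; a negative new value means someone else holds it. */
static void toy_mutex_lock(struct toy_mutex *m)
{
	if (atomic_fetch_sub(&m->count, 1) - 1 < 0) {
		/* Invented "slowpath": spin until the count turns positive,
		 * then claim the lock with a compare-and-swap. */
		int c;
		do {
			c = atomic_load(&m->count);
		} while (c <= 0 ||
			 !atomic_compare_exchange_weak(&m->count, &c, c - 1));
	}
}

/* Trylock, cmpxchg variant: 1 -> 0 succeeds only if currently unlocked. */
static int toy_mutex_trylock(struct toy_mutex *m)
{
	int expected = 1;
	return atomic_compare_exchange_strong(&m->count, &expected, 0);
}

/* Release: put the count back to 1 (no waiter bookkeeping in this toy). */
static void toy_mutex_unlock(struct toy_mutex *m)
{
	atomic_store(&m->count, 1);
}

int main(void)
{
	struct toy_mutex m;

	toy_mutex_init(&m);
	toy_mutex_lock(&m);
	printf("trylock while held: %d\n", toy_mutex_trylock(&m)); /* 0 */
	toy_mutex_unlock(&m);
	printf("trylock when free:  %d\n", toy_mutex_trylock(&m)); /* 1 */
	return 0;
}

Unlike this toy, the real slowpath does not spin: it queues the task on the mutex's wait list and sleeps, which is why __mutex_fastpath_unlock() has to call fail_fn() to wake a waiter whenever the count stays non-positive after the increment.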