/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_64_H
#define _ASM_X86_MUTEX_64_H
/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"		\
		     "   jns 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *				  from 1 to a 0 value
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return(count) < 0))
		return -1;
	else
		return 0;
}
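/*
 * Hedged sketch of a caller (names illustrative): the retval variant
 * returns the fastpath outcome instead of calling the slowpath itself,
 * letting the generic layer decide what to do on contention:
 *
 *	if (__mutex_fastpath_lock_retval(&lock->count))
 *		__mutex_lock_slowpath(&lock->count);
 *
 * No hand-written asm is needed here: atomic_dec_return() already emits a
 * locked read-modify-write, and the caller tests the returned value, not
 * just the flags.
 */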
/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   incl (%%rdi)\n"		\
		     "   jg 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
/*
 * The unlock fastpath leaves the count nonpositive on contention, so the
 * generic unlock slowpath must set it back to 1 (unlocked) itself.
 */
#define __mutex_slowpath_needs_to_unlock()	1
/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [the fallback function is never used on
 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;

	return 0;
}
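/*
 * Hypothetical usage (illustrative only): mutex_trylock() reduces to
 *
 *	if (__mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath))
 *		return 1;	// count went 1 -> 0: lock acquired
 *
 * The cmpxchg succeeds only when the count is exactly 1 (unlocked); a
 * held or contended mutex (count 0 or negative) fails immediately
 * without spinning, and fail_fn exists only to satisfy the generic API's
 * signature.
 */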
#endif /* _ASM_X86_MUTEX_64_H */