#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED			(spinlock_t) { 0 }
#define spin_lock_init(x)			((x)->lock = 0)
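
/*
 * Illustrative usage sketch (not part of this header; in-tree code goes
 * through the generic spin_lock()/spin_unlock() wrappers in
 * <linux/spinlock.h>, which expand to the _raw_* primitives below):
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */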

#ifdef ASM_SUPPORTED

/*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
 * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
 * carefully coded to touch only those registers that spin_lock() marks "clobbered".
 */

#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"

static inline void
_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
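
	/*
	 * Note: the lock word's address is pinned in r31 by the "register
	 * ... asm" binding above.  ia64_spinlock_contention() expects to
	 * find it there; this is part of the non-standard calling
	 * convention described in the comment before IA64_SPINLOCK_CLOBBERS.
	 */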

#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
49 " mov ar.ccv = r0\n\t"
53 "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
54 "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
55 "cmp4.ne p14, p0 = r30, r0\n\t"
58 "(p14) br.cond.spnt.many b6"
59 : "=r"(ptr
) : "r"(ptr
), "r" (flags
) : IA64_SPINLOCK_CLOBBERS
);
62 " mov ar.ccv = r0\n\t"
66 "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
67 "cmp4.ne p14, p0 = r30, r0\n\t"
69 "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
70 : "=r"(ptr
) : "r"(ptr
), "r" (flags
) : IA64_SPINLOCK_CLOBBERS
);
# endif /* CONFIG_MCKINLEY */
#else
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* mis-declare, so we get the entry-point, not its function descriptor: */
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.call.spnt.many b6 = b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_MCKINLEY */
#endif
}

#define _raw_spin_lock(lock)	_raw_spin_lock_flags(lock, 0)
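
/*
 * The flags value is handed to the contention path as an asm input (note
 * r27 among IA64_SPINLOCK_CLOBBERS), presumably so that the slow path can
 * re-enable interrupts while it spins when the saved PSR in `flags' had
 * them enabled; plain _raw_spin_lock() passes 0, i.e. no saved state.
 */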

/* Unlock by doing an ordered store and releasing the cacheline with nta */
static inline void _raw_spin_unlock(spinlock_t *x) {
	barrier();
	asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x));
}
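
/*
 * st4.rel gives the store release semantics: all earlier memory accesses
 * become visible before the lock word is cleared.  The .nta (non-temporal)
 * hint additionally biases the cacheline away from this CPU, which should
 * help when another CPU is spinning on the lock.
 */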

#else /* !ASM_SUPPORTED */

#define _raw_spin_lock_flags(lock, flags)	_raw_spin_lock(lock)
# define _raw_spin_lock(x)								\
do {											\
	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
	__u64 ia64_spinlock_val;							\
	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
	if (unlikely(ia64_spinlock_val)) {						\
		do {									\
			while (*ia64_spinlock_ptr)					\
				ia64_barrier();						\
			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
		} while (ia64_spinlock_val);						\
	}										\
} while (0)
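
/*
 * This is the classic test-and-test-and-set pattern: after a failed
 * cmpxchg, spin with plain loads (which hit the local cache) and retry the
 * atomic operation only once the lock has been observed free, rather than
 * hammering the bus with cmpxchg attempts.
 */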

#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#endif /* !ASM_SUPPORTED */

#define spin_is_locked(x)	((x)->lock != 0)
#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
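
/*
 * _raw_spin_trylock() evaluates to 1 when the lock was acquired and to 0
 * when it was already held.  Illustrative sketch, continuing the
 * hypothetical my_lock example above (real callers use the spin_trylock()
 * wrapper):
 *
 *	if (_raw_spin_trylock(&my_lock)) {
 *		... got it ...
 *		_raw_spin_unlock(&my_lock);
 *	}
 */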

typedef struct {
	volatile unsigned int read_counter	: 24;
	volatile unsigned int write_lock	:  8;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)
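
/*
 * The rwlock is a single 32-bit word: the low 24 bits count readers and
 * bit 31 (the top of the 8-bit write_lock field) marks a writer.  Viewed
 * as a signed int, the word is therefore negative exactly when a writer
 * holds the lock, which is what read_can_lock()/write_can_lock() above
 * and the fetchadd sign tests below exploit.
 */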

#define _raw_read_lock(rw)								\
do {											\
	rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)
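
/*
 * A reader optimistically bumps the counter with fetchadd.acq; if the
 * result is negative a writer is active, so the increment is backed out
 * with fetchadd.rel and the reader spins on plain loads until the sign
 * bit clears before trying again.
 */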

#define _raw_read_unlock(rw)					\
do {								\
	rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

#define _raw_write_lock(rw)							\
do {										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"1:\n"								\
		"ld4 r2 = [%0];;\n"						\
		"cmp4.eq p0,p7 = r0,r2\n"					\
		"(p7) br.cond.spnt.few 1b \n"					\
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"			\
		"cmp4.eq p0,p7 = r0, r2\n"					\
		"(p7) br.cond.spnt.few 1b;;\n"					\
		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
} while(0)

#define _raw_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

static inline void _raw_write_unlock(rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}
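
/*
 * write_lock occupies bits 24-31 of the lock word, which on little-endian
 * ia64 is the byte at offset 3.  Releasing it with a 1-byte store leaves
 * the read_counter bytes untouched, so readers that are concurrently
 * incrementing and backing out the counter are not clobbered.
 */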

#else /* !ASM_SUPPORTED */

#define _raw_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define _raw_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})
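
/*
 * ia64_dep_mi(-1, 0, 31, 1) is the C-level counterpart of the "dep"
 * instruction used in the asm version above: it deposits one bit of -1 at
 * position 31 of a zero word, yielding the 0x80000000 writer bit.
 */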

static inline void _raw_write_unlock(rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /*  _ASM_IA64_SPINLOCK_H */