#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

#define __raw_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 *   63                     32  31                      0
 *  +----------------------------------------------------+
 *  |  next_ticket_number      |     now_serving         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT	32
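
/*
 * Illustrative example (values made up): if lock->lock == 0x0000000300000003,
 * then next_ticket_number == 3 and now_serving == 3, i.e. the lock is free.
 * A locker fetchadds the upper word, sees turn == 3 == now_serving and owns
 * the lock; the word is now 0x0000000400000003.  Unlock increments the lower
 * word, giving 0x0000000400000004, and the next ticket holder may proceed.
 */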

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, turn, now_serving;

	/* Note the current head, then atomically take the next ticket. */
	now_serving = *p;
	turn = ia64_fetchadd(1, p+1, acq);

	if (turn == now_serving)
		return;

	/* Spin until now_serving reaches our ticket. */
	do {
		cpu_relax();
	} while (ACCESS_ONCE(*p) != turn);
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock), try;

	/* Only attempt the lock when no ticket is outstanding. */
	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
		try = tmp + (1L << TICKET_SHIFT);

		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
	}
	return 0;
}

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock;

	/* Advance now_serving; the release orders the critical section. */
	(void)ia64_fetchadd(1, p, rel);
}

static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	long tmp = ACCESS_ONCE(lock->lock);

	/* More than one ticket ahead of now_serving means waiters exist. */
	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
}

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define __raw_spin_is_contended	__raw_spin_is_contended

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	/* flags is ignored: the ticket lock never re-enables interrupts while spinning. */
	__raw_spin_lock(lock);
}

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
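
/*
 * The rwlock word packs a 31-bit read_counter in the low bits and the
 * write_lock flag in bit 31, so viewed as a signed 32-bit integer it is
 * negative while a writer holds the lock and zero when it is entirely free.
 */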

#ifdef ASM_SUPPORTED

/*
 * Take a read lock: fetchadd the counter with acquire semantics; if the value
 * read back is negative a writer holds the lock, so undo the increment, spin
 * (re-enabling interrupts if the saved flags had them enabled) until the word
 * turns non-negative, and retry.
 */
static __always_inline void
__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)

#define __raw_read_lock(rw)								\
do {											\
	raw_rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)

#endif /* !ASM_SUPPORTED */

#define __raw_read_unlock(rw)					\
do {								\
	raw_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)
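
/*
 * Writer side: a writer owns the lock once it has atomically installed the
 * write_lock bit (bit 31) in a word that read as zero, i.e. with no readers
 * and no other writer present.
 */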

#ifdef ASM_SUPPORTED

static __always_inline void
__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)

#define __raw_write_trylock(rw)						\
({									\
	register long result;						\
									\
	__asm__ __volatile__ (						\
		"mov ar.ccv = r0\n"					\
		"dep r29 = -1, r0, 31, 1;;\n"				\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"			\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");	\
	(result == 0);							\
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	/* Clear the write_lock bit with a release store to the byte holding it. */
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)

#define __raw_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define __raw_write_trylock(rw)						\
({									\
	__u64 ia64_val;							\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);	\
	(ia64_val == 0);						\
})

static inline void __raw_write_unlock(raw_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

static inline int __raw_read_trylock(raw_rwlock_t *x)
{
	union {
		raw_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	/* Succeed only if no writer appeared and the read count is unchanged. */
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /*  _ASM_IA64_SPINLOCK_H */