2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
14 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
15 * (the type definitions are in asm/spinlock_types.h)
18 #ifndef _ASM_TILE_SPINLOCK_64_H
19 #define _ASM_TILE_SPINLOCK_64_H
21 /* Shifts and masks for the various fields in "lock". */
22 #define __ARCH_SPIN_CURRENT_SHIFT 17
23 #define __ARCH_SPIN_NEXT_MASK 0x7fff
24 #define __ARCH_SPIN_NEXT_OVERFLOW 0x8000
27 * Return the "current" portion of a ticket lock value,
28 * i.e. the number that currently owns the lock.
30 static inline u32
arch_spin_current(u32 val
)
32 return val
>> __ARCH_SPIN_CURRENT_SHIFT
;
36 * Return the "next" portion of a ticket lock value,
37 * i.e. the number that the next task to try to acquire the lock will get.
39 static inline u32
arch_spin_next(u32 val
)
41 return val
& __ARCH_SPIN_NEXT_MASK
;
44 /* The lock is locked if a task would have to wait to get it. */
45 static inline int arch_spin_is_locked(arch_spinlock_t
*lock
)
48 return arch_spin_current(val
) != arch_spin_next(val
);
51 /* Bump the current ticket so the next task owns the lock. */
52 static inline void arch_spin_unlock(arch_spinlock_t
*lock
)
54 wmb(); /* guarantee anything modified under the lock is visible */
55 __insn_fetchadd4(&lock
->lock
, 1U << __ARCH_SPIN_CURRENT_SHIFT
);
/* Wait until the lock is released; defined out of line. */
void arch_spin_unlock_wait(arch_spinlock_t *lock);

/*
 * Out-of-line slow path used by arch_spin_lock() when our ticket
 * ("val") is not the current owner.  NOTE(review): presumably spins
 * until val becomes the current ticket — confirm against the .c file.
 */
void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
62 /* Grab the "next" ticket number and bump it atomically.
63 * If the current ticket is not ours, go to the slow path.
64 * We also take the slow path if the "next" value overflows.
66 static inline void arch_spin_lock(arch_spinlock_t
*lock
)
68 u32 val
= __insn_fetchadd4(&lock
->lock
, 1);
69 u32 ticket
= val
& (__ARCH_SPIN_NEXT_MASK
| __ARCH_SPIN_NEXT_OVERFLOW
);
70 if (unlikely(arch_spin_current(val
) != ticket
))
71 arch_spin_lock_slow(lock
, ticket
);
/* Try to get the lock, and return whether we succeeded.
 * Defined out of line; nonzero presumably means success — TODO confirm. */
int arch_spin_trylock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use fetchadd() for readers, and fetchor() with the sign bit
 * for writers, so the low 31 bits count readers and a negative
 * lock word means write-locked.
 */

#define __WRITE_LOCK_BIT (1 << 31)
/* Nonzero if the given lock-word value has the writer bit set. */
static inline int arch_write_val_locked(int lockval)
{
	/* The write bit is the sign bit, so a sign test is equivalent
	 * to (and cheaper than) "lockval & __WRITE_LOCK_BIT". */
	return lockval < 0;
}
96 * read_can_lock - would read_trylock() succeed?
97 * @lock: the rwlock in question.
99 static inline int arch_read_can_lock(arch_rwlock_t
*rw
)
101 return !arch_write_val_locked(rw
->lock
);
105 * write_can_lock - would write_trylock() succeed?
106 * @lock: the rwlock in question.
108 static inline int arch_write_can_lock(arch_rwlock_t
*rw
)
110 return rw
->lock
== 0;
/* Out-of-line slow path for arch_read_lock() when a writer holds
 * the lock; NOTE(review): presumably spins until the writer is gone —
 * confirm against the out-of-line definition. */
extern void __read_lock_failed(arch_rwlock_t *rw);
115 static inline void arch_read_lock(arch_rwlock_t
*rw
)
117 u32 val
= __insn_fetchaddgez4(&rw
->lock
, 1);
118 if (unlikely(arch_write_val_locked(val
)))
119 __read_lock_failed(rw
);
/* Out-of-line slow path for arch_write_lock(); "val" is the lock-word
 * value observed when the fast-path fetchor() found the lock busy. */
extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
124 static inline void arch_write_lock(arch_rwlock_t
*rw
)
126 u32 val
= __insn_fetchor4(&rw
->lock
, __WRITE_LOCK_BIT
);
127 if (unlikely(val
!= 0))
128 __write_lock_failed(rw
, val
);
131 static inline void arch_read_unlock(arch_rwlock_t
*rw
)
134 __insn_fetchadd4(&rw
->lock
, -1);
137 static inline void arch_write_unlock(arch_rwlock_t
*rw
)
140 __insn_exch4(&rw
->lock
, 0); /* Avoid waiting in the write buffer. */
143 static inline int arch_read_trylock(arch_rwlock_t
*rw
)
145 return !arch_write_val_locked(__insn_fetchaddgez4(&rw
->lock
, 1));
148 static inline int arch_write_trylock(arch_rwlock_t
*rw
)
150 u32 val
= __insn_fetchor4(&rw
->lock
, __WRITE_LOCK_BIT
);
151 if (likely(val
== 0))
153 if (!arch_write_val_locked(val
))
154 __insn_fetchand4(&rw
->lock
, ~__WRITE_LOCK_BIT
);
/* As with the ticket lock, don't re-enable interrupts while waiting. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_64_H */