[linux/fpc-iii.git] arch/tile/include/asm/spinlock_64.h

/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H

#include <linux/compiler.h>

/* Shifts and masks for the various fields in "lock". */
#define __ARCH_SPIN_CURRENT_SHIFT	17
#define __ARCH_SPIN_NEXT_MASK		0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000
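
/*
 * Illustrative sketch of the 32-bit lock-word layout implied by the
 * constants above (the role of bit 16 as a guard bit, and the exact
 * overflow handling, are assumptions here; both are managed by the
 * slow-path code, not by this header):
 *
 *   31            17  16  15  14             0
 *  +----------------+---+---+-----------------+
 *  |   "current"    |   |ov |     "next"      |
 *  +----------------+---+---+-----------------+
 *
 * "next" is the ticket handed to the next acquirer; "ov" (bit 15)
 * flags that "next" has overflowed; "current" is the ticket that
 * currently owns the lock.
 */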

/*
 * Return the "current" portion of a ticket lock value,
 * i.e. the number that currently owns the lock.
 */
static inline u32 arch_spin_current(u32 val)
{
	return val >> __ARCH_SPIN_CURRENT_SHIFT;
}

/*
 * Return the "next" portion of a ticket lock value,
 * i.e. the number that the next task to try to acquire the lock will get.
 */
static inline u32 arch_spin_next(u32 val)
{
	return val & __ARCH_SPIN_NEXT_MASK;
}

/* The lock is locked if a task would have to wait to get it. */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/* Use READ_ONCE() to ensure that calling this in a loop is OK. */
	u32 val = READ_ONCE(lock->lock);
	return arch_spin_current(val) != arch_spin_next(val);
}

/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	wmb();  /* guarantee anything modified under the lock is visible */
	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);

/* Grab the "next" ticket number and bump it atomically.
 * If the current ticket is not ours, go to the slow path.
 * We also take the slow path if the "next" value overflows.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = __insn_fetchadd4(&lock->lock, 1);
	u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
	if (unlikely(arch_spin_current(val) != ticket))
		arch_spin_lock_slow(lock, ticket);
}
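
/*
 * Worked example (illustrative numbers, not from the original sources):
 * with lock->lock == 0 the lock is free, since current == next == 0.
 * arch_spin_lock() fetchadds 1 and sees val == 0, so ticket == 0 and
 * arch_spin_current(0) == 0: the fast path wins, and the word is now 1
 * (next == 1).  A second CPU fetchadds and sees val == 1, so its
 * ticket is 1 while current is still 0, and it spins in the slow path
 * until arch_spin_unlock() adds 1 << 17, making current == 1.
 */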

/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use fetchadd() for readers, and fetchor() with the sign bit
 * for writers.
 */

#define __WRITE_LOCK_BIT (1 << 31)
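
/*
 * Illustrative sketch (an assumption drawn from the code below, not a
 * comment from the original sources): the rwlock word holds a reader
 * count in bits [30:0], with __WRITE_LOCK_BIT (the sign bit) claimed
 * by the single writer, so "val < 0" is a cheap test for
 * "write-locked".
 */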

static inline int arch_write_val_locked(int val)
{
	return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
}

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(rw->lock);
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
	return rw->lock == 0;
}

extern void __read_lock_failed(arch_rwlock_t *rw);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	/*
	 * fetchaddgez4 performs the add only when the result stays
	 * non-negative, i.e. when no writer holds the sign bit; if a
	 * writer does, fall through to the slow path and wait.
	 */
	u32 val = __insn_fetchaddgez4(&rw->lock, 1);
	if (unlikely(arch_write_val_locked(val)))
		__read_lock_failed(rw);
}

extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/* Atomically set the sign bit; a non-zero old value means
	 * readers or another writer beat us, so take the slow path. */
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (unlikely(val != 0))
		__write_lock_failed(rw, val);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__insn_mf();  /* fence: finish critical-section accesses first */
	__insn_fetchadd4(&rw->lock, -1);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__insn_mf();
	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	if (likely(val == 0))
		return 1;
	/*
	 * The fetchor set the write bit even though readers hold the
	 * lock; clear it again.  (If another writer already had the
	 * bit set, it is theirs to clear, so leave it alone.)
	 */
	if (!arch_write_val_locked(val))
		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
	return 0;
}
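
/*
 * Illustrative caller pattern (an example, not part of this header;
 * kernel code normally reaches these hooks through the generic
 * write_trylock()/write_unlock() wrappers):
 *
 *	if (arch_write_trylock(&rw)) {
 *		... exclusive access ...
 *		arch_write_unlock(&rw);
 *	}
 */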

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_64_H */