/* MN10300 spinlock support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/rwlock.h>
#include <asm/page.h>

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)
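
/* Release the lock by clearing the lock bit.  The "memory" clobber acts
 * as a compiler barrier, so critical-section accesses cannot be
 * reordered past the release. */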
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
		"	bclr	1,(0,%0)	\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int ret;

	asm volatile(
		"	mov	1,%0		\n"
		"	bset	%0,(%1)		\n"
		"	bne	1f		\n"
		"	clr	%0		\n"
		"1:	xor	1,%0		\n"
		: "=d"(ret)
		: "a"(&lock->slock)
		: "memory", "cc");

	return ret;
}
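
/* Take the lock unconditionally: keep retrying the bset until the old
 * value of the lock bit reads back clear. */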
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	asm volatile(
		"1:	bset	1,(0,%0)	\n"
		"	bne	1b		\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	int temp;

	asm volatile(
		"1:	bset	1,(0,%2)	\n"
		"	beq	3f		\n"
		"	mov	%1,epsw		\n"
		"2:	mov	(0,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	2b		\n"
		"	mov	%3,%0		\n"
		"	mov	%0,epsw		\n"
		"	nop			\n"
		"	nop			\n"
		"	bra	1b\n"
		"3:				\n"
		: "=&d" (temp)
		: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
		: "memory", "cc");
}
#define arch_spin_lock_flags	arch_spin_lock_flags

#ifdef __KERNEL__

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/*
 * On mn10300, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
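/* The counter starts out at RW_LOCK_BIAS: readers subtract 1 apiece and
 * a writer subtracts the whole bias, so a negative result on a read
 * attempt (or a failure to hit zero on a write attempt) indicates
 * contention.  Note that the __build_*_lock() fast paths for the atomic
 * ops unit are currently compiled out ("#if 0") below. */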
static inline void arch_read_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_lock(rw, "__read_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (atomic_dec_return(count) < 0)
			atomic_inc(count);
	}
#endif
}
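
/* A writer claims the whole bias in one subtraction; this succeeds only
 * if the count was exactly RW_LOCK_BIAS, i.e. no readers and no writer
 * held the lock. */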
static inline void arch_write_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_lock(rw, "__write_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
			atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}
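
/* Unlocking simply undoes the acquisition: a reader gives back its
 * single count, a writer gives back the whole bias. */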
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_inc(count);
	}
#endif
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}
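
/* Non-blocking variants: make the same counter modification, and back
 * it out again if the lock turned out to be taken. */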
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()
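
/*
 * Usage sketch (illustrative only; "my_lock" is a hypothetical name):
 * these primitives are normally reached through the generic spinlock
 * API rather than called directly:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);	// -> arch_spin_lock_flags()
 *	...critical section...
 *	spin_unlock_irqrestore(&my_lock, flags); // -> arch_spin_unlock()
 */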

#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */