linux-rt-nao.git: include/linux/seqlock.h
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers block
 * on write contention (and where applicable, pi-boost the writer).
 * Readers without contention on entry acquire the critical section
 * without any atomic operations, but they may have to retry if a writer
 * enters before the critical section ends. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *        do {
 *                seq = read_seqbegin(&foo);
 *                ...
 *        } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 *
 * Priority inheritance and live-lock avoidance by Gregory Haskins
 */
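/*
 * For illustration, a matching writer-side sketch using the interfaces
 * declared below ('foo' is a placeholder seqlock):
 *
 *        write_seqlock(&foo);
 *        ... update the data protected by foo ...
 *        write_sequnlock(&foo);
 */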
#include <linux/pickop.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>
typedef struct {
        unsigned sequence;
        rwlock_t lock;
} __seqlock_t;

typedef struct {
        unsigned sequence;
        raw_spinlock_t lock;
} __raw_seqlock_t;

#define seqlock_need_resched(seq) lock_need_resched(&(seq)->lock)

#ifdef CONFIG_PREEMPT_RT
typedef __seqlock_t seqlock_t;
#else
typedef __raw_seqlock_t seqlock_t;
#endif

typedef __raw_seqlock_t raw_seqlock_t;
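/*
 * Note: under CONFIG_PREEMPT_RT the generic seqlock_t embeds an rwlock_t,
 * so a reader that hits a writer mid-update blocks on (and, as noted above,
 * can pi-boost) that writer instead of spinning; raw_seqlock_t always uses
 * a raw spinlock and keeps the classic spinning behaviour.
 */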
/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __RAW_SEQLOCK_UNLOCKED(lockname) \
        { 0, RAW_SPIN_LOCK_UNLOCKED(lockname) }

#ifdef CONFIG_PREEMPT_RT
# define __SEQLOCK_UNLOCKED(lockname) { 0, __RW_LOCK_UNLOCKED(lockname) }
#else
# define __SEQLOCK_UNLOCKED(lockname) __RAW_SEQLOCK_UNLOCKED(lockname)
#endif

#define SEQLOCK_UNLOCKED \
        __SEQLOCK_UNLOCKED(old_style_seqlock_init)
static inline void __raw_seqlock_init(raw_seqlock_t *seqlock)
{
        *seqlock = (raw_seqlock_t) __RAW_SEQLOCK_UNLOCKED(x);
        spin_lock_init(&seqlock->lock);
}

#ifdef CONFIG_PREEMPT_RT
static inline void __seqlock_init(seqlock_t *seqlock)
{
        *seqlock = (seqlock_t) __SEQLOCK_UNLOCKED(seqlock);
        rwlock_init(&seqlock->lock);
}
#else
extern void __seqlock_init(seqlock_t *seqlock);
#endif
#define seqlock_init(seq) \
        PICK_FUNCTION(raw_seqlock_t *, seqlock_t *, \
                      __raw_seqlock_init, __seqlock_init, seq);

#define DEFINE_SEQLOCK(x) \
        seqlock_t x = __SEQLOCK_UNLOCKED(x)

#define DEFINE_RAW_SEQLOCK(name) \
        raw_seqlock_t name __cacheline_aligned_in_smp = \
                __RAW_SEQLOCK_UNLOCKED(name)
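/*
 * Illustrative definitions ('foo' is a placeholder):
 *
 *        static DEFINE_SEQLOCK(foo);
 *
 * or, for run-time initialization:
 *
 *        seqlock_t foo;
 *        seqlock_init(&foo);
 */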
/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
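/*
 * The sequence count is bumped to an odd value when a write begins and back
 * to an even value when it completes; readers compare snapshots of it to
 * detect, and retry across, concurrent updates.
 */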
static inline void __write_seqlock(seqlock_t *sl)
{
        write_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static __always_inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        local_save_flags(flags);
        __write_seqlock(sl);
        return flags;
}

static inline void __write_sequnlock(seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        write_unlock(&sl->lock);
}

#define __write_sequnlock_irqrestore(sl, flags) __write_sequnlock(sl)

static inline int __write_tryseqlock(seqlock_t *sl)
{
        int ret = write_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned __read_seqbegin(seqlock_t *sl)
{
        unsigned ret;

        ret = sl->sequence;
        smp_rmb();
        if (unlikely(ret & 1)) {
                /*
                 * Serialize with the writer which will ensure they are
                 * pi-boosted if necessary and prevent us from starving
                 * them.
                 */
                read_lock(&sl->lock);
                ret = sl->sequence;
                read_unlock(&sl->lock);
        }

        BUG_ON(ret & 1);

        return ret;
}

/*
 * Test if reader processed invalid data.
 *
 * If sequence value changed then writer changed data while in section.
 */
static inline int __read_seqretry(seqlock_t *sl, unsigned iv)
{
        smp_rmb();
        return (sl->sequence != iv);
}
static __always_inline void __write_seqlock_raw(raw_seqlock_t *sl)
{
        spin_lock(&sl->lock);
        ++sl->sequence;
        smp_wmb();
}

static __always_inline unsigned long
__write_seqlock_irqsave_raw(raw_seqlock_t *sl)
{
        unsigned long flags;

        local_irq_save(flags);
        __write_seqlock_raw(sl);
        return flags;
}

static __always_inline void __write_seqlock_irq_raw(raw_seqlock_t *sl)
{
        local_irq_disable();
        __write_seqlock_raw(sl);
}

static __always_inline void __write_seqlock_bh_raw(raw_seqlock_t *sl)
{
        local_bh_disable();
        __write_seqlock_raw(sl);
}

static __always_inline void __write_sequnlock_raw(raw_seqlock_t *sl)
{
        smp_wmb();
        sl->sequence++;
        spin_unlock(&sl->lock);
}

static __always_inline void
__write_sequnlock_irqrestore_raw(raw_seqlock_t *sl, unsigned long flags)
{
        __write_sequnlock_raw(sl);
        local_irq_restore(flags);
        preempt_check_resched();
}

static __always_inline void __write_sequnlock_irq_raw(raw_seqlock_t *sl)
{
        __write_sequnlock_raw(sl);
        local_irq_enable();
        preempt_check_resched();
}

static __always_inline void __write_sequnlock_bh_raw(raw_seqlock_t *sl)
{
        __write_sequnlock_raw(sl);
        local_bh_enable();
}

static __always_inline int __write_tryseqlock_raw(raw_seqlock_t *sl)
{
        int ret = spin_trylock(&sl->lock);

        if (ret) {
                ++sl->sequence;
                smp_wmb();
        }
        return ret;
}
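/*
 * Raw readers never take the lock: while the sequence is odd (a write is in
 * flight) they simply spin with cpu_relax() until it is even again.
 */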
static __always_inline unsigned __read_seqbegin_raw(const raw_seqlock_t *sl)
{
        unsigned ret;

repeat:
        ret = sl->sequence;
        smp_rmb();
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }

        return ret;
}

static __always_inline int __read_seqretry_raw(const raw_seqlock_t *sl, unsigned start)
{
        smp_rmb();

        return (sl->sequence != start);
}
extern int __bad_seqlock_type(void);

/*
 * PICK_SEQ_OP() is a small redirector to allow less typing of the lock
 * types raw_seqlock_t, seqlock_t, at the front of the PICK_FUNCTION
 * macro.
 */
#define PICK_SEQ_OP(...) \
        PICK_FUNCTION(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__)
#define PICK_SEQ_OP_RET(...) \
        PICK_FUNCTION_RET(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__)
#define write_seqlock(sl) PICK_SEQ_OP(__write_seqlock_raw, __write_seqlock, sl)

#define write_sequnlock(sl) \
        PICK_SEQ_OP(__write_sequnlock_raw, __write_sequnlock, sl)

#define write_tryseqlock(sl) \
        PICK_SEQ_OP_RET(__write_tryseqlock_raw, __write_tryseqlock, sl)

#define read_seqbegin(sl) \
        PICK_SEQ_OP_RET(__read_seqbegin_raw, __read_seqbegin, sl)

#define read_seqretry(sl, iv) \
        PICK_SEQ_OP_RET(__read_seqretry_raw, __read_seqretry, sl, iv)

#define write_seqlock_irqsave(lock, flags) \
do { \
        flags = PICK_SEQ_OP_RET(__write_seqlock_irqsave_raw, \
                __write_seqlock_irqsave, lock); \
} while (0)

#define write_seqlock_irq(lock) \
        PICK_SEQ_OP(__write_seqlock_irq_raw, __write_seqlock, lock)

#define write_seqlock_bh(lock) \
        PICK_SEQ_OP(__write_seqlock_bh_raw, __write_seqlock, lock)

#define write_sequnlock_irqrestore(lock, flags) \
        PICK_SEQ_OP(__write_sequnlock_irqrestore_raw, \
                __write_sequnlock_irqrestore, lock, flags)

#define write_sequnlock_bh(lock) \
        PICK_SEQ_OP(__write_sequnlock_bh_raw, __write_sequnlock, lock)

#define write_sequnlock_irq(lock) \
        PICK_SEQ_OP(__write_sequnlock_irq_raw, __write_sequnlock, lock)
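/*
 * Illustrative writer usage with IRQ state ('foo' and 'flags' are
 * placeholders):
 *
 *        write_seqlock_irqsave(&foo, flags);
 *        ... update the data protected by foo ...
 *        write_sequnlock_irqrestore(&foo, flags);
 */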
static __always_inline
unsigned long __seq_irqsave_raw(raw_seqlock_t *sl)
{
        unsigned long flags;

        local_irq_save(flags);
        return flags;
}

static __always_inline unsigned long __seq_irqsave(seqlock_t *sl)
{
        unsigned long flags;

        local_save_flags(flags);
        return flags;
}

#define read_seqbegin_irqsave(lock, flags) \
({ \
        flags = PICK_SEQ_OP_RET(__seq_irqsave_raw, __seq_irqsave, lock);\
        read_seqbegin(lock); \
})
static __always_inline int
__read_seqretry_irqrestore(seqlock_t *sl, unsigned iv, unsigned long flags)
{
        return __read_seqretry(sl, iv);
}

static __always_inline int
__read_seqretry_irqrestore_raw(raw_seqlock_t *sl, unsigned iv,
                               unsigned long flags)
{
        int ret = read_seqretry(sl, iv);

        local_irq_restore(flags);
        preempt_check_resched();
        return ret;
}

#define read_seqretry_irqrestore(lock, iv, flags) \
        PICK_SEQ_OP_RET(__read_seqretry_irqrestore_raw, \
                __read_seqretry_irqrestore, lock, iv, flags)
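/*
 * Illustrative reader usage with IRQ state ('foo', 'seq' and 'flags' are
 * placeholders):
 *
 *        do {
 *                seq = read_seqbegin_irqsave(&foo, flags);
 *                ...
 *        } while (read_seqretry_irqrestore(&foo, seq, flags));
 */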
/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
        unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
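/*
 * Illustrative seqcount usage, assuming the caller serializes writers with
 * its own lock ('foo_lock' and 'foo_seq' are placeholders):
 *
 * Writer:
 *        spin_lock(&foo_lock);
 *        write_seqcount_begin(&foo_seq);
 *        ... update data ...
 *        write_seqcount_end(&foo_seq);
 *        spin_unlock(&foo_lock);
 *
 * Reader:
 *        do {
 *                seq = read_seqcount_begin(&foo_seq);
 *                ...
 *        } while (read_seqcount_retry(&foo_seq, seq));
 */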
/* Start of read using pointer to a sequence counter only. */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
        unsigned ret;

repeat:
        ret = s->sequence;
        smp_rmb();
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
        }
        return ret;
}

/*
 * Test if reader processed invalid data because sequence number has changed.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
        smp_rmb();

        return s->sequence != start;
}
/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
        smp_wmb();
        s->sequence++;
}

#endif /* __LINUX_SEQLOCK_H */