#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers block
 * on write contention (and, where applicable, pi-boost the writer).
 * Readers without contention on entry acquire the critical section
 * without any atomic operations, but they may have to retry if a writer
 * enters before the critical section ends. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
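 *
 * The matching write side (an illustrative sketch, using the
 * write_seqlock()/write_sequnlock() wrappers defined later in this file):
 *	write_seqlock(&foo);
 *	...
 *	write_sequnlock(&foo);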
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 *
 * Priority inheritance and live-lock avoidance by Gregory Haskins
 */

#include <linux/pickop.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
	unsigned sequence;
	rwlock_t lock;
} __seqlock_t;

typedef struct {
	unsigned sequence;
	raw_spinlock_t lock;
} __raw_seqlock_t;

#define seqlock_need_resched(seq) lock_need_resched(&(seq)->lock)

#ifdef CONFIG_PREEMPT_RT
typedef __seqlock_t seqlock_t;
#else
typedef __raw_seqlock_t seqlock_t;
#endif

typedef __raw_seqlock_t raw_seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems.  We think these are
 * OK now.  Be cautious.
 */
#define __RAW_SEQLOCK_UNLOCKED(lockname) \
	{ 0, RAW_SPIN_LOCK_UNLOCKED(lockname) }

#ifdef CONFIG_PREEMPT_RT
# define __SEQLOCK_UNLOCKED(lockname) { 0, __RW_LOCK_UNLOCKED(lockname) }
#else
# define __SEQLOCK_UNLOCKED(lockname) __RAW_SEQLOCK_UNLOCKED(lockname)
#endif

#define SEQLOCK_UNLOCKED \
	__SEQLOCK_UNLOCKED(old_style_seqlock_init)

static inline void __raw_seqlock_init(raw_seqlock_t *seqlock)
{
	*seqlock = (raw_seqlock_t) __RAW_SEQLOCK_UNLOCKED(x);
	spin_lock_init(&seqlock->lock);
}

#ifdef CONFIG_PREEMPT_RT
static inline void __seqlock_init(seqlock_t *seqlock)
{
	*seqlock = (seqlock_t) __SEQLOCK_UNLOCKED(seqlock);
	rwlock_init(&seqlock->lock);
}
#else
extern void __seqlock_init(seqlock_t *seqlock);
#endif

#define seqlock_init(seq) \
	PICK_FUNCTION(raw_seqlock_t *, seqlock_t *, \
		      __raw_seqlock_init, __seqlock_init, seq);

#define DEFINE_SEQLOCK(x) \
	seqlock_t x = __SEQLOCK_UNLOCKED(x)

#define DEFINE_RAW_SEQLOCK(name) \
	raw_seqlock_t name __cacheline_aligned_in_smp = \
		__RAW_SEQLOCK_UNLOCKED(name)
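
/*
 * Illustrative initialization sketch; the names below ("foo" and the
 * hypothetical struct my_data) are examples, not part of this header.
 * Statically allocated locks can be defined already initialized, while a
 * lock embedded in a dynamically allocated object uses seqlock_init():
 *
 *	DEFINE_SEQLOCK(foo);
 *
 *	struct my_data {
 *		seqlock_t lock;
 *	};
 *	...
 *	seqlock_init(&data->lock);
 */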

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void __write_seqlock(seqlock_t *sl)
{
	write_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}

static __always_inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	local_save_flags(flags);
	__write_seqlock(sl);
	return flags;
}

static inline void __write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	write_unlock(&sl->lock);
}

#define __write_sequnlock_irqrestore(sl, flags) __write_sequnlock(sl)

static inline int __write_tryseqlock(seqlock_t *sl)
{
	int ret = write_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();
	}
	return ret;
}

/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned __read_seqbegin(seqlock_t *sl)
{
	unsigned ret;

	ret = sl->sequence;
	smp_rmb();
	if (unlikely(ret & 1)) {
		/*
		 * Serialize with the writer, which will ensure it is
		 * pi-boosted if necessary and prevent us from starving
		 * it.
		 */
		read_lock(&sl->lock);
		ret = sl->sequence;
		read_unlock(&sl->lock);
	}

	return ret;
}

/*
 * Test if reader processed invalid data.
 *
 * If the sequence value changed, then the writer changed the data while
 * the reader was in the section.
 */
static inline int __read_seqretry(seqlock_t *sl, unsigned iv)
{
	smp_rmb();
	return (sl->sequence != iv);
}

static __always_inline void __write_seqlock_raw(raw_seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}

static __always_inline unsigned long
__write_seqlock_irqsave_raw(raw_seqlock_t *sl)
{
	unsigned long flags;

	local_irq_save(flags);
	__write_seqlock_raw(sl);
	return flags;
}

static __always_inline void __write_seqlock_irq_raw(raw_seqlock_t *sl)
{
	local_irq_disable();
	__write_seqlock_raw(sl);
}

static __always_inline void __write_seqlock_bh_raw(raw_seqlock_t *sl)
{
	local_bh_disable();
	__write_seqlock_raw(sl);
}

static __always_inline void __write_sequnlock_raw(raw_seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	spin_unlock(&sl->lock);
}

static __always_inline void
__write_sequnlock_irqrestore_raw(raw_seqlock_t *sl, unsigned long flags)
{
	__write_sequnlock_raw(sl);
	local_irq_restore(flags);
	preempt_check_resched();
}

static __always_inline void __write_sequnlock_irq_raw(raw_seqlock_t *sl)
{
	__write_sequnlock_raw(sl);
	local_irq_enable();
	preempt_check_resched();
}

static __always_inline void __write_sequnlock_bh_raw(raw_seqlock_t *sl)
{
	__write_sequnlock_raw(sl);
	local_bh_enable();
}

static __always_inline int __write_tryseqlock_raw(raw_seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();
	}
	return ret;
}

static __always_inline unsigned __read_seqbegin_raw(const raw_seqlock_t *sl)
{
	unsigned ret;
repeat:
	ret = sl->sequence;
	smp_rmb();
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

static __always_inline
int __read_seqretry_raw(const raw_seqlock_t *sl, unsigned start)
{
	smp_rmb();
	return (sl->sequence != start);
}

extern int __bad_seqlock_type(void);

/*
 * PICK_SEQ_OP() is a small redirector to allow less typing of the lock
 * types raw_seqlock_t, seqlock_t, at the front of the PICK_FUNCTION
 * macro arguments. (An illustrative expansion sketch follows below.)
 */
#define PICK_SEQ_OP(...) \
	PICK_FUNCTION(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__)
#define PICK_SEQ_OP_RET(...) \
	PICK_FUNCTION_RET(raw_seqlock_t *, seqlock_t *, ##__VA_ARGS__)
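
/*
 * For illustration (a sketch, not literal preprocessor output): with the
 * definitions below, write_seqlock(sl) expands to
 *
 *	PICK_FUNCTION(raw_seqlock_t *, seqlock_t *,
 *		      __write_seqlock_raw, __write_seqlock, sl)
 *
 * which selects __write_seqlock_raw() when sl is a raw_seqlock_t * and
 * __write_seqlock() when it is a seqlock_t *, so one macro name serves
 * both lock types.
 */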

#define write_seqlock(sl) PICK_SEQ_OP(__write_seqlock_raw, __write_seqlock, sl)

#define write_sequnlock(sl) \
	PICK_SEQ_OP(__write_sequnlock_raw, __write_sequnlock, sl)

#define write_tryseqlock(sl) \
	PICK_SEQ_OP_RET(__write_tryseqlock_raw, __write_tryseqlock, sl)

#define read_seqbegin(sl) \
	PICK_SEQ_OP_RET(__read_seqbegin_raw, __read_seqbegin, sl)

#define read_seqretry(sl, iv) \
	PICK_SEQ_OP_RET(__read_seqretry_raw, __read_seqretry, sl, iv)

#define write_seqlock_irqsave(lock, flags)				\
do {									\
	flags = PICK_SEQ_OP_RET(__write_seqlock_irqsave_raw,		\
		__write_seqlock_irqsave, lock);				\
} while (0)

#define write_seqlock_irq(lock) \
	PICK_SEQ_OP(__write_seqlock_irq_raw, __write_seqlock, lock)

#define write_seqlock_bh(lock) \
	PICK_SEQ_OP(__write_seqlock_bh_raw, __write_seqlock, lock)

#define write_sequnlock_irqrestore(lock, flags) \
	PICK_SEQ_OP(__write_sequnlock_irqrestore_raw, \
		__write_sequnlock_irqrestore, lock, flags)

#define write_sequnlock_bh(lock) \
	PICK_SEQ_OP(__write_sequnlock_bh_raw, __write_sequnlock, lock)

#define write_sequnlock_irq(lock) \
	PICK_SEQ_OP(__write_sequnlock_irq_raw, __write_sequnlock, lock)
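
/*
 * Illustrative write-side usage of the IRQ-safe variants (the lock "foo"
 * and the surrounding code are hypothetical):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo, flags);
 *	... update the protected data ...
 *	write_sequnlock_irqrestore(&foo, flags);
 */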

static __always_inline
unsigned long __seq_irqsave_raw(raw_seqlock_t *sl)
{
	unsigned long flags;

	local_irq_save(flags);
	return flags;
}

static __always_inline unsigned long __seq_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	local_save_flags(flags);
	return flags;
}

#define read_seqbegin_irqsave(lock, flags)				\
({									\
	flags = PICK_SEQ_OP_RET(__seq_irqsave_raw, __seq_irqsave, lock);\
	read_seqbegin(lock);						\
})

static __always_inline int
__read_seqretry_irqrestore(seqlock_t *sl, unsigned iv, unsigned long flags)
{
	return __read_seqretry(sl, iv);
}

static __always_inline int
__read_seqretry_irqrestore_raw(raw_seqlock_t *sl, unsigned iv,
			       unsigned long flags)
{
	int ret = read_seqretry(sl, iv);

	local_irq_restore(flags);
	preempt_check_resched();
	return ret;
}

#define read_seqretry_irqrestore(lock, iv, flags)			\
	PICK_SEQ_OP_RET(__read_seqretry_irqrestore_raw,			\
		__read_seqretry_irqrestore, lock, iv, flags)
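
/*
 * Illustrative read-side usage of the IRQ-safe variants (again with a
 * hypothetical lock "foo"):
 *
 *	unsigned long flags;
 *	unsigned seq;
 *
 *	do {
 *		seq = read_seqbegin_irqsave(&foo, flags);
 *		... read the protected data ...
 *	} while (read_seqretry_irqrestore(&foo, seq, flags));
 */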

/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the update,
 * taken before the write_seqcount_begin() and released after the
 * write_seqcount_end(). (An illustrative usage sketch follows the
 * definitions below.)
 */
typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
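
/*
 * Illustrative seqcount usage sketch (my_mutex, my_seq and the data
 * accesses are hypothetical). The writer relies on an external mutex for
 * mutual exclusion; the sequence counter only lets lockless readers detect
 * a concurrent update:
 *
 *	Writer:
 *		mutex_lock(&my_mutex);
 *		write_seqcount_begin(&my_seq);
 *		... update the protected data ...
 *		write_seqcount_end(&my_seq);
 *		mutex_unlock(&my_mutex);
 *
 *	Reader:
 *		do {
 *			seq = read_seqcount_begin(&my_seq);
 *			... read the protected data ...
 *		} while (read_seqcount_retry(&my_seq, seq));
 */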

/* Start of read using pointer to a sequence counter only. */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;
repeat:
	ret = s->sequence;
	smp_rmb();
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

/*
 * Test if reader processed invalid data because the sequence number changed.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return s->sequence != start;
}

/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

#endif /* __LINUX_SEQLOCK_H */