/* src/mcas/portable_defns.h */
/*
 * Copyright (c) 2003, Keir Fraser. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *
 *     * Neither the name of Keir Fraser nor the names of its contributors
 *       may be used to endorse or promote products derived from this
 *       software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __PORTABLE_DEFNS_H__
#define __PORTABLE_DEFNS_H__

#define MAX_THREADS 256 /* Nobody will ever have more! */

#if defined(SPARC)
#include "sparc_defns.h"
#elif defined(SOLARIS_X86_686)
#include "solaris_x86_defns.h"
#elif defined(SOLARIS_X86_AMD64)
#include "solaris_amd64_defns.h"
#elif defined(INTEL)
#include "intel_defns.h"
#elif defined(X86_64)
#include "amd64_defns.h"
#elif defined(PPC)
#include "ppc_defns.h"
#elif defined(IA64)
#include "ia64_defns.h"
#elif defined(MIPS)
#include "mips_defns.h"
#elif defined(ALPHA)
#include "alpha_defns.h"
#else
#error "A valid architecture has not been defined"
#endif

#include <string.h>

#ifndef MB_NEAR_CAS
#define RMB_NEAR_CAS() RMB()
#define WMB_NEAR_CAS() WMB()
#define MB_NEAR_CAS()  MB()
#endif
typedef unsigned long int_addr_t;

#define FALSE 0
#define TRUE  1

#define ADD_TO(_v,_x)                                                    \
do {                                                                     \
    unsigned long __val = (_v), __newval;                                \
    while ( (__newval = CASIO(&(_v),__val,__val+(_x))) != __val )        \
        __val = __newval;                                                \
} while ( 0 )
/*
 * The 'returning' versions expose the old value seen at the successful
 * CAS update (Matt). This lets an atomic increment know whether it was,
 * for example, the operation which uniquely incremented _v from 0 to 1,
 * and likewise supports equivalent threshold assertions.
 */

#define ADD_TO_RETURNING_OLD(_v,_x,_o)                                   \
do {                                                                     \
    unsigned long __val = (_v), __newval;                                \
    while ( (__newval = CASIO(&(_v),__val,__val+(_x))) != __val )        \
        __val = __newval;                                                \
    _o = __val;                                                          \
} while ( 0 )
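/*
 * Usage sketch (illustrative, not part of the original header): detecting
 * the unique 0 -> 1 transition of a shared counter. The names 'users' and
 * 'first_user_detect' are hypothetical.
 */
#if 0
static unsigned long users = 0;

static void first_user_detect(void)
{
    unsigned long old;
    ADD_TO_RETURNING_OLD(users, 1, old);
    if ( old == 0 )
    {
        /* This thread uniquely incremented 'users' from 0 to 1. */
    }
}
#endif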
#define SUB_FROM(_v,_x)                                                  \
do {                                                                     \
    unsigned long __val = (_v), __newval;                                \
    while ( (__newval = CASIO(&(_v),__val,__val-(_x))) != __val )        \
        __val = __newval;                                                \
} while ( 0 )

#define SUB_FROM_RETURNING_OLD(_v,_x,_o)                                 \
do {                                                                     \
    unsigned long __val = (_v), __newval;                                \
    while ( (__newval = CASIO(&(_v),__val,__val-(_x))) != __val )        \
        __val = __newval;                                                \
    _o = __val;                                                          \
} while ( 0 )
/*
 * Allow us to efficiently align and pad structures so that shared fields
 * don't cause contention on thread-local or read-only fields.
 */
#define CACHE_PAD(_n) char __pad ## _n [CACHE_LINE_SIZE]
#define ALIGNED_ALLOC(_s)                                        \
    ((void *)(((unsigned long)malloc((_s)+CACHE_LINE_SIZE*2) +   \
        CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE-1)))
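/*
 * Usage sketch (illustrative, not part of the original header): padding a
 * shared structure so a heavily-CASed word does not share a cache line with
 * read-mostly fields. Note that ALIGNED_ALLOC rounds the malloc() result up
 * inside an over-sized block, so the returned pointer cannot safely be
 * handed back to free(). 'padded_state_t' is a hypothetical type.
 */
#if 0
typedef struct {
    void *tail;                 /* heavily CASed by all threads */
    CACHE_PAD(0);               /* keep the fields below off this line */
    unsigned long num_threads;  /* read-mostly */
} padded_state_t;

static padded_state_t *alloc_state(void)
{
    return (padded_state_t *)ALIGNED_ALLOC(sizeof(padded_state_t));
}
#endif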
/*
 * Interval counting
 */

typedef unsigned int interval_t;
#define get_interval(_i)                                                  \
do {                                                                      \
    interval_t _ni = interval;                                            \
    do { _i = _ni; } while ( (_ni = CASIO(&interval, _i, _i+1)) != _i );  \
} while ( 0 )
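/*
 * Usage sketch (illustrative, not part of the original header): get_interval
 * captures a global counter named 'interval' by name, so a translation unit
 * using it must define that variable. The definition below is hypothetical.
 */
#if 0
interval_t interval = 0;

static interval_t next_interval(void)
{
    interval_t i;
    get_interval(i); /* atomic fetch-and-increment of the global counter */
    return i;
}
#endif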
/*
 * POINTER MARKING
 */
#define get_marked_ref(_p)   ((void *)(((unsigned long)(_p)) | 1))
#define get_unmarked_ref(_p) ((void *)(((unsigned long)(_p)) & ~1))
#define is_marked_ref(_p)    (((unsigned long)(_p)) & 1)
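/*
 * Usage sketch (illustrative, not part of the original header): the mark
 * lives in the low bit of a word-aligned pointer, as used when logically
 * deleting a node from a lock-free list. 'node_t' is a hypothetical type;
 * CASPO comes from the architecture headers above.
 */
#if 0
typedef struct node_st { struct node_st *next; } node_t;

static void logically_delete(node_t *n)
{
    node_t *succ = n->next;
    /* Keep trying to set the mark bit until it is set (by us or anyone). */
    while ( !is_marked_ref(succ) &&
            CASPO(&n->next, succ, get_marked_ref(succ)) != succ )
        succ = n->next;
}
#endif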
/*
 * SUPPORT FOR WEAK ORDERING OF MEMORY ACCESSES
 */

#ifdef WEAK_MEM_ORDER

#define MAYBE_GARBAGE (0)

/* Read field @_f into variable @_x. */
#define READ_FIELD(_x,_f)                                        \
do {                                                             \
    (_x) = (_f);                                                 \
    if ( (_x) == MAYBE_GARBAGE ) { RMB(); (_x) = (_f); }         \
} while ( 0 )

#define WEAK_DEP_ORDER_RMB() RMB()
#define WEAK_DEP_ORDER_WMB() WMB()
#define WEAK_DEP_ORDER_MB()  MB()

#else

/* Read field @_f into variable @_x. */
#define READ_FIELD(_x,_f) ((_x) = (_f))

#define WEAK_DEP_ORDER_RMB() ((void)0)
#define WEAK_DEP_ORDER_WMB() ((void)0)
#define WEAK_DEP_ORDER_MB()  ((void)0)

#endif
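/*
 * Usage sketch (illustrative, not part of the original header): on weakly
 * ordered machines READ_FIELD re-reads a field behind a read barrier when
 * the first read may have raced with the initialising store; elsewhere it
 * is a plain assignment. 'entry_t' is a hypothetical type.
 */
#if 0
typedef struct { void *key; } entry_t;

static void *read_key(entry_t *e)
{
    void *k;
    READ_FIELD(k, e->key);
    return k;
}
#endif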
#if !defined(INTEL) && !defined(SOLARIS_X86_686) && !defined(SOLARIS_X86_AMD64) && !defined(X86_64)

/*
 * Strong LL/SC operations
 */

static _u32 strong_ll(_u64 *ptr, int p)
{
    _u64 val_read;
    _u64 new_val;
    _u64 flag;

    flag = (1LL << p);

    new_val = *ptr;
    do {
        val_read = new_val;
        new_val = val_read | flag;
    } while ( ((val_read & flag) == 0) &&
              ((new_val = CAS64O(ptr, val_read, new_val)) != val_read) );

    return (_u32) (val_read >> 32);
}

#endif /* !INTEL */
static int strong_vl(_u64 *ptr, int p)
{
    _u64 val_read;
    _u64 flag;

    flag = (1LL << p);
    val_read = *ptr;

    return (val_read & flag);
}
#if !defined(INTEL) && !defined(X86_64)
static int strong_sc(_u64 *ptr, int p, _u32 n)
{
    _u64 val_read;
    _u64 new_val;
    _u64 flag;

    flag = (1LL << p);
    val_read = *ptr;

    while ( (val_read & flag) != 0 )
    {
        new_val = (((_u64)n) << 32);

        if ( (new_val = CAS64O(ptr, val_read, new_val)) == val_read )
        {
            return 1;
        }

        val_read = new_val;
    }

    return 0;
}
#endif /* !INTEL */
static void s_store(_u64 *ptr, _u32 n)
{
    _u64 new_val;

    new_val = (((_u64)n) << 32);
    *ptr = new_val;
}

static _u32 s_load(_u64 *ptr)
{
    _u64 val_read;

    val_read = *ptr;
    return (val_read >> 32);
}
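/*
 * Usage sketch (illustrative, not part of the original header): the 64-bit
 * word carries a 32-bit value in its high half and per-thread reservation
 * flags in its low half. strong_ll(w, p) sets thread p's flag and returns
 * the current value; strong_sc(w, p, n) installs n (clearing all flags)
 * only while p's reservation still holds. 'word' is hypothetical.
 */
#if 0
static _u64 word;

static void ll_sc_increment(int p) /* p = this thread's flag bit */
{
    _u32 v;
    do { v = strong_ll(&word, p); } while ( !strong_sc(&word, p, v + 1) );
}
#endif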
/*
 * MCS lock
 */

typedef struct qnode_t qnode_t;

struct qnode_t {
    qnode_t *next;
    int locked;
};

typedef struct {
    qnode_t *tail;
} mcs_lock_t;

static void mcs_init(mcs_lock_t *lock)
{
    lock->tail = NULL;
}

static void mcs_lock(mcs_lock_t *lock, qnode_t *qn)
{
    qnode_t *pred;

    qn->next = NULL;
    qn->locked = 1;
    WMB_NEAR_CAS();

    pred = FASPO(&lock->tail, qn);
    if ( pred != NULL )
    {
        pred->next = qn;
        while ( qn->locked ) RMB();
    }

    MB();
}

static void mcs_unlock(mcs_lock_t *lock, qnode_t *qn)
{
    qnode_t *t = qn->next;

    MB();

    if ( t == NULL )
    {
        if ( CASPO(&lock->tail, qn, NULL) == qn ) return;
        while ( (t = qn->next) == NULL ) RMB();
        WEAK_DEP_ORDER_MB();
    }

    t->locked = 0;
}
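/*
 * Usage sketch (illustrative, not part of the original header): each thread
 * queues on its own qnode_t, which must stay live until the matching unlock.
 * 'big_lock' is hypothetical and must be mcs_init()ed once at start-up.
 */
#if 0
static mcs_lock_t big_lock;

static void critical_section(void)
{
    qnode_t qn; /* stack allocation is fine: unlock precedes return */

    mcs_lock(&big_lock, &qn);
    /* ... critical section ... */
    mcs_unlock(&big_lock, &qn);
}
#endif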
/*
 * MCS fair MRSW lock.
 */

typedef struct mrsw_qnode_st mrsw_qnode_t;

struct mrsw_qnode_st {
#define CLS_RD 0
#define CLS_WR 1
    int class;
#define ST_NOSUCC   0
#define ST_RDSUCC   1
#define ST_WRSUCC   2
#define ST_SUCCMASK 3
#define ST_BLOCKED  4
    int state;
    mrsw_qnode_t *next;
};

typedef struct {
    mrsw_qnode_t *tail;
    mrsw_qnode_t *next_writer;
    int reader_count;
} mrsw_lock_t;


#define CLEAR_BLOCKED(_qn) ADD_TO((_qn)->state, -ST_BLOCKED)

static void mrsw_init(mrsw_lock_t *lock)
{
    memset(lock, 0, sizeof(*lock));
}
static void rd_lock(mrsw_lock_t *lock, mrsw_qnode_t *qn)
{
    mrsw_qnode_t *pred, *next;

    qn->class = CLS_RD;
    qn->next  = NULL;
    qn->state = ST_NOSUCC | ST_BLOCKED;

    WMB_NEAR_CAS();

    pred = FASPO(&lock->tail, qn);

    if ( pred == NULL )
    {
        ADD_TO(lock->reader_count, 1);
        CLEAR_BLOCKED(qn);
    }
    else
    {
        if ( (pred->class == CLS_WR) ||
             (CASIO(&pred->state, ST_BLOCKED|ST_NOSUCC, ST_BLOCKED|ST_RDSUCC)
              == (ST_BLOCKED|ST_NOSUCC)) )
        {
            WEAK_DEP_ORDER_WMB();
            pred->next = qn;
            while ( (qn->state & ST_BLOCKED) ) RMB();
        }
        else
        {
            ADD_TO(lock->reader_count, 1);
            pred->next = qn;
            WEAK_DEP_ORDER_WMB();
            CLEAR_BLOCKED(qn);
        }
    }

    if ( qn->state == ST_RDSUCC )
    {
        while ( (next = qn->next) == NULL ) RMB();
        ADD_TO(lock->reader_count, 1);
        WEAK_DEP_ORDER_WMB();
        CLEAR_BLOCKED(next);
    }

    RMB();
}
static void rd_unlock(mrsw_lock_t *lock, mrsw_qnode_t *qn)
{
    mrsw_qnode_t *next = qn->next;
    int c, oc;

    RMB();

    if ( (next != NULL) || (CASPO(&lock->tail, qn, NULL) != qn) )
    {
        while ( (next = qn->next) == NULL ) RMB();
        if ( (qn->state & ST_SUCCMASK) == ST_WRSUCC )
        {
            lock->next_writer = next;
            WMB_NEAR_CAS(); /* set next_writer before dec'ing refcnt */
        }
    }

    /* Bounded to maximum # readers if no native atomic_decrement */
    c = lock->reader_count;
    while ( (oc = CASIO(&lock->reader_count, c, c-1)) != c ) c = oc;

    if ( c == 1 )
    {
        WEAK_DEP_ORDER_MB();
        if ( (next = lock->next_writer) != NULL )
        {
            RMB();
            if ( (lock->reader_count == 0) &&
                 (CASPO(&lock->next_writer, next, NULL) == next) )
            {
                WEAK_DEP_ORDER_WMB();
                CLEAR_BLOCKED(next);
            }
        }
    }
}
static void wr_lock(mrsw_lock_t *lock, mrsw_qnode_t *qn)
{
    mrsw_qnode_t *pred;
    int os, s;

    qn->class = CLS_WR;
    qn->next  = NULL;
    qn->state = ST_NOSUCC | ST_BLOCKED;

    WMB_NEAR_CAS();

    pred = FASPO(&lock->tail, qn);

    if ( pred == NULL )
    {
        WEAK_DEP_ORDER_WMB();
        lock->next_writer = qn;
        MB(); /* check reader_count after setting next_writer. */
        if ( (lock->reader_count == 0) &&
             (CASPO(&lock->next_writer, qn, NULL) == qn) )
        {
            CLEAR_BLOCKED(qn);
        }
    }
    else
    {
        s = pred->state;
        /* Bounded while loop: only one other remote update may occur. */
        while ( (os = CASIO(&pred->state, s, s | ST_WRSUCC)) != s ) s = os;
        WMB();
        pred->next = qn;
    }

    while ( (qn->state & ST_BLOCKED) ) RMB();

    MB();
}
static void wr_unlock(mrsw_lock_t *lock, mrsw_qnode_t *qn)
{
    mrsw_qnode_t *next = qn->next;

    MB();

    if ( (next != NULL) || (CASPO(&lock->tail, qn, NULL) != qn) )
    {
        while ( (next = qn->next) == NULL ) RMB();
        WEAK_DEP_ORDER_MB();
        if ( next->class == CLS_RD )
        {
            ADD_TO(lock->reader_count, 1);
            WMB();
        }
        CLEAR_BLOCKED(next);
    }
}
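/*
 * Usage sketch (illustrative, not part of the original header): readers and
 * writers queue on the same lock with their own qnodes; the lock grants
 * access in arrival (FIFO) order, and adjacent queued readers proceed
 * concurrently. 'table_lock' is hypothetical and must be mrsw_init()ed once.
 */
#if 0
static mrsw_lock_t table_lock;

static void reader(void)
{
    mrsw_qnode_t qn;
    rd_lock(&table_lock, &qn);
    /* ... read shared state ... */
    rd_unlock(&table_lock, &qn);
}

static void writer(void)
{
    mrsw_qnode_t qn;
    wr_lock(&table_lock, &qn);
    /* ... modify shared state ... */
    wr_unlock(&table_lock, &qn);
}
#endif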
#endif /* __PORTABLE_DEFNS_H__ */