src/WINNT/client_osi/osibasel.c
/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afs/param.h>
#include <afs/stds.h>

#include <windows.h>
#include "osi.h"
#include <assert.h>
#include <stdio.h>
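
/*
 * This module implements the "base" (type 0) lock package used by the
 * Windows AFS client.  Lock state is protected by a small hash table of
 * CRITICAL_SECTIONs (osi_baseAtomicCS); threads that must block wait on
 * the per-lock turnstile (d.turn).  Locks whose type field is non-zero
 * are dispatched through the osi_lockOps vector to a registered lock
 * package instead.  When lock order validation is enabled, each thread
 * tracks its held locks in a TLS-resident queue of osi_lock_ref_t
 * entries so hierarchy violations can be caught at acquisition time.
 */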
/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
static long atomicIndexCounter = 0;

/* Thread local storage indices for lock tracking */
static DWORD tls_LockRefH = 0;
static DWORD tls_LockRefT = 0;
static BOOLEAN lockOrderValidation = 0;
static osi_lock_ref_t * lock_ref_FreeListp = NULL;
static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
CRITICAL_SECTION lock_ref_CS;
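
/*
 * One-time initialization for the base lock package: create the hashed
 * atomicity critical sections, allocate the two TLS slots used for lock
 * order tracking, and set up the lock_ref free list lock.  This must run
 * before any base lock is used.
 */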
void osi_BaseInit(void)
{
    int i;

    for(i=0; i<OSI_MUTEXHASHSIZE; i++)
        InitializeCriticalSection(&osi_baseAtomicCS[i]);

    if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);

    if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);

    InitializeCriticalSection(&lock_ref_CS);
}
void
osi_SetLockOrderValidation(int on)
{
    lockOrderValidation = (BOOLEAN)on;
}
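
/*
 * Allocate a lock reference record for lock order validation, reusing an
 * entry from the free list when one is available and falling back to
 * malloc otherwise.  Freed records go back on the free list rather than
 * to the heap.
 */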
static osi_lock_ref_t *
lock_GetLockRef(void * lockp, char type)
{
    osi_lock_ref_t * lockRefp = NULL;

    EnterCriticalSection(&lock_ref_CS);
    if (lock_ref_FreeListp) {
        lockRefp = lock_ref_FreeListp;
        osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
                       (osi_queue_t **) &lock_ref_FreeListEndp,
                       &lockRefp->q);
    }
    LeaveCriticalSection(&lock_ref_CS);

    if (lockRefp == NULL)
        lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
    if (lockRefp == NULL)
        /* panic rather than dereference NULL in the memset below */
        osi_panic("lock_GetLockRef: alloc failure", __FILE__, __LINE__);

    memset(lockRefp, 0, sizeof(osi_lock_ref_t));
    lockRefp->type = type;
    switch (type) {
    case OSI_LOCK_MUTEX:
        lockRefp->mx = lockp;
        break;
    case OSI_LOCK_RW:
        lockRefp->rw = lockp;
        break;
    default:
        osi_panic("Invalid Lock Type", __FILE__, __LINE__);
    }

    return lockRefp;
}
static void
lock_FreeLockRef(osi_lock_ref_t * lockRefp)
{
    EnterCriticalSection(&lock_ref_CS);
    osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
               (osi_queue_t **) &lock_ref_FreeListEndp,
               &lockRefp->q);
    LeaveCriticalSection(&lock_ref_CS);
}
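
/*
 * Lock order (hierarchy) checks: walk the calling thread's list of held
 * locks and panic if the requested lock is already held or if any held
 * lock has a higher level than the one being requested.  Locks must
 * always be acquired in non-decreasing level order.
 */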
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
        }
    }
}
void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_MUTEX) {
            if (lockRefp->mx == lockp) {
                sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}
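
/*
 * Acquire a base read/write lock in write (exclusive) mode.  If the lock
 * is held, has readers, or already has queued waiters, the thread bumps
 * the waiter count and blocks on the turnstile until the lock is handed
 * to it.
 */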
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
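
/*
 * Acquire a base read/write lock in shared (read) mode.  Readers record
 * their thread ids in the tid[] array (up to OSI_RWLOCK_THREADS entries)
 * so that recursive read acquisition can be asserted against.
 */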
void lock_ObtainRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    for ( i=0; i < lockp->readers; i++ ) {
        osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
        lockp->waiters--;
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock */
        if (++lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = tid;
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
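
/*
 * Drop a shared hold: remove this thread's id from tid[], and when the
 * last reader leaves, wake any threads queued on the turnstile.
 */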
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->readers > 0, "read lock not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* releasing a read lock can allow readers or writers */
    if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
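
/*
 * Drop an exclusive hold and hand the lock to any threads queued on the
 * turnstile.
 */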
void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->tid[0] = 0;

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
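
/*
 * Downgrade write to read without ever releasing the lock: clear the
 * exclusive flag, count ourselves as a reader, and wake waiters (the 1
 * argument tells osi_TSignalForMLs that readers remain).
 */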
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->readers++;

    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
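
/*
 * Upgrade read to write: give up our reader slot and, if other readers
 * remain, queue as a writer on the turnstile until the lock drains.
 */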
void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    if (--lockp->readers == 0) {
        /* convert read lock to write lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    } else {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    LeaveCriticalSection(csp);
}
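
/*
 * Base mutex operations.  A mutex is the exclusive-only analogue of the
 * rwlock above: one flags bit, a single owner tid, and the same
 * turnstile-based wait path.
 */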
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
void lock_ReleaseMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
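
/*
 * Non-blocking acquisition attempts.  Each Try* routine returns 1 and
 * takes the lock if it can do so immediately, or returns 0 without
 * waiting.  Pending waiters cause the attempt to fail so that queued
 * threads are not starved.
 */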
int lock_TryRead(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryReadProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock; use '<=' as in
         * lock_ObtainRead ('<' would fail to record the final tid slot) */
        if (++lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
int lock_TryMutex(struct osi_mutex *lockp) {
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
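
/*
 * The osi_Sleep* routines atomically release a held lock and put the
 * calling thread to sleep on sleepVal; osi_SleepSpin drops the low-level
 * critical section as part of queueing the sleeper.  The lock is not
 * reacquired on wakeup.
 */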
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
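
/*
 * Initialization and teardown.  Base locks need no finalization of their
 * own, so the Finalize* routines only dispatch to registered lock
 * packages.  On initialization, atomicIndex spreads locks across the
 * osi_baseAtomicCS hash table via a global counter.
 */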
void lock_FinalizeRWLock(osi_rwlock_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
}
void lock_FinalizeMutex(osi_mutex_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeMutexProc)(lockp);
}
void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
        return;
    }

    /* otherwise we have the base case, which requires no special
     * initialization.
     */
    mp->type = 0;
    mp->flags = 0;
    mp->tid = 0;
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
    return;
}
void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
        return;
    }

    /* otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_rwlock_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
    return;
}
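
/*
 * State queries: report whether a lock is currently held for read,
 * write, or both, sampled under the atomicity critical section.  The
 * answer may be stale by the time the caller acts on it.
 */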
int lock_GetRWLockState(osi_rwlock_t *lp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=lp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetRWLockState)(lp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so sample the lock state */
    if (lp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_RWLOCK_WRITEHELD;
    else
        i = 0;
    if (lp->readers > 0)
        i |= OSI_RWLOCK_READHELD;

    LeaveCriticalSection(csp);

    return i;
}
int lock_GetMutexState(struct osi_mutex *mp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=mp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetMutexState)(mp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[mp->atomicIndex];
    EnterCriticalSection(csp);

    if (mp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_MUTEX_HELD;
    else
        i = 0;

    LeaveCriticalSection(csp);

    return i;
}