Don't use POSIX fnmatch() for pattern matching.
[gromacs/qmmm-gamess-us.git] / src / gmxlib / thread_mpi / winthreads.c
blob0bd4698dfe4f2c3b1b0e9c802feba728e3e83289
1 /*
2 This source code file is part of thread_mpi.
3 Written by Sander Pronk, Erik Lindahl, and possibly others.
5 Copyright (c) 2009, Sander Pronk, Erik Lindahl.
6 All rights reserved.
8 Redistribution and use in source and binary forms, with or without
9 modification, are permitted provided that the following conditions are met:
10 1) Redistributions of source code must retain the above copyright
11 notice, this list of conditions and the following disclaimer.
12 2) Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15 3) Neither the name of the copyright holders nor the
16 names of its contributors may be used to endorse or promote products
17 derived from this software without specific prior written permission.
19 THIS SOFTWARE IS PROVIDED BY US ''AS IS'' AND ANY
20 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL WE BE LIABLE FOR ANY
23 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 If you want to redistribute modifications, please consider that
31 scientific software is very special. Version control is crucial -
32 bugs must be traceable. We will be happy to consider code for
33 inclusion in the official distribution, but derived work should not
34 be called official thread_mpi. Details are found in the README & COPYING
35 files.
37 To help us fund development, we humbly ask that you cite
38 any papers on the package - you can find them in the top README file.
44 /* Include the defines that determine which thread library to use.
45 * We do not use HAVE_PTHREAD_H directly, since we might want to
46 * turn off thread support explicitly (e.g. for debugging).
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
52 #ifdef THREAD_WINDOWS
54 /* the win32 header */
55 #include <windows.h>
58 #include <errno.h>
59 #include <stdlib.h>
60 #include <stdio.h>
61 #include <stdarg.h>
64 #include "thread_mpi/threads.h"
65 #include "thread_mpi/atomic.h"
67 /*! \brief System mutex for all one-time initialization
69 * This static variable is necessary in order to make the header file
70 * independent of the thread library implementation. Anyway, it
71 * will only be locked a handful of times at the start of program execution.
74 enum tMPI_Thread_once_status tMPI_Thread_system_lock_state=
75 TMPI_THREAD_ONCE_STATUS_NOTCALLED;
76 static CRITICAL_SECTION tMPI_Thread_system_lock;
78 tMPI_Spinlock_t tMPI_Thread_system_lock=TMPI_SPINLOCK_INITIALIZER;
/* Print a formatted fatal-error message (with source location) to stderr
   and terminate the process via abort(). Never returns. */
void tMPI_Fatal_error(const char *file, int line, const char *message, ...)
{
    va_list ap;

    fprintf(stderr, "tMPI Fatal error in %s, line %d: ", file, line);
    va_start(ap, message);
    vfprintf(stderr, message, ap);
    va_end(ap);
    fprintf(stderr, "\n");
    abort();
}
95 enum tMPI_Thread_support tMPI_Thread_support(void)
97 return TMPI_THREAD_SUPPORT_YES;
/* Bundles a thread's start routine with its argument so that the
   single-pointer Win32 thread entry point can forward both. */
struct tMPI_Thread_starter_param
{
    void *(*start_routine)(void*); /* the user-supplied thread function */
    void *param;                   /* the argument passed to it */
};
106 static DWORD WINAPI tMPI_Win32_thread_starter( LPVOID lpParam )
108 struct tMPI_Thread_starter_param *prm=
109 (struct tMPI_Thread_starter_param*)lpParam;
111 (prm->start_routine)(prm->param);
112 return 0;
116 int tMPI_Thread_create(tMPI_Thread_t *thread,
117 void *(*start_routine)(void *), void *arg)
119 DWORD thread_id;
120 struct tMPI_Thread_starter_param *prm;
122 /* a small memory leak to be sure that it doesn't get deallocated
123 once this function ends */
124 prm=(struct tMPI_Thread_starter_param*)
125 malloc(sizeof(struct tMPI_Thread_starter_param));
126 prm->start_routine= start_routine;
127 prm->param=arg;
129 if(thread==NULL)
131 tMPI_Fatal_error(TMPI_FARGS,"Invalid thread pointer.");
132 return EINVAL;
135 *thread = CreateThread(NULL, 0, tMPI_Win32_thread_starter, prm, 0,
136 &thread_id);
138 if(*thread==NULL)
140 tMPI_Fatal_error(TMPI_FARGS,"Failed to create thread, error code=%d",
141 GetLastError());
142 return -1;
145 return 0;
150 int tMPI_Thread_join(tMPI_Thread_t thread, void **value_ptr)
152 DWORD ret,retval;
154 ret = WaitForSingleObject(thread, INFINITE);
156 if (ret != 0)
158 tMPI_Fatal_error(TMPI_FARGS,"Failed to join thread. error code=%d",
159 GetLastError());
160 return -1;
163 if (value_ptr)
165 if (!GetExitCodeThread(thread, &retval))
167 /* TODO: somehow assign value_ptr */
168 tMPI_Fatal_error(TMPI_FARGS,
169 "Failed to get thread exit code: error=%d",
170 GetLastError());
171 return -1;
174 CloseHandle(thread);
176 return 0;
/* Terminate the calling thread. value_ptr is currently ignored. */
void tMPI_Thread_exit(void *value_ptr)
{
    /* TODO: fix exit code */
    /* TODO: call destructors for thread-local storage */
    ExitThread(0);
}
190 int tMPI_Thread_cancel(tMPI_Thread_t thread)
192 if (!TerminateThread( thread, -1) )
194 tMPI_Fatal_error(TMPI_FARGS,"Failed thread_cancel, error code=%d",
195 GetLastError());
196 return -1;
198 return 0;
204 int tMPI_Thread_mutex_init(tMPI_Thread_mutex_t *mtx)
206 if(mtx==NULL)
208 return EINVAL;
211 InitializeCriticalSection(&(mtx->cs));
212 mtx->init_state = TMPI_THREAD_ONCE_STATUS_READY;
214 return 0;
218 int tMPI_Thread_mutex_destroy(tMPI_Thread_mutex_t *mtx)
220 if(mtx == NULL)
222 return EINVAL;
225 DeleteCriticalSection(&(mtx->cs));
227 return 0;
233 static int tMPI_Thread_mutex_init_once(tMPI_Thread_mutex_t *mtx)
235 int ret;
237 /* This is essentially a copy of the code from the one-time
238 * initialization, but with a call to the mutex init routine instead.
239 * It might seem like overkill, but it will only be executed the first
240 * time you call a static mutex, and it is important to get all the
241 * memory barriers right. Trust me, you don't want a deadlock here...
244 /* Lock the common one-time init mutex so we can check carefully */
245 /*EnterCriticalSection( &tMPI_Thread_system_lock );*/
246 tMPI_Spinlock_lock( &tMPI_Thread_system_lock );
249 #if 0
250 /* If somebody is already initializing, wait until he is finished.
251 * In that case, the mutex will also be unlocked.
253 while (mtx->status == TMPI_THREAD_ONCE_STATUS_PROGRESS)
254 pthread_cond_wait (&tMPI_Thread_pthreads_system_cond,
255 &tMPI_Thread_pthreads_system_mtx);
256 #endif
258 /* Do the actual (locked) check - system mutex is locked if we get here */
259 if (mtx->init_state != TMPI_THREAD_ONCE_STATUS_READY)
261 /*mtx->status = TMPI_THREAD_ONCE_STATUS_PROGRESS;*/
263 /* No need to keep the lock during execution -
264 * Only one thread can do it anyway.
266 /*pthread_mutex_unlock (&tMPI_Thread_pthreads_system_mtx);*/
267 ret=tMPI_Thread_mutex_init(mtx);
268 /*pthread_mutex_lock (&tMPI_Thread_pthreads_system_mtx);*/
270 /* Status will be marked as ready by tMPI_Thread_mutex_init(). */
271 /*pthread_cond_broadcast (&tMPI_Thread_pthreads_system_cond);*/
273 else
275 ret = 0;
278 /*LeaveCriticalSection( &tMPI_Thread_system_lock );*/
279 tMPI_Spinlock_unlock( &tMPI_Thread_system_lock );
281 return ret;
286 int tMPI_Thread_mutex_lock(tMPI_Thread_mutex_t *mtx)
288 /* Ccheck whether this mutex is initialized */
289 if(mtx->init_state != TMPI_THREAD_ONCE_STATUS_READY)
291 tMPI_Thread_mutex_init_once(mtx);
294 /* The mutex is now guaranteed to be valid. */
295 EnterCriticalSection( &(mtx->cs) );
297 return 0;
303 int tMPI_Thread_mutex_trylock(tMPI_Thread_mutex_t *mtx)
305 BOOL ret;
307 /* Ccheck whether this mutex is initialized */
308 if(mtx->init_state != TMPI_THREAD_ONCE_STATUS_READY)
310 tMPI_Thread_mutex_init_once(mtx);
313 /* The mutex is now guaranteed to be valid. */
314 ret=TryEnterCriticalSection( &(mtx->cs) );
316 return (ret != 0);
321 int tMPI_Thread_mutex_unlock(tMPI_Thread_mutex_t *mtx)
323 LeaveCriticalSection( &(mtx->cs) );
325 return 0;
330 int tMPI_Thread_key_create(tMPI_Thread_key_t *key, void (*destructor)(void *))
332 if(key==NULL)
334 tMPI_Fatal_error(TMPI_FARGS,"Invalid key pointer.");
335 return EINVAL;
339 /* TODO: make list of destructors for thread-local storage */
340 *key=TlsAlloc();
342 if ( *key == TLS_OUT_OF_INDEXES )
344 tMPI_Fatal_error(TMPI_FARGS,
345 "Failed to create thread key, error code=%d.",
346 GetLastError());
347 return -1;
350 return 0;
354 int tMPI_Thread_key_delete(tMPI_Thread_key_t key)
356 TlsFree(key);
358 return 0;
363 void * tMPI_Thread_getspecific(tMPI_Thread_key_t key)
365 void *p = NULL;
367 p=TlsGetValue(key);
369 return p;
373 int tMPI_Thread_setspecific(tMPI_Thread_key_t key, void *value)
375 BOOL ret;
377 ret = TlsSetValue(key, value);
379 return ret==0;
383 static BOOL CALLBACK InitHandleWrapperFunction(PINIT_ONCE InitOnce,
384 PVOID Parameter,
385 PVOID *lpContext)
387 void (*fn)(void)=(void (*)(void))Parameter;
389 fn();
391 return TRUE;
394 CRITICAL_SECTION tMPI_Once_cs;
395 tMPI_Spinlock_t tMPI_Once_cs_lock=TMPI_SPINLOCK_INITIALIZER;
396 volatile int tMPI_Once_init=0;
399 int tMPI_Thread_once(tMPI_Thread_once_t *once_control,
400 void (*init_routine)(void))
402 #if 0
403 BOOL bStatus;
404 bStatus = InitOnceExecuteOnce(once_control, InitHandleWrapperFunction,
405 init_routine, NULL);
407 if (!bStatus)
409 tMPI_Fatal_error(TMPI_FARGS,"Failed to run thread_once routine");
410 return -1;
412 #else
413 /* ugly hack to initialize the critical section */
414 if (!tMPI_Once_init)
416 tMPI_Spinlock_lock( &tMPI_Once_cs_lock);
417 InitializeCriticalSection(&(tMPI_Once_cs));
418 tMPI_Once_init=1;
419 tMPI_Spinlock_unlock( &tMPI_Once_cs_lock);
421 EnterCriticalSection(&tMPI_Once_cs);
422 if (*once_control == 0)
424 (*init_routine)(); /* call the function */
425 *once_control=1; /* flag that we're done */
427 LeaveCriticalSection(&tMPI_Once_cs);
428 #endif
429 return 0;
436 int tMPI_Thread_cond_init(tMPI_Thread_cond_t *cond)
438 if(cond==NULL)
440 return EINVAL;
443 #if 0
444 /* use this code once Vista is the minimum version required */
445 InitializeConditionVariable( &(cond->cv) );
446 #else
447 cond->Nwaiters=0;
448 InitializeCriticalSection(&(cond->wtr_lock));
449 cond->Nrelease=0;
450 cond->cycle=0;
451 /* a manual reset, unsignalled event */
452 cond->ev = CreateEvent(NULL, TRUE, FALSE, NULL);
453 #endif
455 cond->init_state=TMPI_THREAD_ONCE_STATUS_READY;
456 return 0;
460 int tMPI_Thread_cond_destroy(tMPI_Thread_cond_t *cond)
462 #if 0
463 /* use this code once Vista is the minimum version required */
464 /* windows doesnt have this function */
465 #else
466 DeleteCriticalSection(&(cond->wtr_lock));
467 #endif
468 return 0;
473 /*! \brief Static init routine for pthread barrier
475 * \internal
477 * This is only used as a wrapper to enable static initialization
478 * of posix thread types together with out abstraction layer for tMPI_Thread.h
480 * \param cond Condition variable, must be statically initialized
482 * \return status - 0 on success, or a standard error code.
484 static int tMPI_Thread_cond_init_once(tMPI_Thread_cond_t *cond)
486 int ret;
488 /* This is essentially a copy of the code from the one-time
489 * initialization, but with a call to the cond init routine instead.
490 * It might seem like overkill, but it will only be executed the first
491 * time you call a static condition variable, and it is important to get
492 * the memory barriers right. Trust me, you don't want a deadlock here...
494 /* Lock the common one-time init mutex so we can check carefully */
495 /*EnterCriticalSection( &tMPI_Thread_system_lock );*/
496 tMPI_Spinlock_lock( &tMPI_Thread_system_lock );
497 /* Do the actual (locked) check - system mutex is locked if we get here */
498 if (cond->init_state != TMPI_THREAD_ONCE_STATUS_READY)
500 ret=tMPI_Thread_cond_init(cond);
502 else
504 ret = 0;
506 /*LeaveCriticalSection( &tMPI_Thread_system_lock );*/
507 tMPI_Spinlock_unlock( &tMPI_Thread_system_lock );
509 return ret;
514 int tMPI_Thread_cond_wait(tMPI_Thread_cond_t *cond, tMPI_Thread_mutex_t *mtx)
516 BOOL wait_done=FALSE;
517 BOOL last_waiter=FALSE;
518 int my_cycle;
520 /* Ccheck whether this condition variable is initialized */
521 if(cond->init_state != TMPI_THREAD_ONCE_STATUS_READY)
523 tMPI_Thread_cond_init_once(cond);
525 if(mtx->init_state != TMPI_THREAD_ONCE_STATUS_READY)
527 tMPI_Thread_mutex_init_once(mtx);
529 #if 0
530 /* use this code once Vista is the minimum version required */
531 ret=SleepConditionVariableCS (&(cond->cv), &(mtx->cs), INFINITE);
533 if (!ret)
535 tMPI_Fatal_error(TMPI_FARGS,"Failed wait for condition, error code=%d",
536 GetLastError());
537 return -1;
539 #else
540 /* serially increase waiter count */
541 EnterCriticalSection(&(cond->wtr_lock));
542 cond->Nwaiters++;
543 my_cycle = cond->cycle;
544 LeaveCriticalSection(&(cond->wtr_lock));
546 /* now it's safe to release the mutex from the fn call */
547 LeaveCriticalSection(&(mtx->cs));
549 /* Loop a wait until we found out we've waited for the right event.
550 Note that this loop is potentially a busy-wait loop in bad
551 circumstances (higher priority threads, for example). */
554 /* do the actual waiting */
555 if (WaitForSingleObject( cond->ev, INFINITE )== WAIT_FAILED)
557 tMPI_Fatal_error(TMPI_FARGS,"Failed event reset, error code=%d",
558 GetLastError());
559 return -1;
562 /* serially check whether we got the right event. */
563 EnterCriticalSection(&(cond->wtr_lock));
564 wait_done = (cond->Nrelease > 0) && (cond->cycle!=my_cycle);
565 LeaveCriticalSection(&(cond->wtr_lock));
567 while(!wait_done);
569 /* We obtain the mutex from the function call */
570 EnterCriticalSection(&(mtx->cs));
572 /* we serially decrease the waiter count and release count */
573 EnterCriticalSection(&(cond->wtr_lock));
574 cond->Nwaiters--;
575 cond->Nrelease--;
576 last_waiter=(cond->Nrelease==0);
577 LeaveCriticalSection(&(cond->wtr_lock));
579 /* manually release the event if everybody's done with it */
580 if (last_waiter)
582 if (!ResetEvent( cond->ev ))
584 tMPI_Fatal_error(TMPI_FARGS,"Failed event reset, error code=%d",
585 GetLastError());
586 return -1;
589 #endif
591 return 0;
597 int tMPI_Thread_cond_signal(tMPI_Thread_cond_t *cond)
599 /* Ccheck whether this condition variable is initialized */
600 if(cond->init_state != TMPI_THREAD_ONCE_STATUS_READY)
602 tMPI_Thread_cond_init_once(cond);
604 /* The condition variable is now guaranteed to be valid. */
605 #if 0
606 /* use this code once Vista is the minimum version required */
607 WakeConditionVariable( &(cond->cv) );
608 #else
609 EnterCriticalSection(&(cond->wtr_lock));
610 /* check if we're not still busy with a release. If we are, do nothing. */
611 if (cond->Nwaiters > cond->Nrelease)
613 cond->Nrelease++;
614 cond->cycle++;
615 if (!SetEvent(cond->ev)) /* actually release the waiting threads */
617 tMPI_Fatal_error(TMPI_FARGS,"Failed SetEvent, error code=%d",
618 GetLastError());
619 return -1;
622 LeaveCriticalSection(&(cond->wtr_lock));
623 #endif
625 return 0;
630 int tMPI_Thread_cond_broadcast(tMPI_Thread_cond_t *cond)
632 /* Ccheck whether this condition variable is initialized */
633 if(cond->init_state != TMPI_THREAD_ONCE_STATUS_READY)
635 tMPI_Thread_cond_init_once(cond);
637 /* The condition variable is now guaranteed to be valid. */
638 #if 0
639 /* use this code once Vista is the minimum version required */
640 WakeAllConditionVariable( &(cond->cv) );
641 #else
642 EnterCriticalSection(&(cond->wtr_lock));
643 /* check whether there are any waiters */
644 if (cond->Nwaiters > 0)
646 cond->Nrelease=cond->Nwaiters;
647 cond->cycle++;
648 if (!SetEvent(cond->ev)) /* actually release the waiting threads */
650 tMPI_Fatal_error(TMPI_FARGS,"Failed SetEvent, error code=%d",
651 GetLastError());
652 return -1;
655 LeaveCriticalSection(&(cond->wtr_lock));
656 #endif
657 return 0;
#ifdef TMPI_RWLOCK
/* Reader/writer locks: thin wrappers around Win32 slim RW (SRW) locks.
   None of the SRW acquire/release functions return error values. */

int tMPI_Thread_rwlock_init(tMPI_Thread_rwlock_t *rwlock)
{
    InitializeSRWLock(rwlock);
    return 0; /* no error value returned by the above function in Windows */
}

int tMPI_Thread_rwlock_destroy(tMPI_Thread_rwlock_t *rwlock)
{
    /* Windows doesn't define a function for this. Presumably this means
       that the lock's size is too small to bother with */
    return 0;
}

int tMPI_Thread_rwlock_rdlock(tMPI_Thread_rwlock_t *rwlock)
{
    AcquireSRWLockShared(rwlock);
    return 0; /* no error value returned by the above function in Windows */
}

int tMPI_Thread_rwlock_tryrdlock(tMPI_Thread_rwlock_t *rwlock)
{
    /* TryAcquireSRWLockShared() requires Windows 7, so it is not called
       here.  The previous code returned 0 ("lock acquired") without
       taking the lock, which would lead callers to release a lock they
       never held; report the lock as busy instead.  Once Windows 7 is
       the minimum version, use:
       return (TryAcquireSRWLockShared(rwlock) != 0) ? 0 : EBUSY;  */
    return EBUSY;
}

int tMPI_Thread_rwlock_wrlock(tMPI_Thread_rwlock_t *rwlock)
{
    AcquireSRWLockExclusive(rwlock);
    return 0; /* no error value returned by the above function in Windows */
}

int tMPI_Thread_rwlock_trywrlock(tMPI_Thread_rwlock_t *rwlock)
{
    /* See tMPI_Thread_rwlock_tryrdlock() above for why this reports
       busy instead of claiming success without acquiring the lock. */
    return EBUSY;
}

int tMPI_Thread_rwlock_rdunlock(tMPI_Thread_rwlock_t *rwlock)
{
    ReleaseSRWLockShared(rwlock);
    return 0; /* no error value returned by the above function in Windows */
}

int tMPI_Thread_rwlock_wrunlock(tMPI_Thread_rwlock_t *rwlock)
{
    ReleaseSRWLockExclusive(rwlock);
    return 0; /* no error value returned by the above function in Windows */
}
#endif
715 int tMPI_Thread_barrier_init(tMPI_Thread_barrier_t *barrier, int n)
717 if(barrier==NULL)
719 return EINVAL;
723 #if 0
724 /* use this once Vista is the oldest supported windows version: */
725 InitializeCriticalSection(&(barrier->cs));
726 InitializeConditionVariable(&(barrier->cv));
727 #else
728 tMPI_Thread_mutex_init(&(barrier->cs));
729 tMPI_Thread_cond_init(&(barrier->cv));
730 #endif
732 barrier->threshold = n;
733 barrier->count = n;
734 barrier->cycle = 0;
736 barrier->init_state = TMPI_THREAD_ONCE_STATUS_READY;
738 return 0;
743 int tMPI_Thread_barrier_destroy(tMPI_Thread_barrier_t *barrier)
745 if(barrier==NULL)
747 return EINVAL;
750 #if 0
751 DeleteCriticalSection(&(barrier->cs));
752 #else
753 tMPI_Thread_mutex_destroy(&(barrier->cs));
754 #endif
756 tMPI_Thread_cond_destroy(&(barrier->cv));
758 return 0;
763 /*! \brief Static init routine for pthread barrier
765 * \internal
767 * This is only used as a wrapper to enable static initialization
768 * of posix thread types together with out abstraction layer for tMPI_Thread.h
770 * \param barrier Statically initialized barrier type
771 * \param n Number of members in barrier
773 * \return status - 0 on success, or a standard error code.
775 static int tMPI_Thread_barrier_init_once(tMPI_Thread_barrier_t *barrier, int n)
777 int ret;
779 /* This is essentially a copy of the code from the one-time
780 * initialization, but with a call to the cond init routine instead.
781 * It might seem like overkill, but it will only be executed the first
782 * time you call a static condition variable, and it is important to get
783 * the memory barriers right. Trust me, you don't want a deadlock here...
785 /* Lock the common one-time init mutex so we can check carefully */
786 /*EnterCriticalSection( &tMPI_Thread_system_lock );*/
787 tMPI_Spinlock_lock( &tMPI_Thread_system_lock );
788 /* Do the actual (locked) check - system mutex is locked if we get here */
789 if (barrier->init_state != TMPI_THREAD_ONCE_STATUS_READY)
791 ret=tMPI_Thread_barrier_init(barrier, n);
793 else
795 ret = 0;
797 /*LeaveCriticalSection( &tMPI_Thread_system_lock );*/
798 tMPI_Spinlock_lock( &tMPI_Thread_system_lock );
800 return ret;
805 int tMPI_Thread_barrier_wait(tMPI_Thread_barrier_t *barrier)
807 int cycle;
808 BOOL rc=FALSE;
809 int ret=0;
810 /*tMPI_Thread_pthread_barrier_t *p;*/
812 if(barrier->init_state != TMPI_THREAD_ONCE_STATUS_READY)
814 tMPI_Thread_barrier_init_once(barrier,barrier->threshold);
817 /*p = (tMPI_Thread_pthread_barrier_t*)barrier->actual_barrier;*/
818 #if 0
819 EnterCriticalSection( &(barrier->cs) );
820 #else
821 tMPI_Thread_mutex_lock( &(barrier->cs) );
822 #endif
826 cycle = barrier->cycle;
828 /* Decrement the count atomically and check if it is zero.
829 * This will only be true for the last thread calling us.
831 if( --(barrier->count) <= 0 )
833 barrier->cycle = !barrier->cycle;
834 barrier->count = barrier->threshold;
835 #if 0
836 WakeAllConditionVariable( &(barrier->cv) );
837 #else
838 tMPI_Thread_cond_broadcast( &(barrier->cv) );
839 #endif
841 else
843 while(cycle == barrier->cycle)
845 #if 0
846 rc=SleepConditionVariableCS (&(barrier->cv), &(barrier->cs),
847 INFINITE);
848 if(!rc)
850 ret=-1;
851 break;
853 #else
854 rc = tMPI_Thread_cond_wait(&barrier->cv,&barrier->cs);
855 if(rc != 0) break;
856 #endif
859 #if 0
860 LeaveCriticalSection( &(barrier->cs) );
861 #else
862 tMPI_Thread_mutex_unlock( &(barrier->cs) );
863 #endif
864 return ret;
/* Lock a FILE stream for exclusive use by the calling thread.
   Currently a no-op on Windows; POSIX flockfile() has no direct
   equivalent here. */
void tMPI_lockfile(FILE *stream)
{
    /* flockfile(stream); */
    /* TODO: implement this (presumably _lock_file could be used —
       verify availability on the supported toolchains first) */
}
/* Release a FILE stream lock taken by tMPI_lockfile(). Currently a
   no-op on Windows, matching tMPI_lockfile() above. */
void tMPI_unlockfile(FILE *stream)
{
    /* funlockfile(stream); */
    /* TODO: implement this */
}
882 #endif /* THREAD_WINDOWS */