2 /* This code implemented by Dag.Gruneau@elsa.preseco.comm.se */
3 /* Fast NonRecursiveMutex support by Yakov Markovitch, markovitch@iso.ru */
9 typedef struct NRMUTEX
{
13 } NRMUTEX
, *PNRMUTEX
;
16 typedef PVOID WINAPI
interlocked_cmp_xchg_t(PVOID
*dest
, PVOID exc
, PVOID comperand
) ;
18 /* Sorry mate, but we haven't got InterlockedCompareExchange in Win95! */
19 static PVOID WINAPI
interlocked_cmp_xchg(PVOID
*dest
, PVOID exc
, PVOID comperand
)
21 static LONG spinlock
= 0 ;
25 /* Acqire spinlock (yielding control to other threads if cant aquire for the moment) */
26 while(InterlockedExchange(&spinlock
, 1))
28 // Using Sleep(0) can cause a priority inversion.
29 // Sleep(0) only yields the processor if there's
30 // another thread of the same priority that's
31 // ready to run. If a high-priority thread is
32 // trying to acquire the lock, which is held by
33 // a low-priority thread, then the low-priority
34 // thread may never get scheduled and hence never
35 // free the lock. NT attempts to avoid priority
36 // inversions by temporarily boosting the priority
37 // of low-priority runnable threads, but the problem
38 // can still occur if there's a medium-priority
39 // thread that's always runnable. If Sleep(1) is used,
40 // then the thread unconditionally yields the CPU. We
41 // only do this for the second and subsequent even
42 // iterations, since a millisecond is a long time to wait
43 // if the thread can be scheduled in again sooner
44 // (~100,000 instructions).
45 // Avoid priority inversion: 0, 1, 0, 1,...
50 if (result
== comperand
)
52 /* Release spinlock */
57 static interlocked_cmp_xchg_t
*ixchg
;
58 BOOL
InitializeNonRecursiveMutex(PNRMUTEX mutex
)
62 /* Sorely, Win95 has no InterlockedCompareExchange API (Win98 has), so we have to use emulation */
63 HANDLE kernel
= GetModuleHandle("kernel32.dll") ;
64 if (!kernel
|| (ixchg
= (interlocked_cmp_xchg_t
*)GetProcAddress(kernel
, "InterlockedCompareExchange")) == NULL
)
65 ixchg
= interlocked_cmp_xchg
;
68 mutex
->owned
= -1 ; /* No threads have entered NonRecursiveMutex */
69 mutex
->thread_id
= 0 ;
70 mutex
->hevent
= CreateEvent(NULL
, FALSE
, FALSE
, NULL
) ;
71 return mutex
->hevent
!= NULL
; /* TRUE if the mutex is created */
/* From here on, route every InterlockedCompareExchange through the ixchg
   pointer resolved above, so the code also runs on Win95.
   Fix: the #ifdef opened below was never closed — the #endif was missing,
   leaving an unbalanced preprocessor conditional. */
#ifdef InterlockedCompareExchange
#undef InterlockedCompareExchange
#endif
#define InterlockedCompareExchange(dest,exchange,comperand) (ixchg((dest), (exchange), (comperand)))
79 VOID
DeleteNonRecursiveMutex(PNRMUTEX mutex
)
82 CloseHandle(mutex
->hevent
) ;
83 mutex
->hevent
= NULL
; /* Just in case */
86 DWORD
EnterNonRecursiveMutex(PNRMUTEX mutex
, BOOL wait
)
88 /* Assume that the thread waits successfully */
91 /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
94 if (InterlockedCompareExchange((PVOID
*)&mutex
->owned
, (PVOID
)0, (PVOID
)-1) != (PVOID
)-1)
99 ret
= InterlockedIncrement(&mutex
->owned
) ?
100 /* Some thread owns the mutex, let's wait... */
101 WaitForSingleObject(mutex
->hevent
, INFINITE
) : WAIT_OBJECT_0
;
103 mutex
->thread_id
= GetCurrentThreadId() ; /* We own it */
107 BOOL
LeaveNonRecursiveMutex(PNRMUTEX mutex
)
109 /* We don't own the mutex */
110 mutex
->thread_id
= 0 ;
112 InterlockedDecrement(&mutex
->owned
) < 0 ||
113 SetEvent(mutex
->hevent
) ; /* Other threads are waiting, wake one on them up */
116 PNRMUTEX
AllocNonRecursiveMutex(void)
118 PNRMUTEX mutex
= (PNRMUTEX
)malloc(sizeof(NRMUTEX
)) ;
119 if (mutex
&& !InitializeNonRecursiveMutex(mutex
))
127 void FreeNonRecursiveMutex(PNRMUTEX mutex
)
131 DeleteNonRecursiveMutex(mutex
) ;
long PyThread_get_thread_ident(void);

/*
 * Change all headers to pure ANSI as no one will use K&R style on an
 * Alpha.
 */

/*
 * Initialization of the C package, should not be needed.
 */
static void PyThread__init_thread(void)
{
	/* Nothing to do on Win32: per-thread state is handled by the OS. */
}
153 int PyThread_start_new_thread(void (*func
)(void *), void *arg
)
158 dprintf(("%ld: PyThread_start_new_thread called\n", PyThread_get_thread_ident()));
160 PyThread_init_thread();
162 rv
= _beginthread(func
, 0, arg
); /* use default stack size */
166 dprintf(("%ld: PyThread_start_new_thread succeeded: %p\n", PyThread_get_thread_ident(), rv
));
173 * Return the thread Id instead of an handle. The Id is said to uniquely identify the
174 * thread in the system
176 long PyThread_get_thread_ident(void)
179 PyThread_init_thread();
181 return GetCurrentThreadId();
184 static void do_PyThread_exit_thread(int no_cleanup
)
186 dprintf(("%ld: PyThread_exit_thread called\n", PyThread_get_thread_ident()));
/* Public entry point: exit the calling thread, with cleanup. */
void PyThread_exit_thread(void)
{
	do_PyThread_exit_thread(0);
}
/* Public entry point: exit the calling thread, skipping cleanup. */
void PyThread__exit_thread(void)
{
	do_PyThread_exit_thread(1);
}
206 static void do_PyThread_exit_prog(int status
, int no_cleanup
)
208 dprintf(("PyThread_exit_prog(%d) called\n", status
));
/* Public entry point: exit the program with cleanup. */
void PyThread_exit_prog(int status)
{
	do_PyThread_exit_prog(status, 0);
}
221 void PyThread__exit_prog(int status
)
223 do_PyThread_exit_prog(status
, 1);
225 #endif /* NO_EXIT_PROG */
228 * Lock support. It has too be implemented as semaphores.
229 * I [Dag] tried to implement it with mutex but I could find a way to
230 * tell whether a thread already own the lock or not.
232 PyThread_type_lock
PyThread_allocate_lock(void)
236 dprintf(("PyThread_allocate_lock called\n"));
238 PyThread_init_thread();
240 aLock
= AllocNonRecursiveMutex() ;
242 dprintf(("%ld: PyThread_allocate_lock() -> %p\n", PyThread_get_thread_ident(), aLock
));
244 return (PyThread_type_lock
) aLock
;
247 void PyThread_free_lock(PyThread_type_lock aLock
)
249 dprintf(("%ld: PyThread_free_lock(%p) called\n", PyThread_get_thread_ident(),aLock
));
251 FreeNonRecursiveMutex(aLock
) ;
255 * Return 1 on success if the lock was acquired
257 * and 0 if the lock was not acquired. This means a 0 is returned
258 * if the lock has already been acquired by this thread!
260 int PyThread_acquire_lock(PyThread_type_lock aLock
, int waitflag
)
264 dprintf(("%ld: PyThread_acquire_lock(%p, %d) called\n", PyThread_get_thread_ident(),aLock
, waitflag
));
266 success
= aLock
&& EnterNonRecursiveMutex((PNRMUTEX
) aLock
, (waitflag
== 1 ? INFINITE
: 0)) == WAIT_OBJECT_0
;
268 dprintf(("%ld: PyThread_acquire_lock(%p, %d) -> %d\n", PyThread_get_thread_ident(),aLock
, waitflag
, success
));
273 void PyThread_release_lock(PyThread_type_lock aLock
)
275 dprintf(("%ld: PyThread_release_lock(%p) called\n", PyThread_get_thread_ident(),aLock
));
277 if (!(aLock
&& LeaveNonRecursiveMutex((PNRMUTEX
) aLock
)))
278 dprintf(("%ld: Could not PyThread_release_lock(%p) error: %l\n", PyThread_get_thread_ident(), aLock
, GetLastError()));
284 PyThread_type_sema
PyThread_allocate_sema(int value
)
288 dprintf(("%ld: PyThread_allocate_sema called\n", PyThread_get_thread_ident()));
290 PyThread_init_thread();
292 aSemaphore
= CreateSemaphore( NULL
, /* Security attributes */
293 value
, /* Initial value */
294 INT_MAX
, /* Maximum value */
295 NULL
); /* Name of semaphore */
297 dprintf(("%ld: PyThread_allocate_sema() -> %p\n", PyThread_get_thread_ident(), aSemaphore
));
299 return (PyThread_type_sema
) aSemaphore
;
302 void PyThread_free_sema(PyThread_type_sema aSemaphore
)
304 dprintf(("%ld: PyThread_free_sema(%p) called\n", PyThread_get_thread_ident(), aSemaphore
));
306 CloseHandle((HANDLE
) aSemaphore
);
310 XXX must do something about waitflag
312 int PyThread_down_sema(PyThread_type_sema aSemaphore
, int waitflag
)
316 dprintf(("%ld: PyThread_down_sema(%p) called\n", PyThread_get_thread_ident(), aSemaphore
));
318 waitResult
= WaitForSingleObject( (HANDLE
) aSemaphore
, INFINITE
);
320 dprintf(("%ld: PyThread_down_sema(%p) return: %l\n", PyThread_get_thread_ident(), aSemaphore
, waitResult
));
324 void PyThread_up_sema(PyThread_type_sema aSemaphore
)
327 (HANDLE
) aSemaphore
, /* Handle of semaphore */
328 1, /* increment count by one */
329 NULL
); /* not interested in previous count */
331 dprintf(("%ld: PyThread_up_sema(%p)\n", PyThread_get_thread_ident(), aSemaphore
));