/* This code implemented by Dag.Gruneau@elsa.preseco.comm.se */
/* Fast NonRecursiveMutex support by Yakov Markovitch, markovitch@iso.ru */
/* Eliminated some memory leaks, gsw@agere.com */
10 typedef struct NRMUTEX
{
14 } NRMUTEX
, *PNRMUTEX
;
16 typedef PVOID WINAPI
interlocked_cmp_xchg_t(PVOID
*dest
, PVOID exc
, PVOID comperand
) ;
18 /* Sorry mate, but we haven't got InterlockedCompareExchange in Win95! */
19 static PVOID WINAPI
interlocked_cmp_xchg(PVOID
*dest
, PVOID exc
, PVOID comperand
)
21 static LONG spinlock
= 0 ;
25 /* Acqire spinlock (yielding control to other threads if cant aquire for the moment) */
26 while(InterlockedExchange(&spinlock
, 1))
28 // Using Sleep(0) can cause a priority inversion.
29 // Sleep(0) only yields the processor if there's
30 // another thread of the same priority that's
31 // ready to run. If a high-priority thread is
32 // trying to acquire the lock, which is held by
33 // a low-priority thread, then the low-priority
34 // thread may never get scheduled and hence never
35 // free the lock. NT attempts to avoid priority
36 // inversions by temporarily boosting the priority
37 // of low-priority runnable threads, but the problem
38 // can still occur if there's a medium-priority
39 // thread that's always runnable. If Sleep(1) is used,
40 // then the thread unconditionally yields the CPU. We
41 // only do this for the second and subsequent even
42 // iterations, since a millisecond is a long time to wait
43 // if the thread can be scheduled in again sooner
44 // (~100,000 instructions).
45 // Avoid priority inversion: 0, 1, 0, 1,...
50 if (result
== comperand
)
52 /* Release spinlock */
57 static interlocked_cmp_xchg_t
*ixchg
;
58 BOOL
InitializeNonRecursiveMutex(PNRMUTEX mutex
)
62 /* Sorely, Win95 has no InterlockedCompareExchange API (Win98 has), so we have to use emulation */
63 HANDLE kernel
= GetModuleHandle("kernel32.dll") ;
64 if (!kernel
|| (ixchg
= (interlocked_cmp_xchg_t
*)GetProcAddress(kernel
, "InterlockedCompareExchange")) == NULL
)
65 ixchg
= interlocked_cmp_xchg
;
68 mutex
->owned
= -1 ; /* No threads have entered NonRecursiveMutex */
69 mutex
->thread_id
= 0 ;
70 mutex
->hevent
= CreateEvent(NULL
, FALSE
, FALSE
, NULL
) ;
71 return mutex
->hevent
!= NULL
; /* TRUE if the mutex is created */
/* From here on, route InterlockedCompareExchange through the pointer
   resolved in InitializeNonRecursiveMutex() (real API or emulation). */
#ifdef InterlockedCompareExchange
#undef InterlockedCompareExchange
#endif
#define InterlockedCompareExchange(dest,exchange,comperand) (ixchg((dest), (exchange), (comperand)))
79 VOID
DeleteNonRecursiveMutex(PNRMUTEX mutex
)
82 CloseHandle(mutex
->hevent
) ;
83 mutex
->hevent
= NULL
; /* Just in case */
86 DWORD
EnterNonRecursiveMutex(PNRMUTEX mutex
, BOOL wait
)
88 /* Assume that the thread waits successfully */
91 /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
94 if (InterlockedCompareExchange((PVOID
*)&mutex
->owned
, (PVOID
)0, (PVOID
)-1) != (PVOID
)-1)
99 ret
= InterlockedIncrement(&mutex
->owned
) ?
100 /* Some thread owns the mutex, let's wait... */
101 WaitForSingleObject(mutex
->hevent
, INFINITE
) : WAIT_OBJECT_0
;
103 mutex
->thread_id
= GetCurrentThreadId() ; /* We own it */
107 BOOL
LeaveNonRecursiveMutex(PNRMUTEX mutex
)
109 /* We don't own the mutex */
110 mutex
->thread_id
= 0 ;
112 InterlockedDecrement(&mutex
->owned
) < 0 ||
113 SetEvent(mutex
->hevent
) ; /* Other threads are waiting, wake one on them up */
116 PNRMUTEX
AllocNonRecursiveMutex(void)
118 PNRMUTEX mutex
= (PNRMUTEX
)malloc(sizeof(NRMUTEX
)) ;
119 if (mutex
&& !InitializeNonRecursiveMutex(mutex
))
127 void FreeNonRecursiveMutex(PNRMUTEX mutex
)
131 DeleteNonRecursiveMutex(mutex
) ;
long PyThread_get_thread_ident(void);

/*
 * Initialization of the C package, should not be needed.
 */
static void PyThread__init_thread(void)
{
	/* Nothing to do on Windows. */
}
157 bootstrap(void *call
)
159 callobj
*obj
= (callobj
*)call
;
160 /* copy callobj since other thread might free it before we're done */
161 void (*func
)(void*) = obj
->func
;
162 void *arg
= obj
->arg
;
164 obj
->id
= PyThread_get_thread_ident();
165 ReleaseSemaphore(obj
->done
, 1, NULL
);
170 long PyThread_start_new_thread(void (*func
)(void *), void *arg
)
177 dprintf(("%ld: PyThread_start_new_thread called\n", PyThread_get_thread_ident()));
179 PyThread_init_thread();
181 obj
= malloc(sizeof(callobj
));
184 obj
->done
= CreateSemaphore(NULL
, 0, 1, NULL
);
186 rv
= _beginthread(bootstrap
, 0, obj
); /* use default stack size */
188 if (rv
!= (unsigned long)-1) {
190 dprintf(("%ld: PyThread_start_new_thread succeeded: %p\n", PyThread_get_thread_ident(), rv
));
193 /* wait for thread to initialize and retrieve id */
194 WaitForSingleObject(obj
->done
, 5000); /* maybe INFINITE instead of 5000? */
195 CloseHandle((HANDLE
)obj
->done
);
202 * Return the thread Id instead of an handle. The Id is said to uniquely identify the
203 * thread in the system
205 long PyThread_get_thread_ident(void)
208 PyThread_init_thread();
210 return GetCurrentThreadId();
213 static void do_PyThread_exit_thread(int no_cleanup
)
215 dprintf(("%ld: PyThread_exit_thread called\n", PyThread_get_thread_ident()));
/* Public wrapper: exit the calling thread, with cleanup. */
void PyThread_exit_thread(void)
{
	do_PyThread_exit_thread(0);
}
/* Public wrapper: exit the calling thread, skipping cleanup. */
void PyThread__exit_thread(void)
{
	do_PyThread_exit_thread(1);
}
235 static void do_PyThread_exit_prog(int status
, int no_cleanup
)
237 dprintf(("PyThread_exit_prog(%d) called\n", status
));
/* Public wrapper: exit the program, with cleanup. */
void PyThread_exit_prog(int status)
{
	do_PyThread_exit_prog(status, 0);
}
250 void PyThread__exit_prog(int status
)
252 do_PyThread_exit_prog(status
, 1);
254 #endif /* NO_EXIT_PROG */
257 * Lock support. It has too be implemented as semaphores.
258 * I [Dag] tried to implement it with mutex but I could find a way to
259 * tell whether a thread already own the lock or not.
261 PyThread_type_lock
PyThread_allocate_lock(void)
265 dprintf(("PyThread_allocate_lock called\n"));
267 PyThread_init_thread();
269 aLock
= AllocNonRecursiveMutex() ;
271 dprintf(("%ld: PyThread_allocate_lock() -> %p\n", PyThread_get_thread_ident(), aLock
));
273 return (PyThread_type_lock
) aLock
;
276 void PyThread_free_lock(PyThread_type_lock aLock
)
278 dprintf(("%ld: PyThread_free_lock(%p) called\n", PyThread_get_thread_ident(),aLock
));
280 FreeNonRecursiveMutex(aLock
) ;
284 * Return 1 on success if the lock was acquired
286 * and 0 if the lock was not acquired. This means a 0 is returned
287 * if the lock has already been acquired by this thread!
289 int PyThread_acquire_lock(PyThread_type_lock aLock
, int waitflag
)
293 dprintf(("%ld: PyThread_acquire_lock(%p, %d) called\n", PyThread_get_thread_ident(),aLock
, waitflag
));
295 success
= aLock
&& EnterNonRecursiveMutex((PNRMUTEX
) aLock
, (waitflag
== 1 ? INFINITE
: 0)) == WAIT_OBJECT_0
;
297 dprintf(("%ld: PyThread_acquire_lock(%p, %d) -> %d\n", PyThread_get_thread_ident(),aLock
, waitflag
, success
));
302 void PyThread_release_lock(PyThread_type_lock aLock
)
304 dprintf(("%ld: PyThread_release_lock(%p) called\n", PyThread_get_thread_ident(),aLock
));
306 if (!(aLock
&& LeaveNonRecursiveMutex((PNRMUTEX
) aLock
)))
307 dprintf(("%ld: Could not PyThread_release_lock(%p) error: %l\n", PyThread_get_thread_ident(), aLock
, GetLastError()));