/*
  Copyright (C) 2014 Szilard Biro

  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
24 #include <dos/dostags.h>
25 #include <proto/exec.h>
26 #include <proto/dos.h>
27 #include <proto/timer.h>
34 #include "pthread_intern.h"
38 //#define USE_ASYNC_CANCEL
// Global thread bookkeeping: one slot per possible pthread ID.
ThreadInfo threads[PTHREAD_THREADS_MAX];
// Protects the threads[] table.
struct SignalSemaphore thread_sem;
// Thread-specific storage keys, shared by all threads.
TLSKey tlskeys[PTHREAD_KEYS_MAX];
// Protects the tlskeys[] table.
struct SignalSemaphore tls_sem;
49 int SemaphoreIsInvalid(struct SignalSemaphore
*sem
)
51 DB2(bug("%s(%p)\n", __FUNCTION__
, sem
));
53 return (!sem
|| sem
->ss_Link
.ln_Type
!= NT_SIGNALSEM
|| sem
->ss_WaitQueue
.mlh_Tail
!= NULL
);
56 int SemaphoreIsMine(struct SignalSemaphore
*sem
)
60 DB2(bug("%s(%p)\n", __FUNCTION__
, sem
));
64 return (sem
&& sem
->ss_NestCount
> 0 && sem
->ss_Owner
== me
);
67 ThreadInfo
*GetThreadInfo(pthread_t thread
)
69 ThreadInfo
*inf
= NULL
;
71 DB2(bug("%s(%u)\n", __FUNCTION__
, thread
));
73 // TODO: more robust error handling?
74 if (thread
< PTHREAD_THREADS_MAX
)
75 inf
= &threads
[thread
];
80 pthread_t
GetThreadId(struct Task
*task
)
84 DB2(bug("%s(%p)\n", __FUNCTION__
, task
));
86 ObtainSemaphoreShared(&thread_sem
);
88 // First thread id will be 1 so that it is different than default value of pthread_t
89 for (i
= PTHREAD_FIRST_THREAD_ID
; i
< PTHREAD_THREADS_MAX
; i
++)
91 if (threads
[i
].task
== task
)
95 ReleaseSemaphore(&thread_sem
);
#if defined __mc68000__
/* No CAS instruction on m68k */
// NOTE(review): bodies reconstructed — m68k lacks atomic CAS, so these
// emulate the GCC builtins with exec Disable()/Enable() critical sections.
static int __m68k_sync_val_compare_and_swap(int *v, int o, int n)
{
	int ret;

	Disable();
	if ((ret = *v) == o)
		*v = n;
	Enable();

	return ret;
}
#undef __sync_val_compare_and_swap
#define __sync_val_compare_and_swap(v, o, n) __m68k_sync_val_compare_and_swap(v, o, n)

static int __m68k_sync_lock_test_and_set(int *v, int n)
{
	Disable();
	*v = n;
	Enable();

	return n;
}
#undef __sync_lock_test_and_set
#define __sync_lock_test_and_set(v, n) __m68k_sync_lock_test_and_set(v, n)
#undef __sync_lock_release
#define __sync_lock_release(v) __m68k_sync_lock_test_and_set(v, 0)

static inline int __m68k_sync_add_and_fetch(int *v, int n)
{
	int ret;

	Disable();
	ret = (*v += n);
	Enable();

	return ret;
}
#undef __sync_add_and_fetch
#define __sync_add_and_fetch(v, n) __m68k_sync_add_and_fetch(v, n)
#undef __sync_sub_and_fetch
#define __sync_sub_and_fetch(v, n) __m68k_sync_add_and_fetch(v, -(n))
#endif
148 // Thread specific data functions
151 int pthread_key_delete(pthread_key_t key
)
155 D(bug("%s(%u)\n", __FUNCTION__
, key
));
157 if (key
>= PTHREAD_KEYS_MAX
)
162 ObtainSemaphore(&tls_sem
);
164 if (tls
->used
== FALSE
)
166 ReleaseSemaphore(&tls_sem
);
171 tls
->destructor
= NULL
;
173 ReleaseSemaphore(&tls_sem
);
179 // Mutex attribute functions
182 int pthread_mutexattr_gettype(pthread_mutexattr_t
*attr
, int *kind
)
184 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, kind
));
199 int pthread_mutex_timedlock(pthread_mutex_t
*mutex
, const struct timespec
*abstime
)
201 struct timeval end
, now
;
204 D(bug("%s(%p, %p)\n", __FUNCTION__
, mutex
, abstime
));
210 return pthread_mutex_lock(mutex
);
211 /*else if (abstime.tv_nsec < 0 || abstime.tv_nsec >= 1000000000)
214 TIMESPEC_TO_TIMEVAL(&end
, abstime
);
216 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
217 while ((result
= pthread_mutex_trylock(mutex
)) == EBUSY
)
220 gettimeofday(&now
, NULL
);
221 if (timercmp(&end
, &now
, <))
229 // Condition variable attribute functions
232 int pthread_condattr_init(pthread_condattr_t
*attr
)
234 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
239 memset(attr
, 0, sizeof(pthread_condattr_t
));
244 int pthread_condattr_destroy(pthread_condattr_t
*attr
)
246 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
251 memset(attr
, 0, sizeof(pthread_condattr_t
));
257 // Condition variable functions
260 int pthread_cond_timedwait_relative_np(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const struct timespec
*reltime
)
262 D(bug("%s(%p, %p, %p)\n", __FUNCTION__
, cond
, mutex
, reltime
));
264 return _pthread_cond_timedwait(cond
, mutex
, reltime
, TRUE
);
272 int pthread_barrier_init(pthread_barrier_t
*barrier
, const pthread_barrierattr_t
*attr
, unsigned int count
)
274 D(bug("%s(%p, %p, %u)\n", __FUNCTION__
, barrier
, attr
, count
));
276 if (barrier
== NULL
|| count
== 0)
279 barrier
->curr_height
= count
;
280 barrier
->total_height
= PTHREAD_BARRIER_FLAG
;
281 pthread_cond_init(&barrier
->breeched
, NULL
);
282 pthread_mutex_init(&barrier
->lock
, NULL
);
287 int pthread_barrier_destroy(pthread_barrier_t
*barrier
)
289 D(bug("%s(%p)\n", __FUNCTION__
, barrier
));
294 if (pthread_mutex_trylock(&barrier
->lock
) != 0)
297 if (barrier
->total_height
> PTHREAD_BARRIER_FLAG
)
299 pthread_mutex_unlock(&barrier
->lock
);
303 pthread_mutex_unlock(&barrier
->lock
);
305 if (pthread_cond_destroy(&barrier
->breeched
) != 0)
308 pthread_mutex_destroy(&barrier
->lock
);
309 barrier
->curr_height
= barrier
->total_height
= 0;
314 int pthread_barrier_wait(pthread_barrier_t
*barrier
)
316 D(bug("%s(%p)\n", __FUNCTION__
, barrier
));
321 pthread_mutex_lock(&barrier
->lock
);
323 // wait until everyone exits the barrier
324 while (barrier
->total_height
> PTHREAD_BARRIER_FLAG
)
325 pthread_cond_wait(&barrier
->breeched
, &barrier
->lock
);
327 // are we the first to enter?
328 if (barrier
->total_height
== PTHREAD_BARRIER_FLAG
) barrier
->total_height
= 0;
330 barrier
->total_height
++;
332 if (barrier
->total_height
== barrier
->curr_height
)
334 barrier
->total_height
+= PTHREAD_BARRIER_FLAG
- 1;
335 pthread_cond_broadcast(&barrier
->breeched
);
337 pthread_mutex_unlock(&barrier
->lock
);
339 return PTHREAD_BARRIER_SERIAL_THREAD
;
343 // wait until enough threads enter the barrier
344 while (barrier
->total_height
< PTHREAD_BARRIER_FLAG
)
345 pthread_cond_wait(&barrier
->breeched
, &barrier
->lock
);
347 barrier
->total_height
--;
349 // get entering threads to wake up
350 if (barrier
->total_height
== PTHREAD_BARRIER_FLAG
)
351 pthread_cond_broadcast(&barrier
->breeched
);
353 pthread_mutex_unlock(&barrier
->lock
);
360 // Read-write lock attribute functions
363 int pthread_rwlockattr_init(pthread_rwlockattr_t
*attr
)
365 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
370 memset(attr
, 0, sizeof(pthread_rwlockattr_t
));
375 int pthread_rwlockattr_destroy(pthread_rwlockattr_t
*attr
)
377 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
382 memset(attr
, 0, sizeof(pthread_rwlockattr_t
));
388 // Read-write lock functions
391 int pthread_rwlock_init(pthread_rwlock_t
*lock
, const pthread_rwlockattr_t
*attr
)
393 D(bug("%s(%p, %p)\n", __FUNCTION__
, lock
, attr
));
398 InitSemaphore(&lock
->semaphore
);
403 int pthread_rwlock_destroy(pthread_rwlock_t
*lock
)
405 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
410 // probably a statically allocated rwlock
411 if (SemaphoreIsInvalid(&lock
->semaphore
))
414 if (AttemptSemaphore(&lock
->semaphore
) == FALSE
)
417 ReleaseSemaphore(&lock
->semaphore
);
418 memset(lock
, 0, sizeof(pthread_rwlock_t
));
423 int pthread_rwlock_tryrdlock(pthread_rwlock_t
*lock
)
427 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
432 // initialize static rwlocks
433 if (SemaphoreIsInvalid(&lock
->semaphore
))
434 pthread_rwlock_init(lock
, NULL
);
436 ret
= AttemptSemaphoreShared(&lock
->semaphore
);
438 return (ret
== TRUE
) ? 0 : EBUSY
;
441 int pthread_rwlock_trywrlock(pthread_rwlock_t
*lock
)
445 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
450 // initialize static rwlocks
451 if (SemaphoreIsInvalid(&lock
->semaphore
))
452 pthread_rwlock_init(lock
, NULL
);
454 ret
= AttemptSemaphore(&lock
->semaphore
);
456 return (ret
== TRUE
) ? 0 : EBUSY
;
459 int pthread_rwlock_rdlock(pthread_rwlock_t
*lock
)
461 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
466 pthread_testcancel();
468 // initialize static rwlocks
469 if (SemaphoreIsInvalid(&lock
->semaphore
))
470 pthread_rwlock_init(lock
, NULL
);
472 // we might already have a write lock
473 if (SemaphoreIsMine(&lock
->semaphore
))
476 ObtainSemaphoreShared(&lock
->semaphore
);
481 int pthread_rwlock_timedrdlock(pthread_rwlock_t
*lock
, const struct timespec
*abstime
)
483 struct timeval end
, now
;
486 D(bug("%s(%p, %p)\n", __FUNCTION__
, lock
, abstime
));
492 return pthread_rwlock_rdlock(lock
);
494 pthread_testcancel();
496 TIMESPEC_TO_TIMEVAL(&end
, abstime
);
498 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
499 while ((result
= pthread_rwlock_tryrdlock(lock
)) == EBUSY
)
502 gettimeofday(&now
, NULL
);
503 if (timercmp(&end
, &now
, <))
510 int pthread_rwlock_wrlock(pthread_rwlock_t
*lock
)
512 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
517 pthread_testcancel();
519 // initialize static rwlocks
520 if (SemaphoreIsInvalid(&lock
->semaphore
))
521 pthread_rwlock_init(lock
, NULL
);
523 if (SemaphoreIsMine(&lock
->semaphore
))
526 ObtainSemaphore(&lock
->semaphore
);
531 int pthread_rwlock_timedwrlock(pthread_rwlock_t
*lock
, const struct timespec
*abstime
)
533 struct timeval end
, now
;
536 D(bug("%s(%p, %p)\n", __FUNCTION__
, lock
, abstime
));
542 return pthread_rwlock_wrlock(lock
);
544 pthread_testcancel();
546 TIMESPEC_TO_TIMEVAL(&end
, abstime
);
548 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
549 while ((result
= pthread_rwlock_trywrlock(lock
)) == EBUSY
)
552 gettimeofday(&now
, NULL
);
553 if (timercmp(&end
, &now
, <))
560 int pthread_rwlock_unlock(pthread_rwlock_t
*lock
)
562 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
567 // initialize static rwlocks
568 if (SemaphoreIsInvalid(&lock
->semaphore
))
569 pthread_rwlock_init(lock
, NULL
);
571 //if (!SemaphoreIsMine(&lock->semaphore))
572 // if no one has obtained the semaphore don't unlock the rwlock
573 // this can be a leap of faith because we don't maintain a separate list of readers
574 if (lock
->semaphore
.ss_NestCount
< 1)
577 ReleaseSemaphore(&lock
->semaphore
);
583 // Spinlock functions
586 int pthread_spin_init(pthread_spinlock_t
*lock
, int pshared
)
588 D(bug("%s(%p, %d)\n", __FUNCTION__
, lock
, pshared
));
598 int pthread_spin_destroy(pthread_spinlock_t
*lock
)
600 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
605 int pthread_spin_lock(pthread_spinlock_t
*lock
)
607 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
612 while (__sync_lock_test_and_set((int *)lock
, 1))
613 sched_yield(); // TODO: don't yield the CPU every iteration
618 int pthread_spin_trylock(pthread_spinlock_t
*lock
)
620 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
625 if (__sync_lock_test_and_set((int *)lock
, 1))
631 int pthread_spin_unlock(pthread_spinlock_t
*lock
)
633 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
638 __sync_lock_release((int *)lock
);
644 // Thread attribute functions
647 int pthread_attr_getdetachstate(const pthread_attr_t
*attr
, int *detachstate
)
649 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, detachstate
));
654 if (detachstate
!= NULL
)
655 *detachstate
= attr
->detachstate
;
660 int pthread_attr_setdetachstate(pthread_attr_t
*attr
, int detachstate
)
662 D(bug("%s(%p, %d)\n", __FUNCTION__
, attr
, detachstate
));
664 if (attr
== NULL
|| detachstate
!= PTHREAD_CREATE_JOINABLE
)
667 attr
->detachstate
= detachstate
;
672 int pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t *stacksize
)
674 D(bug("%s(%p, %p, %p)\n", __FUNCTION__
, attr
, stackaddr
, stacksize
));
679 if (stackaddr
!= NULL
)
680 *stackaddr
= attr
->stackaddr
;
682 if (stacksize
!= NULL
)
683 *stacksize
= attr
->stacksize
;
688 int pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
690 D(bug("%s(%p, %p, %u)\n", __FUNCTION__
, attr
, stackaddr
, stacksize
));
692 if (attr
== NULL
|| (stackaddr
!= NULL
&& stacksize
== 0))
695 attr
->stackaddr
= stackaddr
;
696 attr
->stacksize
= stacksize
;
701 int pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
703 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, stacksize
));
705 return pthread_attr_getstack(attr
, NULL
, stacksize
);
708 int pthread_attr_getschedparam(const pthread_attr_t
*attr
, struct sched_param
*param
)
710 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, param
));
716 *param
= attr
->param
;
721 int pthread_attr_setschedparam(pthread_attr_t
*attr
, const struct sched_param
*param
)
723 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, param
));
725 if (attr
== NULL
|| param
== NULL
)
728 attr
->param
= *param
;
#ifdef USE_ASYNC_CANCEL
// Asynchronous cancellation: an exec exception handler that runs
// pthread_testcancel() in the context of the target task.
// NOTE(review): platform #ifdef skeleton reconstructed — MorphOS uses an
// EmulLibEntry trampoline reading registers directly, AROS uses the
// AROS_UFH3S register-argument macros. Verify against the build for
// each platform.
#ifdef __MORPHOS__
static ULONG CancelHandlerFunc(void);
static struct EmulLibEntry CancelHandler =
{
	TRAP_LIB, 0, (void (*)(void))CancelHandlerFunc
};
static ULONG CancelHandlerFunc(void)
{
	ULONG signals = (ULONG)REG_D0;
	APTR data = (APTR)REG_A1;
	struct ExecBase *SysBase = (struct ExecBase *)REG_A6;
#else
AROS_UFH3S(ULONG, CancelHandler,
	AROS_UFHA(ULONG, signals, D0),
	AROS_UFHA(APTR, data, A1),
	AROS_UFHA(struct ExecBase *, SysBase, A6))
{
	AROS_USERFUNC_INIT
#endif

	DB2(bug("%s(%u, %p, %p)\n", __FUNCTION__, signals, data, SysBase));

	// exit the thread here if a cancellation is pending and enabled
	pthread_testcancel();

	return signals;

#ifdef __MORPHOS__
}
#else
	AROS_USERFUNC_EXIT
}
#endif
#endif
769 int pthread_detach(pthread_t thread
)
773 D(bug("%s(%u, %p)\n", __FUNCTION__
, thread
, value_ptr
));
775 inf
= GetThreadInfo(thread
);
780 inf
->detached
= TRUE
;
785 void pthread_testcancel(void)
790 D(bug("%s()\n", __FUNCTION__
));
792 thread
= pthread_self();
793 inf
= GetThreadInfo(thread
);
795 if (inf
->canceled
&& (inf
->cancelstate
== PTHREAD_CANCEL_ENABLE
))
796 pthread_exit(PTHREAD_CANCELED
);
799 static void OnceCleanup(void *arg
)
801 pthread_once_t
*once_control
;
803 DB2(bug("%s(%p)\n", __FUNCTION__
, arg
));
805 once_control
= (pthread_once_t
*)arg
;
806 pthread_spin_unlock(&once_control
->lock
);
809 int pthread_once(pthread_once_t
*once_control
, void (*init_routine
)(void))
811 D(bug("%s(%p, %p)\n", __FUNCTION__
, once_control
, init_routine
));
813 if (once_control
== NULL
|| init_routine
== NULL
)
816 if (__sync_val_compare_and_swap(&once_control
->started
, FALSE
, TRUE
))
818 pthread_spin_lock(&once_control
->lock
);
819 if (!once_control
->done
)
821 pthread_cleanup_push(OnceCleanup
, once_control
);
823 pthread_cleanup_pop(0);
824 once_control
->done
= TRUE
;
826 pthread_spin_unlock(&once_control
->lock
);
833 // Scheduling functions
836 int pthread_setschedparam(pthread_t thread
, int policy
, const struct sched_param
*param
)
840 D(bug("%s(%u, %d, %p)\n", __FUNCTION__
, thread
, policy
, param
));
845 inf
= GetThreadInfo(thread
);
850 SetTaskPri(inf
->task
, param
->sched_priority
);
855 int pthread_getschedparam(pthread_t thread
, int *policy
, struct sched_param
*param
)
859 D(bug("%s(%u, %d, %p)\n", __FUNCTION__
, thread
, policy
, param
));
861 if ((param
== NULL
) || (policy
== NULL
))
864 inf
= GetThreadInfo(thread
);
869 param
->sched_priority
= inf
->task
->tc_Node
.ln_Pri
;
876 // Non-portable functions
878 int pthread_setname_np(pthread_t thread
, const char *name
)
884 D(bug("%s(%u, %s)\n", __FUNCTION__
, thread
, name
));
889 inf
= GetThreadInfo(thread
);
894 currentname
= GetNodeName(inf
->task
);
896 if (inf
->parent
== NULL
)
897 namelen
= strlen(currentname
) + 1;
901 if (strlen(name
) + 1 > namelen
)
904 strncpy(currentname
, name
, namelen
);
909 int pthread_getname_np(pthread_t thread
, char *name
, size_t len
)
914 D(bug("%s(%u, %p, %u)\n", __FUNCTION__
, thread
, name
, len
));
916 if (name
== NULL
|| len
== 0)
919 inf
= GetThreadInfo(thread
);
924 currentname
= GetNodeName(inf
->task
);
926 if (strlen(currentname
) + 1 > len
)
929 // TODO: partially copy the name?
930 strncpy(name
, currentname
, len
);
936 // Cancellation cleanup
939 void pthread_cleanup_push(void (*routine
)(void *), void *arg
)
943 CleanupHandler
*handler
;
945 D(bug("%s(%p, %p)\n", __FUNCTION__
, routine
, arg
));
950 handler
= malloc(sizeof(CleanupHandler
));
955 thread
= pthread_self();
956 inf
= GetThreadInfo(thread
);
958 handler
->routine
= routine
;
960 AddTail((struct List
*)&inf
->cleanup
, (struct Node
*)handler
);
963 void pthread_cleanup_pop(int execute
)
967 CleanupHandler
*handler
;
969 D(bug("%s(%d)\n", __FUNCTION__
, execute
));
971 thread
= pthread_self();
972 inf
= GetThreadInfo(thread
);
973 handler
= (CleanupHandler
*)RemTail((struct List
*)&inf
->cleanup
);
975 if (handler
&& handler
->routine
&& execute
)
976 handler
->routine(handler
->arg
);
985 int pthread_kill(pthread_t thread
, int sig
)
987 D(bug("%s(%u, %d) not implemented\n", __FUNCTION__
, thread
, sig
));
993 // Constructors, destructors
996 static int _Init_Func(void)
998 DB2(bug("%s()\n", __FUNCTION__
));
1000 //memset(&threads, 0, sizeof(threads));
1001 InitSemaphore(&thread_sem
);
1002 InitSemaphore(&tls_sem
);
1003 // reserve ID 0 for the main thread
1009 static void _Exit_Func(void)
1015 DB2(bug("%s()\n", __FUNCTION__
));
1017 // wait for the threads?
1019 for (i
= 0; i
< PTHREAD_THREADS_MAX
; i
++)
1020 pthread_join(i
, NULL
);
1025 ADD2INIT(_Init_Func
, 0);
1026 ADD2EXIT(_Exit_Func
, 0);
1028 static CONSTRUCTOR_P(_Init_Func
, 100)
1030 return !_Init_Func();
1033 static DESTRUCTOR_P(_Exit_Func
, 100)