/*
    Copyright © 1995-2015, The AROS Development Team. All rights reserved.

    Desc: Semaphore internal handling
*/

#include <aros/atomic.h>
#include <aros/debug.h>
#include <proto/exec.h>
#include <proto/kernel.h>

#include "exec_util.h"
#include "semaphores.h"
BOOL CheckSemaphore(struct SignalSemaphore *sigSem, struct TraceLocation *caller, struct ExecBase *SysBase)
{
    /* TODO: Introduce AlertContext for this */
    if (KernelBase && KrnIsSuper())
    {
        /* FindTask() is called only here, for speedup */
        struct Task *ThisTask = GET_THIS_TASK;

        kprintf("%s called in supervisor mode!!!\n"
                "sem = 0x%p task = 0x%p (%s)\n\n", caller->function, sigSem, ThisTask, ThisTask->tc_Node.ln_Name);
        Exec_ExtAlert(ACPU_PrivErr & ~AT_DeadEnd, __builtin_return_address(0), CALLER_FRAME, 0, NULL, SysBase);

        return FALSE;
    }
    if ((sigSem->ss_Link.ln_Type != NT_SIGNALSEM) || (sigSem->ss_WaitQueue.mlh_Tail != NULL))
    {
        struct Task *ThisTask = GET_THIS_TASK;

        kprintf("%s called on an uninitialized semaphore!!!\n"
                "sem = 0x%p task = 0x%p (%s)\n\n", caller->function, sigSem, ThisTask, ThisTask->tc_Node.ln_Name);
        Exec_ExtAlert(AN_SemCorrupt, __builtin_return_address(0), CALLER_FRAME, 0, NULL, SysBase);

        return FALSE;
    }

    return TRUE;
}
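
/*
 * CheckSemaphore() is the common sanity gate for the obtain/attempt paths
 * below; only caller->function is consulted from the TraceLocation, and only
 * for the diagnostic output.
 */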

void InternalObtainSemaphore(struct SignalSemaphore *sigSem, struct Task *owner, struct TraceLocation *caller, struct ExecBase *SysBase)
{
    struct Task *ThisTask = GET_THIS_TASK;
    /*
     * If there's no ThisTask, the function is called from within the memory
     * allocator in exec's pre-init code. We are already single-threaded,
     * so we can simply return.
     */
    if (!ThisTask)
        return;

    /*
     * Freeing memory during RemTask(NULL). We are already single-threaded by
     * Forbid(), and waiting isn't possible because the task context is being deallocated.
     */
    if (ThisTask->tc_State == TS_REMOVED)
        return;
    if (!CheckSemaphore(sigSem, caller, SysBase))
        return; /* A crude attempt to recover... */
    /*
     * Arbitrate for the semaphore structure.
     * TODO: SMP-aware versions of this code likely need to use spinlocks here.
     */
    Forbid();
    /*
     * ss_QueueCount == -1 indicates that the semaphore is
     * free, so we increment this straight away. If it then
     * equals 0, then we are the first to allocate this semaphore.
     */
    sigSem->ss_QueueCount++;
    if (sigSem->ss_QueueCount == 0)
    {
        /* We now own the semaphore. This is quick. */
        sigSem->ss_Owner = owner;
        sigSem->ss_NestCount++;
    }
    /*
     * The semaphore is in use.
     * It could either be shared (ss_Owner == NULL) or already be exclusively owned
     * by this task (ss_Owner == ThisTask).
     * Whether this function locks exclusively or shared is determined by the 'owner'
     * parameter: it is a pointer to the task that is allowed to share the lock with us.
     * If it equals 'ThisTask', we are locking the semaphore in exclusive mode; if it is
     * NULL, we are locking in shared mode. This keeps the code fast by avoiding extra
     * comparisons. (An illustrative call sketch follows this function.)
     */
    else if ((sigSem->ss_Owner == ThisTask) || (sigSem->ss_Owner == owner))
    {
        /* Yes, just increase the nesting count */
        sigSem->ss_NestCount++;
    }
    /* Else, some other task owns it. We have to queue a waiting request here. */
    else
    {
        /*
         * We need a node to mark our semaphore request. Let's use some
         * stack memory.
         */
        struct SemaphoreRequest sr;
        sr.sr_Waiter = ThisTask;

        /* Tag the request as shared when locking in shared mode (owner == NULL) */
        if (owner == NULL)
            sr.sr_Waiter = (struct Task *)((IPTR)(sr.sr_Waiter) | SM_SHARED);
        /*
         * Have to clear the signal to make sure that we don't
         * return immediately. We then add the SemReq to the
         * waiters list of the semaphore. We were the last to
         * request, so we must be the last to get the semaphore.
         */

        /* This must be atomic! */
        AROS_ATOMIC_AND(ThisTask->tc_SigRecvd, ~SIGF_SINGLE);
        AddTail((struct List *)&sigSem->ss_WaitQueue, (struct Node *)&sr);

        /*
         * Finally, we simply wait; ReleaseSemaphore() will fill in
         * who owns the semaphore.
         */
        Wait(SIGF_SINGLE);
    }

    Permit();
}
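
/*
 * Illustrative call sketch (assumption, not part of this file): the public
 * ObtainSemaphore()/ObtainSemaphoreShared() entry points are expected to wrap
 * the function above roughly as follows, selecting exclusive or shared mode
 * through the 'owner' argument; 'loc' stands for whatever TraceLocation the
 * caller fills in:
 *
 *     InternalObtainSemaphore(sigSem, GET_THIS_TASK, &loc, SysBase);  // exclusive
 *     InternalObtainSemaphore(sigSem, NULL,          &loc, SysBase);  // shared
 */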

ULONG InternalAttemptSemaphore(struct SignalSemaphore *sigSem, struct Task *owner, struct TraceLocation *caller, struct ExecBase *SysBase)
{
    struct Task *ThisTask = GET_THIS_TASK;
    ULONG retVal = TRUE;
    if (!CheckSemaphore(sigSem, caller, SysBase))
        return FALSE; /* A crude attempt to recover... */
    /*
     * Arbitrate for the semaphore structure.
     * TODO: SMP-aware versions of this code likely need to use spinlocks here.
     */
    Forbid();
    /* Increment the queue count */
    sigSem->ss_QueueCount++;
    if (sigSem->ss_QueueCount == 0)
    {
        /* The semaphore wasn't owned. We can now own it. */
        sigSem->ss_Owner = owner;
        sigSem->ss_NestCount++;
    }
    else if ((sigSem->ss_Owner == ThisTask) || (sigSem->ss_Owner == owner))
    {
        /* The semaphore was owned by this task, or is shared; just increase the nest count */
        sigSem->ss_NestCount++;
    }
    else
    {
        /* We can't get ownership, so undo the queue count and report failure. */
        sigSem->ss_QueueCount--;
        retVal = FALSE;
    }

    Permit();

    return retVal;
}
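
/*
 * Illustrative usage (assumption): callers of the non-blocking variant are
 * expected to test the result instead of waiting, along the lines of
 *
 *     if (InternalAttemptSemaphore(sigSem, GET_THIS_TASK, &loc, SysBase))
 *     {
 *         ... guarded work ...
 *         ReleaseSemaphore(sigSem);
 *     }
 *
 * where 'loc' again stands for a caller-supplied TraceLocation.
 */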