/*
 * [AROS.git] rom/exec/semaphores.c
 * (extraction artifact: viewer breadcrumbs "revert between 56095 -> 55830 in arch",
 *  blob 30b55a4f3d7c10f56bae653205d69f7910eaa561)
 */
/*
    Copyright © 1995-2017, The AROS Development Team. All rights reserved.
    $Id$

    Desc: Semaphore internal handling
    Lang: english
*/
#define DEBUG 0

#include <aros/atomic.h>
#include <aros/debug.h>
#include <proto/exec.h>

#include "exec_util.h"
#include "semaphores.h"
18 BOOL CheckSemaphore(struct SignalSemaphore *sigSem, struct TraceLocation *caller, struct ExecBase *SysBase)
20 /* TODO: Introduce AlertContext for this */
22 if (KernelBase && KrnIsSuper())
24 /* FindTask() is called only here, for speedup */
25 struct Task *ThisTask = GET_THIS_TASK;
27 kprintf("%s called in supervisor mode!!!\n"
28 "sem = 0x%p task = 0x%p (%s)\n\n", caller->function, sigSem, ThisTask, ThisTask->tc_Node.ln_Name);
29 Exec_ExtAlert(ACPU_PrivErr & ~AT_DeadEnd, __builtin_return_address(0), CALLER_FRAME, 0, NULL, SysBase);
31 return FALSE;
34 /* Some m68k programs initialize semaphores manually, without setting up ln_Type */
35 #if !defined(__mc68000__) || DEBUG
36 if ((sigSem->ss_Link.ln_Type != NT_SIGNALSEM) || (sigSem->ss_WaitQueue.mlh_Tail != NULL))
38 struct Task *ThisTask = GET_THIS_TASK;
40 kprintf("%s called on a not initialized semaphore!!!\n"
41 "sem = 0x%p task = 0x%p (%s)\n\n", caller->function, sigSem, ThisTask, ThisTask->tc_Node.ln_Name);
42 Exec_ExtAlert(AN_SemCorrupt, __builtin_return_address(0), CALLER_FRAME, 0, NULL, SysBase);
44 return FALSE;
46 #endif
48 return TRUE;
51 void InternalObtainSemaphore(struct SignalSemaphore *sigSem, struct Task *owner, struct TraceLocation *caller, struct ExecBase *SysBase)
53 struct Task *ThisTask = GET_THIS_TASK;
56 * If there's no ThisTask, the function is called from within memory
57 * allocator in exec's pre-init code. We are already single-threaded,
58 * just return. :)
60 if (!ThisTask)
61 return;
64 * Freeing memory during RemTask(NULL). We are already single-threaded by
65 * Forbid(), and waiting isn't possible because task context is being deallocated.
67 if (ThisTask->tc_State == TS_REMOVED)
68 return;
70 if (!CheckSemaphore(sigSem, caller, SysBase))
71 return; /* A crude attempt to recover... */
74 * Arbitrate for the semaphore structure.
75 * TODO: SMP-aware versions of this code likely need to use spinlocks here
77 Forbid();
80 * ss_QueueCount == -1 indicates that the semaphore is
81 * free, so we increment this straight away. If it then
82 * equals 0, then we are the first to allocate this semaphore.
84 sigSem->ss_QueueCount++;
86 if (sigSem->ss_QueueCount == 0)
88 /* We now own the semaphore. This is quick. */
89 sigSem->ss_Owner = owner;
90 sigSem->ss_NestCount++;
93 * The semaphore is in use.
94 * It could be either shared (ss_Owner == NULL) or it could already be exclusively owned
95 * by this task (ss_Owner == ThisTask).
96 * Exclusive or shared mode of this function is determined by 'owner' parameter.
97 * Actually it's pointer to a task which is allowed to share the lock with us.
98 * If it's equal to 'ThisTask', we are locking the semaphore in exclusive mode. If it's NULL,
99 * we are locking in shared mode. This helps to optimize code against speed, and remove
100 * extra comparisons.
102 else if ((sigSem->ss_Owner == ThisTask) || (sigSem->ss_Owner == owner))
104 /* Yes, just increase the nesting count */
105 sigSem->ss_NestCount++;
107 /* Else, some other task owns it. We have to set a waiting request here. */
108 else
111 * We need a node to mark our semaphore request. Lets use some
112 * stack memory.
114 struct SemaphoreRequest sr;
115 sr.sr_Waiter = ThisTask;
117 if (owner == NULL)
118 sr.sr_Waiter = (struct Task *)((IPTR)(sr.sr_Waiter) | SM_SHARED);
121 * Have to clear the signal to make sure that we don't
122 * return immediately. We then add the SemReq to the
123 * waiters list of the semaphore. We were the last to
124 * request, so we must be the last to get the semaphore.
127 /* This must be atomic! */
128 AROS_ATOMIC_AND(ThisTask->tc_SigRecvd, ~SIGF_SINGLE);
130 AddTail((struct List *)&sigSem->ss_WaitQueue, (struct Node *)&sr);
133 * Finally, we simply wait, ReleaseSemaphore() will fill in
134 * who owns the semaphore.
136 Wait(SIGF_SINGLE);
139 /* All Done! */
140 Permit();
143 ULONG InternalAttemptSemaphore(struct SignalSemaphore *sigSem, struct Task *owner, struct TraceLocation *caller, struct ExecBase *SysBase)
145 struct Task *ThisTask = GET_THIS_TASK;
146 ULONG retval = TRUE;
148 if (!CheckSemaphore(sigSem, caller, SysBase))
149 return FALSE; /* A crude attempt to recover... */
152 * Arbitrate for the semaphore structure.
153 * TODO: SMP-aware versions of this code likely need to use spinlocks here
155 Forbid();
157 /* Increment the queue count */
158 sigSem->ss_QueueCount++;
160 if (sigSem->ss_QueueCount == 0)
162 /* The semaphore wasn't owned. We can now own it */
163 sigSem->ss_Owner = owner;
164 sigSem->ss_NestCount++;
166 else if ((sigSem->ss_Owner == ThisTask) || (sigSem->ss_Owner == owner))
168 /* The semaphore was owned by this task, or is shared, just increase the nest count */
169 sigSem->ss_NestCount++;
171 else
173 /* We can't get ownership, just return it. */
174 sigSem->ss_QueueCount--;
175 retval = FALSE;
178 /* All done. */
179 Permit();
181 return retval;