Indentation fix, cleanup.
[AROS.git] / arch/arm-native/kernel/kernel_scheduler.c
/*
    Copyright © 2015-2016, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <exec/alerts.h>
#include <exec/execbase.h>
#include <exec/lists.h>
#include <proto/exec.h>
#include <proto/kernel.h>

//#include <kernel_base.h>
#include <kernel_debug.h>
#include <kernel_scheduler.h>

#include "kernel_cpu.h"

#define AROS_NO_ATOMIC_OPERATIONS
#include <exec_platform.h>

#include <aros/types/spinlock_s.h>

#include <etask.h>

#include "exec_intern.h"

#define DSCHED(x)
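/*
 * NOTE: DSCHED() expands to nothing, so every DSCHED(bug(...)) statement below
 * is compiled out. Redefining it as "#define DSCHED(x) x" re-enables the
 * scheduler trace output (the cpunum used by those messages is only declared
 * when DEBUG is defined).
 */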
/* Check if the currently running task on this cpu should be rescheduled */
BOOL core_Schedule(void)
{
#if defined(DEBUG)
    int cpunum = GetCPUNumber();
#endif
    struct Task *task = GET_THIS_TASK;
    BOOL corereschedule = TRUE;

    DSCHED(bug("[Kernel:%02d] core_Schedule()\n", cpunum));

    FLAG_SCHEDSWITCH_CLEAR;

    /* If the task has a pending exception, reschedule it so that the dispatcher may handle the exception */
    if (!(task->tc_Flags & TF_EXCEPT))
    {
#if defined(__AROSEXEC_SMP__)
        KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, NULL,
                    SPINLOCK_MODE_READ);
#endif
        /* Is the TaskReady list empty? If yes, the running task is the only one. Let it work */
        if (IsListEmpty(&SysBase->TaskReady))
            corereschedule = FALSE;
        else
        {
            struct Task *nexttask;
#if defined(__AROSEXEC_SMP__)
            int cpunum = GetCPUNumber();
            uint32_t cpumask = (1 << cpunum);
#endif
            /*
                If there are tasks ready for this cpu that have equal or lower priority,
                and the current task has used its allotted time - reschedule so they can run
            */
            for (nexttask = (struct Task *)GetHead(&SysBase->TaskReady); nexttask != NULL; nexttask = (struct Task *)GetSucc(nexttask))
            {
#if defined(__AROSEXEC_SMP__)
                if ((GetIntETask(nexttask)->iet_CpuAffinity & cpumask) == cpumask)
                {
#endif
                if (nexttask->tc_Node.ln_Pri <= task->tc_Node.ln_Pri)
                {
                    /* If the running task has not used its whole quantum yet, let it work */
                    if (!FLAG_SCHEDQUANTUM_ISSET)
                        corereschedule = FALSE;
                }
                break;
#if defined(__AROSEXEC_SMP__)
                }
#endif
            }
        }
#if defined(__AROSEXEC_SMP__)
        KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock);
#endif
    }

    DSCHED(
        if (corereschedule)
            bug("[Kernel:%02d] '%s' @ 0x%p needs rescheduling ..\n", cpunum, task->tc_Node.ln_Name, task);
    );

    return corereschedule;
}
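/*
 * Summary of the policy above: the running task keeps the cpu when no other
 * task is ready, or when the first eligible ready task (on SMP: the first one
 * whose affinity mask includes this cpu) has equal or lower priority and the
 * current quantum has not expired yet. Otherwise (pending exception,
 * higher-priority ready task, or expired quantum) core_Schedule() returns TRUE
 * and the platform code is expected to switch and dispatch; the call sites
 * live outside this file.
 */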
/* Switch the currently running task on this cpu to the ready state */
void core_Switch(void)
{
#if defined(DEBUG)
    int cpunum = GetCPUNumber();
#endif
    struct Task *task = GET_THIS_TASK;

    DSCHED(bug("[Kernel:%02d] core_Switch(%08x)\n", cpunum, task->tc_State));

    if (task->tc_State == TS_RUN)
    {
        DSCHED(bug("[Kernel:%02d] Switching away from '%s' @ 0x%p\n", cpunum, task->tc_Node.ln_Name, task));
#if defined(__AROSEXEC_SMP__)
        KrnSpinLock(&PrivExecBase(SysBase)->TaskRunningSpinLock, NULL,
                    SPINLOCK_MODE_WRITE);
        Remove(&task->tc_Node);
        KrnSpinUnLock(&PrivExecBase(SysBase)->TaskRunningSpinLock);
#endif
        task->tc_State = TS_READY;

        /* If the current task has gone out of stack bounds, suspend it to prevent further damage to the system */
        if (task->tc_SPReg <= task->tc_SPLower || task->tc_SPReg > task->tc_SPUpper)
        {
            bug("[Kernel:%02d] '%s' @ 0x%p went out of stack limits\n", cpunum, task->tc_Node.ln_Name, task);
            bug("[Kernel:%02d] - Lower 0x%p, upper 0x%p, SP 0x%p\n", cpunum, task->tc_SPLower, task->tc_SPUpper, task->tc_SPReg);

            task->tc_SigWait = 0;
            task->tc_State = TS_WAIT;
#if defined(__AROSEXEC_SMP__)
            KrnSpinLock(&PrivExecBase(SysBase)->TaskWaitSpinLock, NULL,
                        SPINLOCK_MODE_WRITE);
#endif
            Enqueue(&SysBase->TaskWait, &task->tc_Node);
#if defined(__AROSEXEC_SMP__)
            KrnSpinUnLock(&PrivExecBase(SysBase)->TaskWaitSpinLock);
#endif

            Alert(AN_StackProbe);
        }

        task->tc_IDNestCnt = IDNESTCOUNT_GET;

        if (task->tc_Flags & TF_SWITCH)
            AROS_UFC1NR(void, task->tc_Switch, AROS_UFCA(struct ExecBase *, SysBase, A6));

        if (task->tc_State == TS_READY)
        {
            DSCHED(bug("[Kernel:%02d] Setting '%s' @ 0x%p as ready\n", cpunum, task->tc_Node.ln_Name, task));
#if defined(__AROSEXEC_SMP__)
            KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, NULL,
                        SPINLOCK_MODE_WRITE);
#endif
            Enqueue(&SysBase->TaskReady, &task->tc_Node);
#if defined(__AROSEXEC_SMP__)
            KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock);
#endif
        }
    }
}
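/*
 * Note: core_Switch() only acts on a task that is still in TS_RUN state; a task
 * that has already changed state (e.g. one that went to sleep on its own) is
 * left untouched. A task whose stack pointer lies outside its bounds is parked
 * on TaskWait and AN_StackProbe is raised instead of requeueing it on TaskReady.
 * If the task has TF_SWITCH set, its tc_Switch hook is invoked before requeueing.
 */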
/* Dispatch a "new" ready task on this cpu */
struct Task *core_Dispatch(void)
{
    struct Task *newtask;
    struct Task *task = GET_THIS_TASK;
#if defined(__AROSEXEC_SMP__) || defined(DEBUG)
    int cpunum = GetCPUNumber();
#endif
#if defined(__AROSEXEC_SMP__)
    uint32_t cpumask = (1 << cpunum);
#endif

    DSCHED(bug("[Kernel:%02d] core_Dispatch()\n", cpunum));

#if defined(__AROSEXEC_SMP__)
    KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, NULL,
                SPINLOCK_MODE_WRITE);
#endif
    /* Take the first ready task (on SMP: the first one allowed to run on this cpu) */
    for (newtask = (struct Task *)GetHead(&SysBase->TaskReady); newtask != NULL; newtask = (struct Task *)GetSucc(newtask))
    {
#if defined(__AROSEXEC_SMP__)
        if ((GetIntETask(newtask)->iet_CpuAffinity & cpumask) == cpumask)
        {
#endif
        Remove(&newtask->tc_Node);
        break;
#if defined(__AROSEXEC_SMP__)
        }
#endif
    }
#if defined(__AROSEXEC_SMP__)
    KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock);
#endif

    /* If nothing is ready, keep running the current task (unless it is waiting) */
    if ((!newtask) && (task) && (task->tc_State != TS_WAIT))
        newtask = task;

    if (newtask != NULL)
    {
        if (newtask->tc_State == TS_READY || newtask->tc_State == TS_RUN)
        {
            DSCHED(bug("[Kernel:%02d] Preparing to run '%s' @ 0x%p\n",
                       cpunum, newtask->tc_Node.ln_Name, newtask));

            SysBase->DispCount++;
            IDNESTCOUNT_SET(newtask->tc_IDNestCnt);
            SET_THIS_TASK(newtask);
            SysBase->Elapsed = SysBase->Quantum;
            FLAG_SCHEDQUANTUM_CLEAR;

            /* Check the stack of the task we are about to launch. */
            if ((newtask->tc_SPReg <= newtask->tc_SPLower) ||
                (newtask->tc_SPReg > newtask->tc_SPUpper))
                newtask->tc_State = TS_WAIT;
            else
                newtask->tc_State = TS_RUN;
        }

        BOOL launchtask = TRUE;
#if defined(__AROSEXEC_SMP__)
        if (newtask->tc_State == TS_SPIN)
        {
            /* Move it to the spinning list */
            KrnSpinLock(&PrivExecBase(SysBase)->TaskSpinningLock, NULL,
                        SPINLOCK_MODE_WRITE);
            AddHead(&PrivExecBase(SysBase)->TaskSpinning, &newtask->tc_Node);
            KrnSpinUnLock(&PrivExecBase(SysBase)->TaskSpinningLock);
            launchtask = FALSE;
        }
#endif
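        /* A task that ended up in TS_WAIT (e.g. it failed the stack check above) cannot be launched either */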
        if (newtask->tc_State == TS_WAIT)
        {
#if defined(__AROSEXEC_SMP__)
            KrnSpinLock(&PrivExecBase(SysBase)->TaskWaitSpinLock, NULL,
                        SPINLOCK_MODE_WRITE);
#endif
            /* Park the selected task on the wait list */
            Enqueue(&SysBase->TaskWait, &newtask->tc_Node);
#if defined(__AROSEXEC_SMP__)
            KrnSpinUnLock(&PrivExecBase(SysBase)->TaskWaitSpinLock);
#endif
            launchtask = FALSE;
        }

        if (!launchtask)
        {
            /* If the new task should not run, force a reschedule */
            DSCHED(bug("[Kernel:%02d] Skipping '%s' @ 0x%p (state %08x)\n", cpunum, newtask->tc_Node.ln_Name, newtask, newtask->tc_State));

            core_Switch();
            newtask = core_Dispatch();
        }
        else
        {
            DSCHED(bug("[Kernel:%02d] Launching '%s' @ 0x%p (state %08x)\n", cpunum, newtask->tc_Node.ln_Name, newtask, newtask->tc_State));
        }
    }
    else
    {
        /* Go idle if there is nothing to do ... */
        DSCHED(bug("[Kernel:%02d] No ready Task(s) - entering sleep mode\n", cpunum));

        /*
         * The idle counter is incremented every time we enter here,
         * not just once. This is intentional.
         */
        SysBase->IdleCount++;
        FLAG_SCHEDSWITCH_SET;
    }

    return newtask;
}
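/*
 * How the three routines are assumed to fit together (illustrative sketch only;
 * the real call sites are in the platform-specific interrupt/syscall handling
 * code, not in this file, so the exact ordering there may differ):
 *
 *     if (core_Schedule())        // does the running task have to yield?
 *     {
 *         core_Switch();          // put it back on TaskReady (or TaskWait)
 *         task = core_Dispatch(); // pick the next task for this cpu
 *         // ...restore the context of 'task' and resume it...
 *     }
 */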