revert between 56095 -> 55830 in arch
[AROS.git] / arch / all-pc / kernel / kernel_scheduler.c
blobe48beb4470761465e5ab125861be84435252c107
1 /*
2 Copyright © 2017, The AROS Development Team. All rights reserved.
3 $Id$
4 */
6 #include <exec/alerts.h>
7 #include <exec/execbase.h>
8 #include <exec/lists.h>
9 #include <proto/exec.h>
11 #define __KERNEL_NOLIBBASE__
12 #include <proto/kernel.h>
14 #include <kernel_base.h>
15 #include <kernel_debug.h>
16 #include <kernel_scheduler.h>
18 #include "kernel_cpu.h"
20 #include <exec_platform.h>
22 #include <aros/types/spinlock_s.h>
24 #include <etask.h>
26 #define __AROS_KERNEL__
28 #include "exec_intern.h"
30 #include "apic.h"
32 #ifdef DEBUG
33 #undef DEBUG
34 #endif
36 #define DEBUG 0
38 #if (DEBUG > 0)
39 #define DSCHED(x) x
40 #else
41 #define DSCHED(x)
42 #endif
#if defined(__AROSEXEC_SMP__)
/*
 * Initialise the per-CPU scheduler private data with the default
 * scheduling granularity and quantum values.
 * (SMP builds only - each AP core gets its own X86SchedulerPrivate.)
 */
void core_InitScheduleData(struct X86SchedulerPrivate *schedData)
{
    DSCHED(bug("[Kernel] %s(0x%p)\n", __func__, schedData);)
    schedData->Granularity = SCHEDGRAN_VALUE;
    schedData->Quantum = SCHEDQUANTUM_VALUE;
}
#endif
53 /* Check if the currently running task on this cpu should be rescheduled.. */
54 BOOL core_Schedule(void)
56 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
57 cpuid_t cpuNo;
58 #endif
59 struct Task *task;
60 BOOL corereschedule = TRUE;
62 DSCHED(bug("[Kernel] %s()\n", __func__);)
64 task = GET_THIS_TASK;
65 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
66 cpuNo = KrnGetCPUNumber();
67 #endif
69 DSCHED(
70 bug("[Kernel:%03u] %s: running Task @ 0x%p\n", cpuNo, __func__, task);
71 bug("[Kernel:%03u] %s: '%s', state %08x\n", cpuNo, __func__, task->tc_Node.ln_Name, task->tc_State);
74 FLAG_SCHEDSWITCH_CLEAR;
76 if (task)
78 if (
79 #if defined(__AROSEXEC_SMP__)
80 (task->tc_State == TS_TOMBSTONED) ||
81 #endif
82 (task->tc_State == TS_REMOVED))
84 /* always let finalising tasks finish... */
85 corereschedule = FALSE;
86 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
87 bug("[Kernel:%03u] core_Schedule: letting finalising task run..\n", cpuNo);
88 #endif
90 else if (!(task->tc_Flags & TF_EXCEPT))
92 #if defined(__AROSEXEC_SMP__)
93 KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, NULL,
94 SPINLOCK_MODE_READ);
95 #endif
96 /* Is the TaskReady empty? If yes, then the running task is the only one. Let it work */
97 if (IsListEmpty(&SysBase->TaskReady))
98 corereschedule = FALSE;
99 else
101 struct Task *nexttask;
103 If there are tasks ready for this cpu that have equal or lower priority,
104 and the current task has used its alloted time - reschedule so they can run
106 for (nexttask = (struct Task *)GetHead(&SysBase->TaskReady); nexttask != NULL; nexttask = (struct Task *)GetSucc(nexttask))
108 #if defined(__AROSEXEC_SMP__)
109 if (!(PrivExecBase(SysBase)->IntFlags & EXECF_CPUAffinity) || (GetIntETask(nexttask) && core_APIC_CPUInMask(cpuNo, GetIntETask(nexttask)->iet_CpuAffinity)))
111 #endif
112 if (
113 #if defined(__AROSEXEC_SMP__)
114 (task->tc_State != TS_SPIN) &&
116 #endif
117 (nexttask->tc_Node.ln_Pri <= task->tc_Node.ln_Pri))
119 /* If the running task did not used it's whole quantum yet, let it work */
120 if (!FLAG_SCHEDQUANTUM_ISSET)
121 corereschedule = FALSE;
123 break;
124 #if defined(__AROSEXEC_SMP__)
126 #endif
129 #if defined(__AROSEXEC_SMP__)
130 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock);
131 #endif
134 #if defined(__AROSEXEC_SMP__)
135 if ((!corereschedule) && (task->tc_State == TS_SPIN))
136 task->tc_State = TS_RUN;
137 #endif
140 DSCHED
142 if (corereschedule)
143 bug("[Kernel:%03u] '%s' @ 0x%p needs rescheduled ..\n", cpuNo, task->tc_Node.ln_Name, task);
146 return corereschedule;
150 Switch the currently running task on this cpu
151 if it is running , switch it to the ready state
152 on SMP builds, if exec has set TS_SPIN,
153 switch it to the spinning list.
155 void core_Switch(void)
157 cpuid_t cpuNo;
158 struct Task *task;
159 ULONG showAlert = 0;
160 BOOL doSwitch = TRUE;
162 DSCHED(bug("[Kernel] %s()\n", __func__);)
164 cpuNo = KrnGetCPUNumber();
165 task = GET_THIS_TASK;
167 DSCHED(
168 bug("[Kernel:%03u] %s: Current running Task @ 0x%p\n", cpuNo, __func__, task);
170 if ((!task) || (task->tc_State == TS_INVALID))
172 bug("[Kernel:%03u] %s: called on invalid task!\n", cpuNo, __func__);
173 doSwitch = FALSE;
175 DSCHED(
176 bug("[Kernel:%03u] %s: Task name '%s'\n", cpuNo, __func__, task->tc_Node.ln_Name);
177 bug("[Kernel:%03u] %s: Task state = %08x\n", cpuNo, __func__, task->tc_State);
179 #if defined(__AROSEXEC_SMP__)
180 if (task->tc_State == TS_TOMBSTONED)
181 doSwitch = FALSE;
182 #endif
183 if (!doSwitch)
185 bug("[Kernel:%03u] %s: Letting Task continue to run..\n", cpuNo, __func__);
186 return;
189 DSCHED(
190 bug("[Kernel:%03u] %s: Switching away from Task\n", cpuNo, __func__);
193 #if defined(__AROSEXEC_SMP__)
194 KrnSpinLock(&PrivExecBase(SysBase)->TaskRunningSpinLock, NULL,
195 SPINLOCK_MODE_WRITE);
196 #else
197 if (task->tc_State != TS_RUN)
198 #endif
199 REMOVE(&task->tc_Node);
200 #if defined(__AROSEXEC_SMP__)
201 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskRunningSpinLock);
202 if (task->tc_State == TS_REMOVED)
203 task->tc_State = TS_TOMBSTONED;
204 #endif
206 DSCHED(bug("[Kernel:%03u] %s: Task removed from list, state = %08x\n", cpuNo, __func__, task->tc_State);)
208 if ((task->tc_State != TS_WAIT) &&
209 #if defined(__AROSEXEC_SMP__)
210 (task->tc_State != TS_SPIN) &&
211 (task->tc_State != TS_TOMBSTONED) &&
212 #endif
213 (task->tc_State != TS_REMOVED))
214 task->tc_State = TS_READY;
216 /* if the current task has gone out of stack bounds, suspend it to prevent further damage to the system */
217 if (task->tc_SPReg <= task->tc_SPLower || task->tc_SPReg > task->tc_SPUpper)
219 bug("[Kernel:%03u] EROR! Task went out of stack limits\n", cpuNo);
220 bug("[Kernel:%03u] - Lower Bound = 0x%p, Upper Bound = 0x%p\n", cpuNo, task->tc_SPLower, task->tc_SPUpper);
221 bug("[Kernel:%03u] - SP = 0x%p\n", cpuNo, task->tc_SPReg);
223 task->tc_SigWait = 0;
224 task->tc_State = TS_WAIT;
226 showAlert = AN_StackProbe;
229 task->tc_IDNestCnt = IDNESTCOUNT_GET;
231 if (task->tc_State == TS_READY)
233 if (task->tc_Flags & TF_SWITCH)
234 AROS_UFC1NR(void, task->tc_Switch, AROS_UFCA(struct ExecBase *, SysBase, A6));
236 DSCHED(bug("[Kernel:%03u] Setting '%s' @ 0x%p as ready\n", cpuNo, task->tc_Node.ln_Name, task);)
237 #if defined(__AROSEXEC_SMP__)
238 KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, NULL,
239 SPINLOCK_MODE_WRITE);
240 #endif
241 Enqueue(&SysBase->TaskReady, &task->tc_Node);
242 #if defined(__AROSEXEC_SMP__)
243 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock);
244 #endif
246 #if defined(__AROSEXEC_SMP__)
247 else if (task->tc_State == TS_SPIN)
249 DSCHED(bug("[Kernel:%03u] Setting '%s' @ 0x%p to spin\n", cpuNo, task->tc_Node.ln_Name, task);)
250 KrnSpinLock(&PrivExecBase(SysBase)->TaskSpinningLock, NULL,
251 SPINLOCK_MODE_WRITE);
252 Enqueue(&PrivExecBase(SysBase)->TaskSpinning, &task->tc_Node);
253 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskSpinningLock);
255 #endif
256 else if(
257 #if defined(__AROSEXEC_SMP__)
258 (task->tc_State != TS_TOMBSTONED) &&
259 #endif
260 (task->tc_State != TS_REMOVED))
262 DSCHED(bug("[Kernel:%03u] Setting '%s' @ 0x%p to wait\n", cpuNo, task->tc_Node.ln_Name, task);)
263 #if defined(__AROSEXEC_SMP__)
264 KrnSpinLock(&PrivExecBase(SysBase)->TaskWaitSpinLock, NULL,
265 SPINLOCK_MODE_WRITE);
266 #endif
267 Enqueue(&SysBase->TaskWait, &task->tc_Node);
268 #if defined(__AROSEXEC_SMP__)
269 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskWaitSpinLock);
270 #endif
272 if (showAlert)
273 Alert(showAlert);
276 /* Dispatch a "new" ready task on this cpu */
277 struct Task *core_Dispatch(void)
279 struct Task *newtask;
280 struct Task *task = GET_THIS_TASK;
281 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
282 cpuid_t cpuNo = KrnGetCPUNumber();
283 #endif
285 DSCHED(bug("[Kernel:%03u] core_Dispatch()\n", cpuNo);)
287 #if defined(__AROSEXEC_SMP__)
288 KrnSpinLock(&PrivExecBase(SysBase)->TaskReadySpinLock, NULL,
289 SPINLOCK_MODE_WRITE);
290 #endif
291 for (newtask = (struct Task *)GetHead(&SysBase->TaskReady); newtask != NULL; newtask = (struct Task *)GetSucc(newtask))
293 #if defined(__AROSEXEC_SMP__)
294 if (!(PrivExecBase(SysBase)->IntFlags & EXECF_CPUAffinity) || (GetIntETask(newtask) && core_APIC_CPUInMask(cpuNo, GetIntETask(newtask)->iet_CpuAffinity)))
296 #endif
297 REMOVE(&newtask->tc_Node);
298 break;
299 #if defined(__AROSEXEC_SMP__)
301 #endif
303 #if defined(__AROSEXEC_SMP__)
304 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskReadySpinLock);
305 #endif
307 if ((!newtask) && (task) && (task->tc_State != TS_WAIT))
309 #if defined(__AROSEXEC_SMP__)
310 if (task->tc_State == TS_SPIN)
312 #if 0
313 bug("----> such unspinning should not take place!\n");
314 KrnSpinLock(&PrivExecBase(SysBase)->TaskSpinningLock, NULL,
315 SPINLOCK_MODE_WRITE);
316 REMOVE(&task->tc_Node);
317 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskSpinningLock);
318 #endif
319 task->tc_State = TS_READY;
321 #endif
322 newtask = task;
325 if (newtask != NULL)
327 BOOL launchtask = FALSE;
329 if (newtask->tc_State == TS_READY)
331 DSCHED(bug("[Kernel:%03u] Preparing to run '%s' @ 0x%p\n",
332 cpuNo, newtask->tc_Node.ln_Name, newtask);)
334 IDNESTCOUNT_SET(newtask->tc_IDNestCnt);
335 SET_THIS_TASK(newtask);
336 SCHEDELAPSED_SET(SCHEDQUANTUM_GET);
337 FLAG_SCHEDQUANTUM_CLEAR;
339 if ((newtask->tc_State == TS_READY) || (newtask->tc_State == TS_RUN))
341 /* Check the stack of the task we are about to launch. */
342 if ((newtask->tc_SPReg <= newtask->tc_SPLower) ||
343 (newtask->tc_SPReg > newtask->tc_SPUpper))
344 newtask->tc_State = TS_WAIT;
345 else
347 newtask->tc_State = TS_RUN;
348 launchtask = TRUE;
351 else if (
352 #if defined(__AROSEXEC_SMP__)
353 (task->tc_State == TS_TOMBSTONED ) ||
354 #endif
355 (task->tc_State == TS_REMOVED))
357 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
358 // The task is on its way out ...
359 bug("[Kernel:%03u] --> Dispatching finalizing/tombstoned task?\n", cpuNo);
360 bug("[Kernel:%03u] --> Task @ 0x%p '%s', state %08x\n", cpuNo, task, task->tc_Node.ln_Name, newtask->tc_State);
361 #endif
364 if (newtask->tc_State == TS_WAIT)
366 DSCHED(bug("[Kernel:%03u] Moving '%s' @ 0x%p to wait queue\n", cpuNo, task->tc_Node.ln_Name, task);)
367 #if defined(__AROSEXEC_SMP__)
368 KrnSpinLock(&PrivExecBase(SysBase)->TaskWaitSpinLock, NULL,
369 SPINLOCK_MODE_WRITE);
370 #endif
371 Enqueue(&SysBase->TaskWait, &task->tc_Node);
372 #if defined(__AROSEXEC_SMP__)
373 KrnSpinUnLock(&PrivExecBase(SysBase)->TaskWaitSpinLock);
374 #endif
377 if (!launchtask)
379 /* if the new task shouldn't run - force a reschedule */
380 DSCHED(bug("[Kernel:%03u] Skipping '%s' @ 0x%p (state %08x)\n", cpuNo, newtask->tc_Node.ln_Name, newtask, newtask->tc_State);)
382 core_Switch();
383 newtask = core_Dispatch();
385 else
387 #if defined(AROS_NO_ATOMIC_OPERATIONS)
388 SysBase->DispCount++;
389 #else
390 AROS_ATOMIC_INC(SysBase->DispCount);
391 #endif
392 DSCHED(bug("[Kernel:%03u] Launching '%s' @ 0x%p (state %08x)\n", cpuNo, newtask->tc_Node.ln_Name, newtask, newtask->tc_State);)
395 else
397 /* Go idle if there is nothing to do ... */
398 DSCHED(bug("[Kernel:%03u] No ready Task(s) - entering sleep mode\n", cpuNo);)
401 * Idle counter is incremented every time when we enter here,
402 * not only once. This is correct.
404 #if defined(AROS_NO_ATOMIC_OPERATIONS)
405 SysBase->IdleCount++;
406 #else
407 AROS_ATOMIC_INC(SysBase->IdleCount);
408 #endif
409 FLAG_SCHEDSWITCH_SET;
412 return newtask;