2 Copyright © 2017, The AROS Development Team. All rights reserved.
6 #include <exec/alerts.h>
7 #include <exec/execbase.h>
8 #include <exec/lists.h>
9 #include <proto/exec.h>
11 #define __KERNEL_NOLIBBASE__
12 #include <proto/kernel.h>
14 #include <kernel_base.h>
15 #include <kernel_debug.h>
16 #include <kernel_scheduler.h>
18 #include "kernel_cpu.h"
20 #include <exec_platform.h>
22 #include <aros/types/spinlock_s.h>
26 #define __AROS_KERNEL__
28 #include "exec_intern.h"
#if defined(__AROSEXEC_SMP__)
/*
 * core_InitScheduleData() - seed the per-CPU scheduler bookkeeping (SMP
 * builds only).  The scheduling granularity and the time quantum are
 * initialised with their compile-time defaults.
 */
void core_InitScheduleData(struct X86SchedulerPrivate *schedData)
{
    DSCHED(bug("[Kernel] %s(0x%p)\n", __func__, schedData);)

    schedData->Granularity = SCHEDGRAN_VALUE;
    schedData->Quantum = SCHEDQUANTUM_VALUE;
}
#endif
53 /* Check if the currently running task on this cpu should be rescheduled.. */
/*
 * core_Schedule() - decide whether the task running on this CPU must give
 * the CPU up.  Returns TRUE when a reschedule is required, FALSE when the
 * current task may keep running.
 *
 * NOTE(review): this extract is missing interior lines (braces, several
 * declarations and #else/#endif directives); the surviving tokens are kept
 * verbatim below and only comments have been added.
 */
54 BOOL
core_Schedule(void)
56 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
/* corereschedule defaults to TRUE; the checks below may veto it. */
60 BOOL corereschedule
= TRUE
;
62 DSCHED(bug("[Kernel] %s()\n", __func__
);)
65 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
66 cpuNo
= KrnGetCPUNumber();
70 bug("[Kernel:%03u] %s: running Task @ 0x%p\n", cpuNo
, __func__
, task
);
71 bug("[Kernel:%03u] %s: '%s', state %08x\n", cpuNo
, __func__
, task
->tc_Node
.ln_Name
, task
->tc_State
);
/* A decision is being made now - clear any pending switch request. */
74 FLAG_SCHEDSWITCH_CLEAR
;
/* Tombstoned/removed tasks are finalising and must never be preempted. */
79 #if defined(__AROSEXEC_SMP__)
80 (task
->tc_State
== TS_TOMBSTONED
) ||
82 (task
->tc_State
== TS_REMOVED
))
84 /* always let finalising tasks finish... */
85 corereschedule
= FALSE
;
86 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
87 bug("[Kernel:%03u] core_Schedule: letting finalising task run..\n", cpuNo
);
/*
 * When TF_EXCEPT is set the task must be rescheduled so the exception can
 * be processed (corereschedule stays TRUE); otherwise inspect TaskReady.
 */
90 else if (!(task
->tc_Flags
& TF_EXCEPT
))
92 #if defined(__AROSEXEC_SMP__)
93 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
96 /* Is the TaskReady empty? If yes, then the running task is the only one. Let it work */
97 if (IsListEmpty(&SysBase
->TaskReady
))
98 corereschedule
= FALSE
;
101 struct Task
*nexttask
;
/*
103 If there are tasks ready for this cpu that have equal or lower priority,
104 and the current task has used its allotted time - reschedule so they can run
*/
106 for (nexttask
= (struct Task
*)GetHead(&SysBase
->TaskReady
); nexttask
!= NULL
; nexttask
= (struct Task
*)GetSucc(nexttask
))
/* On SMP, only consider tasks permitted to run on this CPU (affinity mask). */
108 #if defined(__AROSEXEC_SMP__)
109 if (!(PrivExecBase(SysBase
)->IntFlags
& EXECF_CPUAffinity
) || (GetIntETask(nexttask
) && core_APIC_CPUInMask(cpuNo
, GetIntETask(nexttask
)->iet_CpuAffinity
)))
113 #if defined(__AROSEXEC_SMP__)
114 (task
->tc_State
!= TS_SPIN
) &&
117 (nexttask
->tc_Node
.ln_Pri
<= task
->tc_Node
.ln_Pri
))
119 /* If the running task has not used its whole quantum yet, let it work */
120 if (!FLAG_SCHEDQUANTUM_ISSET
)
121 corereschedule
= FALSE
;
124 #if defined(__AROSEXEC_SMP__)
129 #if defined(__AROSEXEC_SMP__)
130 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
/* (SMP) a spinning task that keeps the CPU returns to the TS_RUN state. */
134 #if defined(__AROSEXEC_SMP__)
135 if ((!corereschedule
) && (task
->tc_State
== TS_SPIN
))
136 task
->tc_State
= TS_RUN
;
/*
 * Debug trace of the decision; presumably wrapped in
 * DSCHED(if (corereschedule) ...) in the full file - TODO confirm.
 */
143 bug("[Kernel:%03u] '%s' @ 0x%p needs rescheduled ..\n", cpuNo
, task
->tc_Node
.ln_Name
, task
);
146 return corereschedule
;
/*
150 Switch the currently running task on this cpu
151 if it is running, switch it to the ready state
152 on SMP builds, if exec has set TS_SPIN,
153 switch it to the spinning list.
*/
/*
 * core_Switch() - take the current task off the CPU, save its scheduling
 * context and move it onto the list matching its final state (TaskReady,
 * TaskSpinning on SMP, or TaskWait).  A task caught outside its stack
 * bounds is forced into TS_WAIT and an AN_StackProbe alert is flagged.
 *
 * NOTE(review): interior lines (braces, #else/#endif, some control flow)
 * are missing from this extract; tokens below are kept verbatim.
 */
155 void core_Switch(void)
/* doSwitch is initialised here; its consumer lies in lines dropped from this extract. */
160 BOOL doSwitch
= TRUE
;
162 DSCHED(bug("[Kernel] %s()\n", __func__
);)
164 cpuNo
= KrnGetCPUNumber();
165 task
= GET_THIS_TASK
;
168 bug("[Kernel:%03u] %s: Current running Task @ 0x%p\n", cpuNo
, __func__
, task
);
170 if ((!task
) || (task
->tc_State
== TS_INVALID
))
172 bug("[Kernel:%03u] %s: called on invalid task!\n", cpuNo
, __func__
);
176 bug("[Kernel:%03u] %s: Task name '%s'\n", cpuNo
, __func__
, task
->tc_Node
.ln_Name
);
177 bug("[Kernel:%03u] %s: Task state = %08x\n", cpuNo
, __func__
, task
->tc_State
);
/* (SMP) a tombstoned task is apparently left to run - confirm against full file. */
179 #if defined(__AROSEXEC_SMP__)
180 if (task
->tc_State
== TS_TOMBSTONED
)
185 bug("[Kernel:%03u] %s: Letting Task continue to run..\n", cpuNo
, __func__
);
190 bug("[Kernel:%03u] %s: Switching away from Task\n", cpuNo
, __func__
);
/* Unlink the task under the running-list spinlock on SMP builds. */
193 #if defined(__AROSEXEC_SMP__)
194 KrnSpinLock(&PrivExecBase(SysBase
)->TaskRunningSpinLock
, NULL
,
195 SPINLOCK_MODE_WRITE
);
/* Only tasks not in TS_RUN are linked on a list and need unlinking. */
197 if (task
->tc_State
!= TS_RUN
)
199 REMOVE(&task
->tc_Node
);
200 #if defined(__AROSEXEC_SMP__)
201 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskRunningSpinLock
);
/* A removed task, once unlinked, becomes tombstoned (SMP path). */
202 if (task
->tc_State
== TS_REMOVED
)
203 task
->tc_State
= TS_TOMBSTONED
;
206 DSCHED(bug("[Kernel:%03u] %s: Task removed from list, state = %08x\n", cpuNo
, __func__
, task
->tc_State
);)
/* Any task not already waiting/spinning/finalising becomes TS_READY. */
208 if ((task
->tc_State
!= TS_WAIT
) &&
209 #if defined(__AROSEXEC_SMP__)
210 (task
->tc_State
!= TS_SPIN
) &&
211 (task
->tc_State
!= TS_TOMBSTONED
) &&
213 (task
->tc_State
!= TS_REMOVED
))
214 task
->tc_State
= TS_READY
;
216 /* if the current task has gone out of stack bounds, suspend it to prevent further damage to the system */
217 if (task
->tc_SPReg
<= task
->tc_SPLower
|| task
->tc_SPReg
> task
->tc_SPUpper
)
/* NOTE(review): "EROR!" is a typo for "ERROR!" in the log string below;
   left untouched here because string literals are runtime behavior. */
219 bug("[Kernel:%03u] EROR! Task went out of stack limits\n", cpuNo
);
220 bug("[Kernel:%03u] - Lower Bound = 0x%p, Upper Bound = 0x%p\n", cpuNo
, task
->tc_SPLower
, task
->tc_SPUpper
);
221 bug("[Kernel:%03u] - SP = 0x%p\n", cpuNo
, task
->tc_SPReg
);
/* Park the offender: no signals waited on, state forced to TS_WAIT. */
223 task
->tc_SigWait
= 0;
224 task
->tc_State
= TS_WAIT
;
/* Flag a stack-probe alert; showAlert is declared in lines not shown here. */
226 showAlert
= AN_StackProbe
;
/* Save the interrupt-disable nesting level into the task context. */
229 task
->tc_IDNestCnt
= IDNESTCOUNT_GET
;
/* Requeue by final state: ready -> TaskReady, spinning -> TaskSpinning (SMP), otherwise -> TaskWait. */
231 if (task
->tc_State
== TS_READY
)
/* Honour the task's TF_SWITCH hook before it loses the CPU. */
233 if (task
->tc_Flags
& TF_SWITCH
)
234 AROS_UFC1NR(void, task
->tc_Switch
, AROS_UFCA(struct ExecBase
*, SysBase
, A6
));
236 DSCHED(bug("[Kernel:%03u] Setting '%s' @ 0x%p as ready\n", cpuNo
, task
->tc_Node
.ln_Name
, task
);)
237 #if defined(__AROSEXEC_SMP__)
238 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
239 SPINLOCK_MODE_WRITE
);
241 Enqueue(&SysBase
->TaskReady
, &task
->tc_Node
);
242 #if defined(__AROSEXEC_SMP__)
243 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
246 #if defined(__AROSEXEC_SMP__)
247 else if (task
->tc_State
== TS_SPIN
)
249 DSCHED(bug("[Kernel:%03u] Setting '%s' @ 0x%p to spin\n", cpuNo
, task
->tc_Node
.ln_Name
, task
);)
250 KrnSpinLock(&PrivExecBase(SysBase
)->TaskSpinningLock
, NULL
,
251 SPINLOCK_MODE_WRITE
);
252 Enqueue(&PrivExecBase(SysBase
)->TaskSpinning
, &task
->tc_Node
);
253 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskSpinningLock
);
/* Neither ready nor spinning: unless finalising, park the task on TaskWait. */
257 #if defined(__AROSEXEC_SMP__)
258 (task
->tc_State
!= TS_TOMBSTONED
) &&
260 (task
->tc_State
!= TS_REMOVED
))
262 DSCHED(bug("[Kernel:%03u] Setting '%s' @ 0x%p to wait\n", cpuNo
, task
->tc_Node
.ln_Name
, task
);)
263 #if defined(__AROSEXEC_SMP__)
264 KrnSpinLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
, NULL
,
265 SPINLOCK_MODE_WRITE
);
267 Enqueue(&SysBase
->TaskWait
, &task
->tc_Node
);
268 #if defined(__AROSEXEC_SMP__)
269 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
);
276 /* Dispatch a "new" ready task on this cpu */
/*
 * core_Dispatch() - select the next task to run on this CPU.  Unlinks the
 * first eligible entry from TaskReady (honouring CPU affinity on SMP),
 * makes it the current task, restarts its quantum accounting and returns
 * it.  Falls back to the still-runnable current task when the ready list
 * yields nothing, and requests a later reschedule when the CPU must idle.
 *
 * NOTE(review): interior lines (braces, #else/#endif, return paths) are
 * missing from this extract; tokens below are kept verbatim.
 */
277 struct Task
*core_Dispatch(void)
279 struct Task
*newtask
;
280 struct Task
*task
= GET_THIS_TASK
;
281 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
282 cpuid_t cpuNo
= KrnGetCPUNumber();
285 DSCHED(bug("[Kernel:%03u] core_Dispatch()\n", cpuNo
);)
287 #if defined(__AROSEXEC_SMP__)
288 KrnSpinLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
, NULL
,
289 SPINLOCK_MODE_WRITE
);
/* Pick the first ready task this CPU may run and claim it. */
291 for (newtask
= (struct Task
*)GetHead(&SysBase
->TaskReady
); newtask
!= NULL
; newtask
= (struct Task
*)GetSucc(newtask
))
293 #if defined(__AROSEXEC_SMP__)
294 if (!(PrivExecBase(SysBase
)->IntFlags
& EXECF_CPUAffinity
) || (GetIntETask(newtask
) && core_APIC_CPUInMask(cpuNo
, GetIntETask(newtask
)->iet_CpuAffinity
)))
/* Claim the candidate: unlink it from the ready list. */
297 REMOVE(&newtask
->tc_Node
);
299 #if defined(__AROSEXEC_SMP__)
303 #if defined(__AROSEXEC_SMP__)
304 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskReadySpinLock
);
/* No eligible ready task: fall back to the current task if still runnable. */
307 if ((!newtask
) && (task
) && (task
->tc_State
!= TS_WAIT
))
309 #if defined(__AROSEXEC_SMP__)
310 if (task
->tc_State
== TS_SPIN
)
313 bug("----> such unspinning should not take place!\n");
314 KrnSpinLock(&PrivExecBase(SysBase
)->TaskSpinningLock
, NULL
,
315 SPINLOCK_MODE_WRITE
);
316 REMOVE(&task
->tc_Node
);
317 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskSpinningLock
);
319 task
->tc_State
= TS_READY
;
/* launchtask is computed below; its consumer lies in lines dropped from this extract - TODO confirm. */
327 BOOL launchtask
= FALSE
;
329 if (newtask
->tc_State
== TS_READY
)
331 DSCHED(bug("[Kernel:%03u] Preparing to run '%s' @ 0x%p\n",
332 cpuNo
, newtask
->tc_Node
.ln_Name
, newtask
);)
/* Make newtask current and restart its time-slice accounting. */
334 IDNESTCOUNT_SET(newtask
->tc_IDNestCnt
);
335 SET_THIS_TASK(newtask
);
336 SCHEDELAPSED_SET(SCHEDQUANTUM_GET
);
337 FLAG_SCHEDQUANTUM_CLEAR
;
339 if ((newtask
->tc_State
== TS_READY
) || (newtask
->tc_State
== TS_RUN
))
341 /* Check the stack of the task we are about to launch. */
342 if ((newtask
->tc_SPReg
<= newtask
->tc_SPLower
) ||
343 (newtask
->tc_SPReg
> newtask
->tc_SPUpper
))
344 newtask
->tc_State
= TS_WAIT
;
347 newtask
->tc_State
= TS_RUN
;
/* Diagnose dispatching of a finalising task (debug output only). */
352 #if defined(__AROSEXEC_SMP__)
353 (task
->tc_State
== TS_TOMBSTONED
) ||
355 (task
->tc_State
== TS_REMOVED
))
357 #if defined(__AROSEXEC_SMP__) || (DEBUG > 0)
358 // The task is on its way out ...
359 bug("[Kernel:%03u] --> Dispatching finalizing/tombstoned task?\n", cpuNo
);
/* NOTE(review): the pointer/name below come from `task` while the state
   comes from `newtask` - looks suspicious; verify against the full file. */
360 bug("[Kernel:%03u] --> Task @ 0x%p '%s', state %08x\n", cpuNo
, task
, task
->tc_Node
.ln_Name
, newtask
->tc_State
);
/* NOTE(review): the condition tests `newtask` but the wait-queue move
   below enqueues `task` - presumably intentional mirroring of the
   fallback path, but confirm against the full file. */
364 if (newtask
->tc_State
== TS_WAIT
)
366 DSCHED(bug("[Kernel:%03u] Moving '%s' @ 0x%p to wait queue\n", cpuNo
, task
->tc_Node
.ln_Name
, task
);)
367 #if defined(__AROSEXEC_SMP__)
368 KrnSpinLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
, NULL
,
369 SPINLOCK_MODE_WRITE
);
371 Enqueue(&SysBase
->TaskWait
, &task
->tc_Node
);
372 #if defined(__AROSEXEC_SMP__)
373 KrnSpinUnLock(&PrivExecBase(SysBase
)->TaskWaitSpinLock
);
379 /* if the new task shouldn't run - force a reschedule */
380 DSCHED(bug("[Kernel:%03u] Skipping '%s' @ 0x%p (state %08x)\n", cpuNo
, newtask
->tc_Node
.ln_Name
, newtask
, newtask
->tc_State
);)
/* Recurse to find another candidate. */
383 newtask
= core_Dispatch();
/* Account the dispatch (atomically where the platform supports it). */
387 #if defined(AROS_NO_ATOMIC_OPERATIONS)
388 SysBase
->DispCount
++;
390 AROS_ATOMIC_INC(SysBase
->DispCount
);
392 DSCHED(bug("[Kernel:%03u] Launching '%s' @ 0x%p (state %08x)\n", cpuNo
, newtask
->tc_Node
.ln_Name
, newtask
, newtask
->tc_State
);)
397 /* Go idle if there is nothing to do ... */
398 DSCHED(bug("[Kernel:%03u] No ready Task(s) - entering sleep mode\n", cpuNo
);)
/*
401 * Idle counter is incremented every time when we enter here,
402 * not only once. This is correct.
*/
404 #if defined(AROS_NO_ATOMIC_OPERATIONS)
405 SysBase
->IdleCount
++;
407 AROS_ATOMIC_INC(SysBase
->IdleCount
);
/* Ask for another scheduling attempt once something becomes ready. */
409 FLAG_SCHEDSWITCH_SET
;