#include "exec_intern.h"

#include <exec/lists.h>
#include <exec/types.h>
#include <exec/tasks.h>
#include <exec/execbase.h>
#include <aros/libcall.h>
#include <asm/segments.h>

#include "kernel_intern.h"

AROS_LH0(KRN_SchedType, KrnGetScheduler,
         struct KernelBase *, KernelBase, 1, Kernel)
{
    AROS_LIBFUNC_INIT

    /* The built-in round-robin scheduler is the only one available */
    return SCHED_RR;

    AROS_LIBFUNC_EXIT
}

AROS_LH1(void, KrnSetScheduler,
         AROS_LHA(KRN_SchedType, sched, D0),
         struct KernelBase *, KernelBase, 2, Kernel)
{
    AROS_LIBFUNC_INIT

    /* Cannot set scheduler yet */

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnCause,
         struct KernelBase *, KernelBase, 3, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_CAUSE):"memory");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnDispatch,
         struct KernelBase *, KernelBase, 4, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_DISPATCH):"memory");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnSwitch,
         struct KernelBase *, KernelBase, 5, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_SWITCH):"memory");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnSchedule,
         struct KernelBase *, KernelBase, 6, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_SCHEDULE):"memory");

    AROS_LIBFUNC_EXIT
}

/*
 * Task dispatcher. It can essentially remain the same regardless of the
 * scheduling algorithm in use.
 */
void core_Dispatch(regs_t *regs)
{
    /* SysBase is fetched from (CPU-)local storage on this port */
    struct ExecBase *SysBase = TLS_GET(SysBase);
    struct Task *task;

    __asm__ __volatile__("cli;");

    /*
     * Is the list of ready tasks empty? Then increment the idle switch count
     * and halt the CPU. This should eventually be extended by a plugin
     * mechanism that could put the CPU and the whole machine into more
     * sophisticated sleep states (ACPI?)
     */
    while (IsListEmpty(&SysBase->TaskReady))
    {
        SysBase->IdleCount++;
        SysBase->AttnResched |= ARF_AttnSwitch;

        /* Sleep almost forever ;) Note that "sti; hlt" is race-free: sti takes
         * effect only after the following instruction, so an interrupt cannot
         * slip in between enabling interrupts and halting */
        __asm__ __volatile__("sti; hlt; cli");

        /* A software interrupt may have been requested while we slept; process
         * it (core_Cause() is assumed here as the kernel-side soft interrupt
         * dispatcher) */
        if (SysBase->SysFlags & SFF_SoftInt)
        {
            core_Cause(SysBase);
        }
    }

    SysBase->DispCount++;

    /* Get the first task from the TaskReady list and make it the running task
     * by updating its settings through SysBase */
    task = (struct Task *)REMHEAD(&SysBase->TaskReady);
    SysBase->ThisTask = task;
    SysBase->Elapsed = SysBase->Quantum;
    SysBase->SysFlags &= ~0x2000;    /* clear the "quantum elapsed" flag */
    task->tc_State = TS_RUN;
    SysBase->IDNestCnt = task->tc_IDNestCnt;

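    /*
     * Quantum/Elapsed bookkeeping: Elapsed is reloaded with Quantum on every
     * dispatch. The timer interrupt handler (not part of this file) is assumed
     * to count Elapsed down and set the 0x2000 SysFlags bit once the time
     * slice is used up; core_Schedule() below checks that bit to decide
     * whether the running task may keep the CPU.
     */
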
    /* Handle the task's flags */
    if (task->tc_Flags & TF_EXCEPT)
        Exception();    /* a pending exception is assumed to be processed via exec's Exception() */

    if (task->tc_Flags & TF_LAUNCH)
    {
        AROS_UFC1(void, task->tc_Launch,
                  AROS_UFCA(struct ExecBase *, SysBase, A6));
    }

    /* Restore the task's state */
    bcopy(GetIntETask(task)->iet_Context, regs, sizeof(regs_t));
    /* Restore the FPU/MMX/XMM state */
#warning FIXME: Change to lazy saving of the XMM state!!!!
    /* fxrstor needs a 16-byte aligned buffer, hence the round-up of the
     * context pointer past the regs_t area */
    IPTR sse_ctx = ((IPTR)GetIntETask(task)->iet_Context + sizeof(regs_t) + 15) & ~15;
    asm volatile("fxrstor (%0)"::"D"(sse_ctx));

    /* Leave the interrupt and jump to the new task */
    core_LeaveInterrupt(regs);
}

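/*
 * The per-task context buffer is assumed to be laid out as:
 *
 *     iet_Context --> [ regs_t (general-purpose registers, iret frame) ]
 *                     [ up to 15 bytes of padding                      ]
 *     sse_ctx     --> [ 512-byte FXSAVE area, 16-byte aligned          ]
 *
 * which is why both core_Dispatch() and core_Switch() recompute sse_ctx with
 * the "+ 15, & ~15" round-up instead of storing a second pointer.
 */
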
void core_Switch(regs_t *regs)
{
    struct ExecBase *SysBase = TLS_GET(SysBase);
    struct Task *task;

    /* Disable interrupts for a while */
    __asm__ __volatile__("cli; cld;");

    task = SysBase->ThisTask;

    /* Copy the current task's context into the ETask structure */
    bcopy(regs, GetIntETask(task)->iet_Context, sizeof(regs_t));
    /* Save the FPU/MMX/XMM state */
#warning FIXME: Change to lazy saving of the XMM state!!!!
    IPTR sse_ctx = ((IPTR)GetIntETask(task)->iet_Context + sizeof(regs_t) + 15) & ~15;
    asm volatile("fxsave (%0)"::"D"(sse_ctx));

    /* Store IDNestCnt in the task's structure */
    task->tc_IDNestCnt = SysBase->IDNestCnt;
    task->tc_SPReg = regs->return_rsp;

    /* And enable interrupts */
    SysBase->IDNestCnt = -1;
    __asm__ __volatile__("sti;");

    /* TF_SWITCH flag set? Call the switch routine */
    if (task->tc_Flags & TF_SWITCH)
    {
        AROS_UFC1(void, task->tc_Switch,
                  AROS_UFCA(struct ExecBase *, SysBase, A6));
    }

    /* The old context is saved; now pick a new task to run */
    core_Dispatch(regs);
}

/*
 * Schedule the currently running task away. Put it into the TaskReady list
 * in some smart way. This function is subject to change and will probably be
 * replaced by a plugin system in the future.
 */
void core_Schedule(regs_t *regs)
{
    struct ExecBase *SysBase = TLS_GET(SysBase);
    struct Task *task;

    /* Disable interrupts for a while */
    __asm__ __volatile__("cli");

    task = SysBase->ThisTask;

    /* Clear the pending switch flag. */
    SysBase->AttnResched &= ~ARF_AttnSwitch;

    /* If the task has a pending exception, reschedule it so that the dispatcher may handle the exception */
    if (!(task->tc_Flags & TF_EXCEPT))
    {
        /* Is TaskReady empty? If yes, the running task is the only one. Let it work */
        if (IsListEmpty(&SysBase->TaskReady))
            core_LeaveInterrupt(regs);

        /* Does the TaskReady list contain tasks with priority equal to or lower
         * than the current task? If so, check further... */
        if (((struct Task *)GetHead(&SysBase->TaskReady))->tc_Node.ln_Pri <= task->tc_Node.ln_Pri)
        {
            /* If the running task has not used up its whole quantum yet, let it work */
            if (!(SysBase->SysFlags & 0x2000))
            {
                core_LeaveInterrupt(regs);
            }
        }
    }

    /*
     * If we got here, rescheduling is necessary.
     * Put the task into the TaskReady list.
     */
    task->tc_State = TS_READY;
    Enqueue(&SysBase->TaskReady, (struct Node *)task);

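    /*
     * Enqueue() (as opposed to AddTail()) inserts the node according to its
     * ln_Pri field, so TaskReady stays sorted by priority and the dispatcher
     * can simply REMHEAD() the highest-priority ready task.
     */
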
    /* Select a new task to run */
    core_Switch(regs);
}

/*
 * Leave the interrupt. This function receives the register frame used to leave
 * supervisor mode. It never returns, and reschedules the task if asked to.
 */
void core_ExitInterrupt(regs_t *regs)
{
    struct ExecBase *SysBase = TLS_GET(SysBase);

    /* Going back into supervisor mode? Then exit immediately */
    if (regs->ds == KERNEL_DS)
    {
        core_LeaveInterrupt(regs);
    }
    else
    {
        /* Prepare to go back into user mode */

        /* Soft interrupt requested? It's high time to do it */
        if (SysBase->SysFlags & SFF_SoftInt)
            core_Cause(SysBase);

        /* If task switching is disabled, leave immediately */
        if (SysBase->TDNestCnt >= 0)
        {
            core_LeaveInterrupt(regs);
        }
        else
        {
            /*
             * Do not disturb the task if it isn't necessary.
             * Reschedule only if the switch pending flag is set; exit otherwise.
             */
            if (SysBase->AttnResched & ARF_AttnSwitch)
            {
                core_Schedule(regs);
            }
            else
                core_LeaveInterrupt(regs);
        }
    }
}
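
/*
 * Control-flow summary, as reconstructed from the calls above: a timer or
 * software interrupt ends in core_ExitInterrupt(), which calls core_Schedule()
 * when a switch is pending. core_Schedule() either lets the current task keep
 * the CPU or requeues it and calls core_Switch(); core_Switch() saves the
 * context and falls through to core_Dispatch(), which loads the next ready
 * task and leaves through core_LeaveInterrupt(). None of these functions
 * return to their caller on the reschedule path.
 */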