/* arch/ppc-sam440/kernel/scheduler.c */
#include <asm/amcc440.h>
#include <aros/kernel.h>
#include <aros/libcall.h>
#include <exec/execbase.h>
#include <hardware/intbits.h>

#include "exec_intern.h"
#include "etask.h"
#include "syscall.h"

#include "kernel_intern.h"
AROS_LH0(KRN_SchedType, KrnGetScheduler,
         struct KernelBase *, KernelBase, 1, Kernel)
{
    AROS_LIBFUNC_INIT

    return SCHED_RR;

    AROS_LIBFUNC_EXIT
}

AROS_LH1(void, KrnSetScheduler,
         AROS_LHA(KRN_SchedType, sched, D0),
         struct KernelBase *, KernelBase, 2, Kernel)
{
    AROS_LIBFUNC_INIT

    /* Cannot set the scheduler yet */

    AROS_LIBFUNC_EXIT
}
AROS_LH0(void, KrnCause,
         struct KernelBase *, KernelBase, 3, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_CAUSE):"memory","r3");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnDispatch,
         struct KernelBase *, KernelBase, 4, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_DISPATCH):"memory","r3");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnSwitch,
         struct KernelBase *, KernelBase, 5, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_SWITCH):"memory","r3");

    AROS_LIBFUNC_EXIT
}

AROS_LH0(void, KrnSchedule,
         struct KernelBase *, KernelBase, 6, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("li %%r3,%0; sc"::"i"(SC_SCHEDULE):"memory","r3");

    AROS_LIBFUNC_EXIT
}
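/*
 * Each wrapper above loads a syscall number into r3 and executes 'sc',
 * trapping into the system-call exception. The kernel side then dispatches
 * on r3. A minimal sketch of that dispatch, assuming the handler receives
 * the saved register frame (the real handler lives in the exception code,
 * not in this file, and also has to leave the interrupt afterwards):
 *
 *   void syscall_handler(regs_t *regs)
 *   {
 *       switch (regs->gpr[3])
 *       {
 *           case SC_CAUSE:    core_Cause(getSysBase()); break;
 *           case SC_DISPATCH: core_Dispatch(regs);      break;
 *           case SC_SWITCH:   core_Switch(regs);        break;
 *           case SC_SCHEDULE: core_Schedule(regs);      break;
 *       }
 *   }
 */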
/*
 * Task dispatcher. Basically it may be the same one no matter
 * what scheduling algorithm is used.
 */
void core_Dispatch(regs_t *regs)
{
    volatile struct ExecBase *SysBase = getSysBase();
    struct Task *task;

    if (SysBase)
    {
        __asm__ __volatile__("wrteei 0;");

        /*
         * Is the list of ready tasks empty? If so, increment the idle switch count and halt
         * the CPU. This should be extended by some plugin mechanism which would put the CPU
         * and the whole machine into more sophisticated sleep states (ACPI?).
         */
        while (IsListEmpty(&SysBase->TaskReady))
        {
            SysBase->IdleCount++;
            SysBase->AttnResched |= ARF_AttnSwitch;

            //D(bug("[KRN] TaskReady list empty. Sleeping for a while...\n"));
            /* Sleep almost forever ;) */
            //__asm__ __volatile__("wrteei 1; sync; isync;");
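            /*
             * Setting MSR_POW together with MSR_EE lets the core enter its
             * power-saving/wait state with interrupts enabled, so the next
             * enabled interrupt wakes it up; the wrteei 0 below masks
             * interrupts again as soon as we resume.
             */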
            wrmsr(rdmsr() | MSR_POW | MSR_EE);
            __asm__ __volatile__("wrteei 0");
            //D(bug("[\n"));
            if (SysBase->SysFlags & SFF_SoftInt)
            {
                core_Cause(SysBase);
            }
        }

        SysBase->DispCount++;

        /* Get the first task from the TaskReady list and populate its settings through SysBase */
        task = (struct Task *)REMHEAD(&SysBase->TaskReady);
        SysBase->ThisTask = task;
        SysBase->Elapsed = SysBase->Quantum;
        SysBase->SysFlags &= ~0x2000;   /* clear the "quantum elapsed" flag */
        task->tc_State = TS_RUN;
        SysBase->IDNestCnt = task->tc_IDNestCnt;

        //D(bug("[KRN] New task = %p (%s)\n", task, task->tc_Node.ln_Name));

        /* Handle the task's flags */
        if (task->tc_Flags & TF_EXCEPT)
            Exception();

        if (task->tc_Flags & TF_LAUNCH)
        {
            AROS_UFC1(void, task->tc_Launch,
                      AROS_UFCA(struct ExecBase *, SysBase, A6));
        }

        /* Restore the task's state */
        bcopy(GetIntETask(task)->iet_Context, regs, sizeof(regs_t));

        /* Copy the fpu, mmx, xmm state */
#warning FIXME: Change to lazy saving of the FPU state!
#warning TODO: No FPU support yet!
        // IPTR sse_ctx = ((IPTR)GetIntETask(task)->iet_Context + sizeof(regs_t) + 15) & ~15;
        // asm volatile("fxrstor (%0)"::"D"(sse_ctx));
    }

    /* Make sure the power-saving bit is cleared before we return to the task */
    regs->srr1 &= ~MSR_POW;

    /* Leave the interrupt and jump to the new task */
    core_LeaveInterrupt(regs);
}
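/*
 * Quantum accounting happens in the timer interrupt, not in this file:
 * presumably each tick decrements SysBase->Elapsed and, once it reaches
 * zero, sets the 0x2000 SysFlags bit together with ARF_AttnSwitch, which
 * core_Schedule() below tests. A rough sketch of such a tick hook
 * (illustrative only):
 *
 *   if (SysBase->Elapsed && --SysBase->Elapsed == 0)
 *   {
 *       SysBase->SysFlags |= 0x2000;             // quantum used up
 *       SysBase->AttnResched |= ARF_AttnSwitch;  // request a reschedule
 *   }
 */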
void core_Switch(regs_t *regs)
{
    struct ExecBase *SysBase = getSysBase();
    struct Task *task;

    if (SysBase)
    {
        /* Disable interrupts for a while */
        __asm__ __volatile__("wrteei 0");

        task = SysBase->ThisTask;

        //D(bug("[KRN] Old task = %p (%s)\n", task, task->tc_Node.ln_Name));

        /* Copy the current task's context into the ETask structure */
        bcopy(regs, GetIntETask(task)->iet_Context, sizeof(regs_t));

        /* Copy the fpu, mmx, xmm state */
#warning FIXME: Change to lazy saving of the FPU state!
#warning TODO: FPU handling is not implemented yet!
        // IPTR sse_ctx = ((IPTR)GetIntETask(task)->iet_Context + sizeof(regs_t) + 15) & ~15;
        // asm volatile("fxsave (%0)"::"D"(sse_ctx));

        /* Store IDNestCnt in the task's structure */
        task->tc_IDNestCnt = SysBase->IDNestCnt;
        task->tc_SPReg = regs->gpr[1];

        /* And enable interrupts */
        SysBase->IDNestCnt = -1;
        __asm__ __volatile__("wrteei 1");

        /* TF_SWITCH flag set? Call the switch routine */
        if (task->tc_Flags & TF_SWITCH)
        {
            AROS_UFC1(void, task->tc_Switch,
                      AROS_UFCA(struct ExecBase *, SysBase, A6));
        }
    }

    core_Dispatch(regs);
}
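/*
 * For reference, a task opts into the tc_Switch/tc_Launch callbacks used
 * here and in core_Dispatch() from its own code, roughly like this
 * (illustrative sketch with hypothetical handler names):
 *
 *   struct Task *me = FindTask(NULL);
 *   me->tc_Switch = MySwitchHandler;   // runs when the task loses the CPU
 *   me->tc_Launch = MyLaunchHandler;   // runs when it is dispatched again
 *   me->tc_Flags |= TF_SWITCH | TF_LAUNCH;
 */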
/*
 * Schedule the currently running task away. Put it into the TaskReady list
 * in some smart way. This function is subject to change and will probably be
 * replaced by some plugin system in the future.
 */
void core_Schedule(regs_t *regs)
{
    struct ExecBase *SysBase = getSysBase();
    struct Task *task;

    if (SysBase)
    {
        /* Disable interrupts for a while */
        __asm__ __volatile__("wrteei 0"); // CLI

        task = SysBase->ThisTask;

        /* Clear the pending switch flag. */
        SysBase->AttnResched &= ~ARF_AttnSwitch;

        /* If the task has no pending exception, check whether it may keep the CPU;
         * otherwise reschedule it so that the dispatcher can deliver the exception. */
        if (!(task->tc_Flags & TF_EXCEPT))
        {
            /* Is TaskReady empty? If so, the running task is the only one. Let it work. */
            if (IsListEmpty(&SysBase->TaskReady))
                core_LeaveInterrupt(regs);

            /* Does the TaskReady list contain only tasks with priority equal to or lower
             * than the current one? If so, check further... */
            if (((struct Task *)GetHead(&SysBase->TaskReady))->tc_Node.ln_Pri <= task->tc_Node.ln_Pri)
            {
                /* If the running task has not used up its whole quantum yet, let it work */
                if (!(SysBase->SysFlags & 0x2000))
                {
                    core_LeaveInterrupt(regs);
                }
            }
        }

        /*
         * If we got here, rescheduling is necessary.
         * Put the task into the TaskReady list.
         */
        task->tc_State = TS_READY;
        Enqueue(&SysBase->TaskReady, (struct Node *)task);
    }

    /* Select a new task to run */
    core_Switch(regs);
}
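/*
 * Enqueue() keeps TaskReady ordered by descending ln_Pri, inserting the
 * task behind all nodes of equal priority; that is what gives equal-priority
 * tasks their round-robin behaviour. A rough sketch of the semantics
 * (illustrative only, not the exec.library implementation):
 *
 *   struct Node *n;
 *   for (n = list->lh_Head; n->ln_Succ; n = n->ln_Succ)
 *       if (n->ln_Pri < node->ln_Pri)
 *           break;                 // first node with strictly lower priority
 *   // link 'node' right before 'n' (or at the tail if none was found)
 */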
/*
 * Leave the interrupt. This function receives the register frame used to leave
 * supervisor mode. It never returns and reschedules the task if it was asked to.
 */
void core_ExitInterrupt(regs_t *regs)
{
    /* Power mode was on? Turn it off now. */
    regs->srr1 &= ~MSR_POW;

    /* Going back into supervisor mode? Then exit immediately. */
    if (!(regs->srr1 & MSR_PR))
    {
        core_LeaveInterrupt(regs);
    }
    else
    {
        /* Prepare to go back into user mode */
        struct ExecBase *SysBase = getSysBase();

        if (SysBase)
        {
            /* Soft interrupt requested? It's high time to do it. */
            if (SysBase->SysFlags & SFF_SoftInt)
                core_Cause(SysBase);

            /* If task switching is disabled, leave immediately */
            if (SysBase->TDNestCnt >= 0)
            {
                core_LeaveInterrupt(regs);
            }
            else
            {
                /*
                 * Do not disturb the task if it's not necessary.
                 * Reschedule only if the switch pending flag is set; exit otherwise.
                 */
                if (SysBase->AttnResched & ARF_AttnSwitch)
                {
                    core_Schedule(regs);
                }
                else
                    core_LeaveInterrupt(regs);
            }
        }
        else
            core_LeaveInterrupt(regs);
    }
}
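/*
 * Summary of the exit decision tree above:
 *
 *   supervisor context       -> leave immediately
 *   user context:
 *     SFF_SoftInt set        -> run the pending soft interrupt first
 *     TDNestCnt >= 0         -> task switching disabled, leave
 *     ARF_AttnSwitch set     -> core_Schedule() (may switch tasks)
 *     otherwise              -> leave
 */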
void core_Cause(struct ExecBase *SysBase)
{
    struct IntVector *iv = &SysBase->IntVects[INTB_SOFTINT];

    /* If the SoftInt vector in SysBase is set, call it. It will do the rest for us. */
    if (iv->iv_Code)
    {
        AROS_UFC5(void, iv->iv_Code,
                  AROS_UFCA(ULONG, 0, D1),
                  AROS_UFCA(ULONG, 0, A0),
                  AROS_UFCA(APTR, 0, A1),
                  AROS_UFCA(APTR, iv->iv_Code, A5),
                  AROS_UFCA(struct ExecBase *, SysBase, A6));
    }
}
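/*
 * The INTB_SOFTINT vector invoked above is normally installed by exec
 * itself, but for illustration a handler could be registered through
 * SetIntVector() (hypothetical handler name):
 *
 *   static struct Interrupt softint;
 *   softint.is_Node.ln_Name = "softint dispatch";
 *   softint.is_Code = MySoftIntDispatcher;
 *   SetIntVector(INTB_SOFTINT, &softint);
 */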