/* tangerine.git: arch/x86_64-pc/kernel/scheduler.c */
#include <inttypes.h>

#include "exec_intern.h"
#include "etask.h"

#include <exec/lists.h>
#include <exec/types.h>
#include <exec/tasks.h>
#include <exec/execbase.h>
#include <aros/libcall.h>
#include <asm/segments.h>

#include "kernel_intern.h"
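
/*
 * Overview: the AROS_LH* functions below are the kernel.resource entry points
 * exposed to the rest of the system. Each scheduling call simply raises the
 * int $0x80 software interrupt with an SC_* code in %rax; the interrupt handler
 * (outside this file) is then expected to run the matching core_* routine below
 * in supervisor mode. The SC_* codes, regs_t, core_Cause() and
 * core_LeaveInterrupt() are assumed to be provided by kernel_intern.h.
 */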
AROS_LH0(KRN_SchedType, KrnGetScheduler,
         struct KernelBase *, KernelBase, 1, Kernel)
{
    AROS_LIBFUNC_INIT

    return SCHED_RR;

    AROS_LIBFUNC_EXIT
}
AROS_LH1(void, KrnSetScheduler,
         AROS_LHA(KRN_SchedType, sched, D0),
         struct KernelBase *, KernelBase, 2, Kernel)
{
    AROS_LIBFUNC_INIT

    /* Cannot set scheduler yet */

    AROS_LIBFUNC_EXIT
}
AROS_LH0(void, KrnCause,
         struct KernelBase *, KernelBase, 3, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_CAUSE):"memory");

    AROS_LIBFUNC_EXIT
}
AROS_LH0(void, KrnDispatch,
         struct KernelBase *, KernelBase, 4, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_DISPATCH):"memory");

    AROS_LIBFUNC_EXIT
}
AROS_LH0(void, KrnSwitch,
         struct KernelBase *, KernelBase, 5, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_SWITCH):"memory");

    AROS_LIBFUNC_EXIT
}
AROS_LH0(void, KrnSchedule,
         struct KernelBase *, KernelBase, 6, Kernel)
{
    AROS_LIBFUNC_INIT

    asm volatile("int $0x80"::"a"(SC_SCHEDULE):"memory");

    AROS_LIBFUNC_EXIT
}
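
/*
 * Illustration only: a minimal sketch (not part of the original file) of how the
 * int $0x80 handler might route the SC_* codes raised above to the core_*
 * routines below. The handler name and the way the code is passed in are
 * assumptions; the real handler lives elsewhere in the kernel.
 */
#if 0
static void syscall_handler_sketch(regs_t *regs, uint64_t sc_code)
{
    switch (sc_code)
    {
        case SC_SCHEDULE: core_Schedule(regs); break;  /* voluntary reschedule      */
        case SC_SWITCH:   core_Switch(regs);   break;  /* save context, then pick   */
        case SC_DISPATCH: core_Dispatch(regs); break;  /* pick a ready task and run */
        /* SC_CAUSE would deliver pending soft interrupts (core_Cause) */
    }
}
#endif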
/*
 * Task dispatcher. Basically it may be the same one no matter what scheduling
 * algorithm is used.
 */
void core_Dispatch(regs_t *regs)
{
    struct ExecBase *SysBase;
    struct Task *task;

    SysBase = *(struct ExecBase **)4UL;   /* ExecBase pointer is kept at absolute address 4 (Amiga convention) */

    __asm__ __volatile__("cli;");

    /*
     * Is the list of ready tasks empty? Then increment the idle switch count and
     * halt the CPU. This should be extended by some plugin mechanism which would
     * put the CPU and the whole machine into more sophisticated sleep states (ACPI?).
     */
    while (IsListEmpty(&SysBase->TaskReady))
    {
        SysBase->IdleCount++;
        SysBase->AttnResched |= ARF_AttnSwitch;

        /* Sleep almost forever ;) */
        __asm__ __volatile__("sti; hlt; cli");

        if (SysBase->SysFlags & SFF_SoftInt)
        {
            core_Cause(SysBase);
        }
    }

    SysBase->DispCount++;

    /* Get the first task from the TaskReady list and publish its settings through SysBase */
    task = (struct Task *)REMHEAD(&SysBase->TaskReady);
    SysBase->ThisTask = task;
    SysBase->Elapsed = SysBase->Quantum;
    SysBase->SysFlags &= ~0x2000;   /* clear the "quantum elapsed" flag (tested in core_Schedule below) */
    task->tc_State = TS_RUN;
    SysBase->IDNestCnt = task->tc_IDNestCnt;

    /* Handle the task's flags */
    if (task->tc_Flags & TF_EXCEPT)
        Exception();

    if (task->tc_Flags & TF_LAUNCH)
    {
        AROS_UFC1(void, task->tc_Launch,
                  AROS_UFCA(struct ExecBase *, SysBase, A6));
    }

    /* Restore the task's state */
    bcopy(GetIntETask(task)->iet_Context, regs, sizeof(regs_t));

    /* Restore the FPU, MMX and XMM state */
#warning FIXME: Change to the lazy saving of the XMM state!!!!
    IPTR sse_ctx = ((IPTR)GetIntETask(task)->iet_Context + sizeof(regs_t) + 15) & ~15;
    asm volatile("fxrstor (%0)"::"D"(sse_ctx));

    /* Leave the interrupt and jump to the new task */
    core_LeaveInterrupt(regs);
}
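
/*
 * core_Switch: save the state of the task that is currently running, then pick
 * the next task to run via core_Dispatch(). Called with the register frame of
 * the interrupted task.
 */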
void core_Switch(regs_t *regs)
{
    struct ExecBase *SysBase;
    struct Task *task;

    /* Disable interrupts for a while */
    __asm__ __volatile__("cli; cld;");

    SysBase = *(struct ExecBase **)4UL;
    task = SysBase->ThisTask;

    /* Copy the current task's context into the ETask structure */
    bcopy(regs, GetIntETask(task)->iet_Context, sizeof(regs_t));

    /* Copy the FPU, MMX and XMM state */
#warning FIXME: Change to the lazy saving of the XMM state!!!!
    IPTR sse_ctx = ((IPTR)GetIntETask(task)->iet_Context + sizeof(regs_t) + 15) & ~15;
    asm volatile("fxsave (%0)"::"D"(sse_ctx));

    /* Store IDNestCnt in the task's structure */
    task->tc_IDNestCnt = SysBase->IDNestCnt;
    task->tc_SPReg = regs->return_rsp;

    /* And enable interrupts */
    SysBase->IDNestCnt = -1;
    __asm__ __volatile__("sti;");

    /* TF_SWITCH flag set? Call the switch routine */
    if (task->tc_Flags & TF_SWITCH)
    {
        AROS_UFC1(void, task->tc_Switch,
                  AROS_UFCA(struct ExecBase *, SysBase, A6));
    }

    core_Dispatch(regs);
}
/*
 * Schedule the currently running task away. Put it into the TaskReady list
 * in some smart way. This function is subject to change and will probably be
 * replaced by some plugin system in the future.
 */
void core_Schedule(regs_t *regs)
{
    struct ExecBase *SysBase;
    struct Task *task;

    /* Disable interrupts for a while */
    __asm__ __volatile__("cli");

    SysBase = *(struct ExecBase **)4UL;
    task = SysBase->ThisTask;

    /* Clear the pending switch flag. */
    SysBase->AttnResched &= ~ARF_AttnSwitch;

    /* If the task has a pending exception, reschedule it so that the dispatcher may handle the exception */
    if (!(task->tc_Flags & TF_EXCEPT))
    {
        /* Is TaskReady empty? If yes, the running task is the only one. Let it work */
        if (IsListEmpty(&SysBase->TaskReady))
            core_LeaveInterrupt(regs);

        /* Does the TaskReady list contain only tasks with priority equal to or lower than
         * the current task? If so, check further... */
        if (((struct Task *)GetHead(&SysBase->TaskReady))->tc_Node.ln_Pri <= task->tc_Node.ln_Pri)
        {
            /* If the running task has not used up its whole quantum yet, let it work */
            if (!(SysBase->SysFlags & 0x2000))
            {
                core_LeaveInterrupt(regs);
            }
        }
    }

    /*
     * If we got here, rescheduling is necessary.
     * Put the task into the TaskReady list (Enqueue() inserts it by priority).
     */
    task->tc_State = TS_READY;
    Enqueue(&SysBase->TaskReady, (struct Node *)task);

    /* Select a new task to run */
    core_Switch(regs);
}
/*
 * Leave the interrupt. This function receives the register frame used to leave
 * supervisor mode. It never returns, and reschedules the task if asked to.
 */
void core_ExitInterrupt(regs_t *regs)
{
    struct ExecBase *SysBase;

    /* Going back into supervisor mode? Then exit immediately */
    if (regs->ds == KERNEL_DS)
    {
        core_LeaveInterrupt(regs);
    }
    else
    {
        /* Prepare to go back into user mode */
        SysBase = *(struct ExecBase **)4UL;

        /* Soft interrupt requested? It's high time to do it */
        if (SysBase->SysFlags & SFF_SoftInt)
            core_Cause(SysBase);

        /* If task switching is disabled, leave immediately */
        if (SysBase->TDNestCnt >= 0)
        {
            core_LeaveInterrupt(regs);
        }
        else
        {
            /*
             * Do not disturb the task if it is not necessary.
             * Reschedule only if the switch pending flag is set. Exit otherwise.
             */
            if (SysBase->AttnResched & ARF_AttnSwitch)
            {
                core_Schedule(regs);
            }
            else
                core_LeaveInterrupt(regs);
        }
    }
}