/* cbaos: kernel/sched.c */
/* Author: Domen Puncer <domen@cba.si>. License: WTFPL, see file LICENSE */
#include <sched.h>
#include <timekeeping.h>
#include <lock.h>
#include <compiler.h>

#include <stdio.h>
#include <assert.h>
/*
 * In scheduling, priority inversion is the scenario where a low-priority
 * task holds a shared resource that is required by a high-priority task.
 * This blocks the high-priority task until the low-priority task releases
 * the resource, effectively "inverting" the relative priorities of the two
 * tasks. If some medium-priority task that does not depend on the shared
 * resource tries to run in the interim, it takes precedence over both the
 * low-priority and the high-priority task.
 *
 * Is this a real-life problem, or just a symptom of bad design?
 */
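/*
 * A minimal sketch of that scenario, assuming this kernel's semaphore
 * primitives (struct sem with down()/up(), as referenced in the comments
 * below); the three tasks and their priorities are illustrative only.
 * Lower prio number = scheduled first (sched_run() scans from 0 up).
 */
#if 0
static struct sem resource;	/* shared by prio 0 and prio 2 */

static void low_task(u32 arg)	/* prio 2 */
{
	down(&resource);
	/* long critical section; mid_task can preempt us here */
	up(&resource);
}

static void mid_task(u32 arg)	/* prio 1: cpu-bound, needs no resource */
{
	while (1)
		;	/* starves low_task, and with it high_task */
}

static void high_task(u32 arg)	/* prio 0 */
{
	down(&resource);	/* blocked behind prio 2, which is behind prio 1 */
	up(&resource);
}
#endif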
/* TODO sched stats per task */
/* XXX should rethink all of this, write nicer code, write a faster up()
 * task wakeup, and nice test cases for tasks and semaphores (also for the
 * case: up() from irq that wakes the task that will replace &cba).
 */
#define sched_ctx_t struct lock
#define sched_lock lock
#define sched_unlock unlock
struct task cba;
#ifdef ARCH_UNIX
static u32 cba_stack[2048];
#else
static u32 cba_stack[32]; /* on cortex-m3, 16+2 actually seems enough */
#endif
u32 sched_time_started;

struct task *current;

struct list tasks_ready[TASK_PRIORITIES];
struct list tasks_waiting[TASK_PRIORITIES];
int task_new(struct task *task, void (*func)(u32 arg), u32 arg, int prio)
{
	int i;

	printf("%s: %s with prio:%i\n", __func__, task->name, prio);
	for (i = 0; i < task->stack_len; i++)
		task->stack[i] = MAGIC_STACK_POISON;

	assert(prio < TASK_PRIORITIES);
	task->prio = prio;	/* task_make_waiting()/task_wake() index by ->prio */

	arch_task_new(task, func, arg);

	/* the idle task never sits on a ready list */
	if (task == &cba)
		return 0;

	list_add_tail(&tasks_ready[prio], &task->list);

	return 0;
}
u32 ticks_now()
{
	return arch_ticks_now();
}
/* count the stack words used, i.e. those where the poison was overwritten */
static int stack_used(const struct task *task)
{
	int s;

	for (s = 0; s < task->stack_len; s++)
		if (task->stack[s] != MAGIC_STACK_POISON)
			break;
	return task->stack_len - s;
}

void task_printall()
{
	struct task *task;
	int i;

	printf("ERROR do not use this code in production\n");
	printf("ready tasks:\n");
	for (i = 0; i < TASK_PRIORITIES; i++) {
		struct list *list;

		list_for_each(&tasks_ready[i], list) {
			task = list_entry(list, struct task, list);
			printf("\t%s, stack:%i/%i\n", task->name,
					stack_used(task), task->stack_len);
		}
	}

	printf("waiting tasks:\n");
	for (i = 0; i < TASK_PRIORITIES; i++) {
		struct list *list;

		list_for_each(&tasks_waiting[i], list) {
			task = list_entry(list, struct task, list);
			printf("\t%s, stack:%i/%i, timeout in %i ticks\n",
					task->name, stack_used(task),
					task->stack_len,
					task->timeout - ticks_now());
		}
	}

	task = &cba;
	printf("%s, stack:%i/%i\n", task->name, stack_used(task), task->stack_len);
}
/* scheduler idea:
 * For each task priority there's a list of ready tasks and a list of
 * waiting tasks.
 */
/* one list for running tasks, and one for tasks waiting for events? */
/* // see below; an event system that's bound to tasks (timer->task, sem->task)? */
/* every task has a ->waiting list, and those tasks either have time_wake or ->sem set? */
// XXX was here
/* OK, so some insight that I got on the bus ride home:
 * cba task + 2 lists for every priority - ready, and waiting on an event.
 * The first task to run is always at the start of the ready list. When a
 * task has run and is scheduled again, it's put at the end of that list
 * (->prev->prev!) (something to protect task list operations will be
 * needed).
 * If a task calls sleep/down, we put it on the waiting list at the same
 * priority (how!?)
 * ..*/
/* must be called under sched_lock */
static void task_make_waiting(struct task *task)
{
	list_del(&task->list);
	list_add(&tasks_waiting[task->prio], &task->list);
}
/* make a waiting task runnable again */
void task_wake(struct task *task)
{
	sched_ctx_t ctx;

	sched_lock(&ctx);
	list_del(&task->list);
	list_add(&tasks_ready[task->prio], &task->list);
	sched_unlock(&ctx);
}
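/*
 * Sketch of the intended use from interrupt context (compare the XXX note
 * at the top about up() from irq waking a task); uart_rx_waiter and the
 * irq handler are hypothetical:
 */
#if 0
static struct task *uart_rx_waiter;

void uart_rx_irq(void)
{
	if (uart_rx_waiter)
		task_wake(uart_rx_waiter);	/* consumer becomes ready again */
}
#endif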
u32 next_event; /* tick of the nearest pending timeout; 0 is a special value meaning unused */
/* must be called under sched_lock */
/* some timeout happened, so wake the tasks and set the next timeout */
static int sched_process_timeout(u32 now)
{
	int i;
	int woken = 0;

	next_event = 0;
	for (i = 0; i < TASK_PRIORITIES; i++) {
		struct list *list, *list2;

		list_for_each_safe(&tasks_waiting[i], list, list2) {
			struct task *task = list_entry(list, struct task, list);

			if (time_after(now, task->timeout)) {
				list_del(list);
				list_add(&tasks_ready[i], list); /* add to start */
				/* don't care about task->timeout anymore, since it's running */
				woken++;
				continue;
			}
			if (next_event == 0 || time_before(task->timeout, next_event))
				next_event = task->timeout;
		}
	}

	return woken;
}
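/*
 * Note: the comparisons above only survive u32 tick wraparound if
 * time_after()/time_before() compare via signed difference. A sketch of
 * the semantics this file assumes (the real macros live elsewhere in the
 * tree):
 */
#if 0
#define time_after(a, b)	((s32)((b) - (a)) < 0)	/* a is later than b */
#define time_before(a, b)	time_after(b, a)
#endif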
/* must be called under sched_lock */
/*
 * schedule the next scheduler activation
 * @runnable: 1 if any task is ready to run
 * @now: current timestamp
 */
static void sched_next_wake(int runnable, u32 now)
{
	u32 timeo;

	if (runnable) {
		/* wake at the earlier of the next timeout and the end of the
		 * time slice, e.g. with a 10 ms slice and an event 3 ms out,
		 * the next interrupt fires in 3 ms */
		if (next_event && time_before(next_event, now + ms2ticks(TIME_SLICE)))
			timeo = next_event;
		else
			timeo = now + ms2ticks(TIME_SLICE);
	} else {
		if (!next_event)
			fprintf(stderr, "%s: BUG, no events pending and no runnable tasks\n", __func__);
		timeo = next_event;
	}

	arch_sched_next_interrupt(timeo - now);
}
/* must be called under sched_lock */
static void sched_run()
{
	struct task *task = NULL;
	int i;
	int runnable = 1;
	u32 now = ticks_now();

	timekeeping(now);

	// fprintf(stderr, "%s:%i\n", __func__, ticks_now() - sched_time_started);

	/* first wake tasks whose timeout expired */
	/* FIXME, i don't like this. It isn't very clear */
	if (next_event) {
		/* an event woke us, so wake the waiting tasks */
		if (time_before(next_event, now))
			sched_process_timeout(now);
	}

	/* find a task to run */
	for (i = 0; i < TASK_PRIORITIES; i++) {
		struct list *list;

		/* it's ok to use the non-safe variant, since we stop traversing on hit */
		list_for_each(&tasks_ready[i], list) {
			task = list_entry(list, struct task, list);
			/* rotate the picked task to the list tail (round-robin) */
			list_del(list);
			list_add_tail(&tasks_ready[i], list);
			goto picked_next_task;
		}
	}

	task = &cba;
	/* no task is in the ready state, wait for the next event */
	runnable = 0;

picked_next_task:
	// fprintf(stderr, "%s: switching to %s, runnable: %i\n", __func__, task->name, runnable);

	sched_next_wake(runnable, now);

	/* the same task means it's the only ready one at this priority */
	/* this also means we could just disable preemption */
	if (current == task)
		return;

	arch_task_switch(task);
}
/*
 * sched_timeout
 * @ticks: ticks to sleep for
 * @return: 0 if we slept till the end, or the remaining number of ticks to
 *          sleep if we were woken by an event
 */
u32 sched_timeout(u32 ticks)
{
	u32 now;
	sched_ctx_t ctx;

	sched_lock(&ctx);
	now = ticks_now();

	if (current->dont_reschedule) {
		//fprintf(stderr, "%s: not rescheduling %s\n", __func__, current->name);
		sched_unlock(&ctx);
		return ticks;
	}

	current->timeout = now + ticks;
	/* 0 is a special value, meaning there's no next event */
	if (current->timeout == 0)
		current->timeout = 1;

	task_make_waiting(current);
	//fprintf(stderr, "%s: %s will wait\n", __func__, current->name);

	/* ugh, but only if we're already waiting on a timeout */
	if (next_event == 0 || time_before(current->timeout, next_event))
		next_event = current->timeout;

	sched_run();
	sched_unlock(&ctx);

	now = ticks_now();
	/* we didn't expire -> some event happened! */
	if (time_before(now, current->timeout))
		return current->timeout - now;
	return 0;
}
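/*
 * Usage sketch for sched_timeout(): wait for an event with a deadline.
 * event_pending and wait_for_event are hypothetical; the producer side
 * would set the flag and call task_wake() on the sleeper:
 */
#if 0
static volatile int event_pending;

static int wait_for_event(u32 ticks)
{
	current->dont_reschedule = 0;	/* see the msleep() hack below */
	while (!event_pending) {
		ticks = sched_timeout(ticks);
		if (ticks == 0)
			return -1;	/* deadline expired */
	}
	return 0;	/* event arrived in time */
}
#endif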
/* voluntarily give up cpu time to other tasks (of the same priority) */
void sched_yield()
{
	sched_ctx_t ctx;

	sched_lock(&ctx);
	sched_run();
	sched_unlock(&ctx);
}
u32 msleep(u32 msecs)
{
	u32 ticks;

	/* gah, this hack is ugly */
	current->dont_reschedule = 0;

	/* sleep in 1 s chunks, presumably so ms2ticks() can't overflow for
	 * large arguments */
	while (msecs > 1000) {
		msecs -= 1000;
		ticks = sched_timeout(ms2ticks(1000));
		if (ticks)
			return ticks2ms(ticks) + msecs;
	}

	ticks = sched_timeout(ms2ticks(msecs));
	return ticks2ms(ticks);
}
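/*
 * msleep() usage sketch: a non-zero return is the part of the sleep that
 * was left when an event woke us early; msleep_example is illustrative:
 */
#if 0
static void msleep_example(void)
{
	u32 left = msleep(250);	/* sleep 250 ms */

	if (left)
		printf("woken %u ms early\n", (unsigned)left);
}
#endif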
void sched_interrupt()
{
	sched_ctx_t ctx;

	// fprintf(stderr, "%s: now:%i\n", __func__, ticks_now());
	sched_lock(&ctx);

	sched_run();

	sched_unlock(&ctx);
}
/* never use this for anything but testing */
/* XXX rename to wait_until, implement a real mdelay */
void mdelay(int msecs)
{
	u32 now = ticks_now();
	u32 timeo = now + ms2ticks(msecs);

	/* busy-wait: burns cpu instead of scheduling */
	while (time_before(ticks_now(), timeo))
		;
}
void sched_init()
{
	int i;

	for (i = 0; i < TASK_PRIORITIES; i++) {
		list_init(&tasks_ready[i]);
		list_init(&tasks_waiting[i]);
	}
}
static void sched_thread_cba(u32 arg)
{
	/* running on the private stack now; final initializations, enable systick */
	arch_sched_start_timer();

	sched_time_started = arch_ticks_now();

	while (1) {
		arch_wait_for_interrupt();
	}
}
void __noreturn sched_start()
{
	cba = (struct task) {
		.name = "cba",
		.stack = cba_stack,
		.stack_len = ALEN(cba_stack),
	};

	task_new(&cba, sched_thread_cba, 0, 0);

	arch_task_first(&cba);

	/* will never return; also, this stack space is now free */
	for (;;);
}
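/*
 * Typical boot sequence, pieced together from this file; the blink task,
 * its stack size and its priority are illustrative only:
 */
#if 0
static struct task blink;
static u32 blink_stack[128];

static void blink_func(u32 arg)
{
	while (1)
		msleep(500);	/* toggle an led here, etc. */
}

int main(void)
{
	sched_init();

	blink = (struct task) {
		.name = "blink",
		.stack = blink_stack,
		.stack_len = ALEN(blink_stack),
	};
	task_new(&blink, blink_func, 0, 1);

	sched_start();	/* never returns */
}
#endif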