1 /* Author: Domen Puncer <domen@cba.si>. License: WTFPL, see file LICENSE */
3 #include <timekeeping.h>
11 * In scheduling, priority inversion is the scenario where a low priority task holds a shared resource that is required by a high priority task. This causes the execution of the high priority task to be blocked until the low priority task has released the resource, effectively "inverting" the relative priorities of the two tasks. If some other medium priority task, one that does not depend on the shared resource, attempts to run in the interim, it will take precedence over both the low priority task and the high priority task.
13 * is this a real-life problem, or just a bad-design problem?
16 /* TODO sched stats per task */
18 /* XXX should rethink all of this, write nicer code, write faster up() task
19 * wakeup and nice test cases for tasks and semaphores (also for the case:
20 * up() from irq that wakes the task that will replace &cba
24 #define sched_ctx_t struct lock
25 #define sched_lock lock
26 #define sched_unlock unlock
30 static u32 cba_stack
[2048];
32 static u32 cba_stack
[32]; /* on cortex-m3 16+2 actually seems enough */
35 u32 sched_time_started
;
40 struct list tasks_ready
[TASK_PRIORITIES
];
41 struct list tasks_waiting
[TASK_PRIORITIES
];
43 int task_new(struct task
*task
, void (*func
)(u32 arg
), u32 arg
, int prio
)
47 printf("%s: %s with prio:%i\n", __func__
, task
->name
, prio
);
48 for (i
=0; i
<task
->stack_len
; i
++)
49 task
->stack
[i
] = MAGIC_STACK_POISON
;
51 assert(prio
< TASK_PRIORITIES
);
53 arch_task_new(task
, func
, arg
);
58 list_add_tail(&tasks_ready
[prio
], &task
->list
);
65 return arch_ticks_now();
/*
 * Debug dump of per-task stack usage.  For every ready and waiting task
 * it scans the poisoned stack for the first overwritten word and prints
 * the high-water mark; waiting tasks also report ticks until wakeup,
 * and finally the built-in "cba" task is reported.
 * NOTE(review): this copy is corrupted -- the function signature, the
 * inner `for (s...)` scan loops for the first two sections, and the
 * closing braces are missing; the fused numbers below are the original
 * file's line numbers.  Recover the full body from version control.
 */
72 printf("ERROR do not use this code in production\n");
73 printf("ready tasks:\n");
/* walk every priority's ready list */
74 for (i
=0; i
<TASK_PRIORITIES
; i
++) {
77 list_for_each(&tasks_ready
[i
], list
) {
78 struct task
*task
= list_entry(list
, struct task
, list
);
80 int len
= task
->stack_len
;
/* first stack word no longer holding the poison pattern marks the
 * deepest observed use: len - s words were consumed */
83 if (task
->stack
[s
] != MAGIC_STACK_POISON
)
85 printf("\t%s, stack:%i/%i\n", task
->name
, len
-s
, len
);
/* same scan for tasks blocked on a timeout/event */
89 printf("waiting tasks:\n");
90 for (i
=0; i
<TASK_PRIORITIES
; i
++) {
93 list_for_each(&tasks_waiting
[i
], list
) {
94 struct task
*task
= list_entry(list
, struct task
, list
);
96 int len
= task
->stack_len
;
99 if (task
->stack
[s
] != MAGIC_STACK_POISON
)
/* waiting tasks additionally show ticks remaining until wakeup */
101 printf("\t%s, stack:%i/%i, timeout in %i ticks\n", task
->name
, len
-s
, len
, task
->timeout
-ticks_now());
/* finally, the scheduler's own "cba" task */
105 struct task
*task
= &cba
;
107 int len
= task
->stack_len
;
109 for (s
=0; s
<len
; s
++)
110 if (task
->stack
[s
] != MAGIC_STACK_POISON
)
112 printf("%s, stack:%i/%i\n", task
->name
, len
-s
, len
);
116 * For each task priority there's a list of running and waiting tasks.
119 /* list for running tasks, and list for tasks waiting for events? */
120 /* // see below; event system, that's bound to tasks (timer->task, sem->task)? */
121 /* every task has ->waiting list, then those task either have time_wake or ->sem set? */
124 /* OK, so some insight that i got on the bus ride home:
125 * cba task + 2 lists for every priority - ready and waiting on event
126 * first task to run is always at the start of running list. when a task
127 * (has been run) is scheduled, it's put at the end of that list (->prev->prev!)
128 * (something to protect task list operations will be needed).
129 * if a task calls sleep/down, then we put it on other list at same prio (how!?)
132 /* must be called under sched_lock */
133 static void task_make_waiting(struct task
*task
)
135 list_del(&task
->list
);
136 list_add(&tasks_waiting
[task
->prio
], &task
->list
);
/*
 * Make @task runnable again: unlink it from whatever list it is on and
 * put it at the head of its priority's ready list, so it runs as soon
 * as possible.
 * NOTE(review): original lines 140-143 and 146+ are missing from this
 * copy (opening brace, probably sched_lock/sched_unlock around the list
 * manipulation -- unlike task_make_waiting this function is not marked
 * "must be called under sched_lock" -- and the closing brace).  Recover
 * them from version control.
 */
139 void task_wake(struct task
*task
)
144 list_del(&task
->list
);
/* head insertion: a freshly woken task is scheduled before its peers */
145 list_add(&tasks_ready
[task
->prio
], &task
->list
);
/* Absolute tick time of the earliest pending timeout across all
 * waiting tasks. */
151 u32 next_event
; /* 0 is a special value meaning unused */
153 /* must be called under sched_lock */
154 /* some timeout happened, so wake the tasks, and set the next timeout */
/* NOTE(review): original lines 155-160, 166, 168-171 and 174-179 are
 * missing from this copy (function opening and locals, presumably a
 * `list_del(list)` before the ready-list insert, brace structure, and
 * the return statement of this `int`-returning function).  Recover
 * them from version control. */
155 static int sched_process_timeout(u32 now
)
/* scan every priority's waiting list */
161 for (i
=0; i
<TASK_PRIORITIES
; i
++) {
162 struct list
*list
, *list2
;
/* safe variant: entries may be moved to the ready list mid-walk */
163 list_for_each_safe(&tasks_waiting
[i
], list
, list2
) {
164 struct task
*task
= list_entry(list
, struct task
, list
);
/* this task's timeout has already passed -> make it runnable */
165 if (time_after(now
, task
->timeout
)) {
167 list_add(&tasks_ready
[i
], list
); /* add to start */
168 /* don't care about task->timeout anymore, since it's running */
/* still waiting: track the soonest remaining timeout */
172 if (next_event
== 0 || time_before(task
->timeout
, next_event
))
173 next_event
= task
->timeout
;
180 /* must be called under sched_lock */
/* NOTE(review): the doc-comment opener, the local `timeo` declaration
 * and the if/else branch structure (orig. lines 181, 185, 187-191,
 * 193-194, 196, 198-200, 202-204) are missing from this copy; both
 * visible assignments set `timeo` to one time slice, so the branch that
 * picked `next_event` itself was presumably lost.  Recover from VCS. */
182 * schedule next scheduler activation
183 * @runnable: 1 if any task is ready to run
184 * @now: current timestamp
186 static void sched_next_wake(int runnable
, u32 now
)
/* is the pending event sooner than one preemption time slice? */
192 if (time_before(next_event
, now
+ ms2ticks(TIME_SLICE
)))
195 timeo
= now
+ ms2ticks(TIME_SLICE
);
197 timeo
= now
+ ms2ticks(TIME_SLICE
);
/* nothing runnable and nothing pending should be impossible */
201 fprintf(stderr
, "%s: BUG, no events pending and no runnable tasks\n", __func__
);
/* program the hardware timer with the chosen relative delay */
205 arch_sched_next_interrupt(timeo
-now
);
208 /* must be called under sched_lock */
/*
 * Core of the scheduler: wake any timed-out tasks, pick the next task
 * to run (lowest priority index first, round-robin within a priority)
 * and switch to it.
 * NOTE(review): many original lines are missing from this copy --
 * locals (`i`, `list`, `runnable`), the `picked_next_task:` label
 * targeted by the goto, the `list_del` matching the re-queue at line
 * 235, how `runnable` is computed, and the brace structure (orig. lines
 * ~210-257).  Recover the full body from version control.
 */
209 static void sched_run()
211 struct task
*task
= NULL
;
214 u32 now
= ticks_now();
218 // fprintf(stderr, "%s:%i\n", __func__, ticks_now() - sched_time_started);
220 /* first wake tasks */
221 /* FIXME, i don't like this. It isn't very clear */
223 /* event woke us, so wake the waiting tasks */
224 if (time_before(next_event
, now
))
225 sched_process_timeout(now
);
228 /* find a task to run */
229 for (i
=0; i
<TASK_PRIORITIES
; i
++) {
231 /* it's ok to use non-safe variant, since we stop traversing on hit */
232 list_for_each(&tasks_ready
[i
], list
) {
233 task
= list_entry(list
, struct task
, list
);
/* re-queue the chosen task at the tail for round-robin fairness;
 * the matching list_del (orig. line 234) is missing from this copy */
235 list_add_tail(&tasks_ready
[i
], list
);
236 goto picked_next_task
;
242 /* no task is in ready state, wait for next event */
246 // fprintf(stderr, "%s: switching to %s, runnable: %i\n", __func__, task->name, runnable);
/* program the next scheduler wakeup before switching away */
248 sched_next_wake(runnable
, now
);
250 /* this would mean it's the only task at this priority */
251 /* this also means we could just disable preemption */
255 arch_task_switch(task
);
/*
 * Block the current task for @ticks scheduler ticks, or until an event
 * wakes it.
 * NOTE(review): the doc-comment opener, locals (`now` is used but never
 * declared in what survives), the lock/unlock and the actual call that
 * triggers rescheduling (orig. lines 263-271, 274-277, 282, 285,
 * 289-295, 299-301) are missing from this copy.  Recover from VCS.
 */
260 * @ticks: ticks to sleep for
261 * @return: 0 if we slept till the end, or the remaining number of ticks to
262 * sleep if we were woken by an event
264 u32
sched_timeout(u32 ticks
)
/* fast path: rescheduling explicitly suppressed (msleep() clears this
 * flag before sleeping) */
272 if (current
->dont_reschedule
) {
273 //fprintf(stderr, "%s: not rescheduling %s\n", __func__, current->name);
/* arm this task's absolute wakeup time */
278 current
->timeout
= now
+ ticks
;
279 /* 0 is a special value, meaning there's no next event */
280 if (current
->timeout
== 0)
281 current
->timeout
= 1;
283 task_make_waiting(current
);
284 //fprintf(stderr, "%s: %s will wait\n", __func__, current->name);
286 /* ugh, but only if we're already waiting on timeout */
287 if (next_event
== 0 || time_before(current
->timeout
, next_event
)) {
288 next_event
= current
->timeout
;
296 /* we didn't expire -> some event happened! */
297 if (time_before(now
, current
->timeout
))
298 return current
->timeout
-now
;
302 /* voluntary give up cpu time to other tasks (of same priority) */
312 u32
msleep(u32 msecs
)
315 /* gah, this hack is ugly */
316 current
->dont_reschedule
= 0;
317 while (msecs
> 1000) {
319 ticks
= sched_timeout(ms2ticks(1000));
321 return ticks2ms(ticks
) + msecs
;
323 ticks
= sched_timeout(ms2ticks(msecs
));
324 return ticks2ms(ticks
);
/* Scheduler timer-interrupt entry point.
 * NOTE(review): the entire body (orig. lines 328-337) is missing from
 * this copy except one commented-out trace line; recover it from
 * version control. */
327 void sched_interrupt()
329 // fprintf(stderr, "%s: now:%i\n", __func__, ticks_now());
338 /* never use this for anything but testing */
339 // XXX rename to wait_until, implement real mdelay */
340 void mdelay(int msecs
)
342 u32 now
= ticks_now();
343 u32 timeo
= now
+ ms2ticks(msecs
);
345 while (time_before(ticks_now(), timeo
))
/* Fragment of the scheduler init routine -- its signature and opening
 * (orig. lines 349-352) are missing from this copy.  Initializes the
 * empty per-priority ready and waiting lists. */
353 for (i
=0; i
<TASK_PRIORITIES
; i
++) {
354 list_init(&tasks_ready
[i
]);
355 list_init(&tasks_waiting
[i
]);
359 static void sched_thread_cba(u32 arg
)
361 /* running on private stack now, final initializations, enable systick */
362 arch_sched_start_timer();
364 sched_time_started
= arch_ticks_now();
367 arch_wait_for_interrupt();
/*
 * Hand control to the scheduler; never returns.
 * NOTE(review): several fields of the `cba` compound-literal initializer
 * (orig. lines 374-375, 377-378 -- presumably .name and
 * .stack = cba_stack, given .stack_len uses ALEN(cba_stack)) and the
 * brace structure are missing from this copy.  Recover from VCS.
 */
371 void __noreturn
sched_start()
/* build the bootstrap/idle task on the statically allocated stack */
373 cba
= (struct task
) {
376 .stack_len
= ALEN(cba_stack
),
/* register it at priority 0 -- the index sched_run() searches first */
379 task_new(&cba
, sched_thread_cba
, 0, 0);
/* switch to it: from here on execution runs on cba's stack */
381 arch_task_first(&cba
);
383 /* will never return; also, this stack space is now free */