coreboot.git / src/lib/thread.c
/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <bootstate.h>
#include <console/console.h>
#include <smp/node.h>
#include <thread.h>
#include <timer.h>
#include <types.h>

static u8 thread_stacks[CONFIG_STACK_SIZE * CONFIG_NUM_THREADS] __aligned(sizeof(uint64_t));
static bool initialized;

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage state machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

static struct thread *active_thread;

static inline int thread_can_yield(const struct thread *t)
{
	return (t != NULL && t->can_yield > 0);
}

static inline void set_current_thread(struct thread *t)
{
	assert(boot_cpu());
	active_thread = t;
}

static inline struct thread *current_thread(void)
{
	if (!initialized || !boot_cpu())
		return NULL;

	return active_thread;
}

static inline int thread_list_empty(struct thread **list)
{
	return *list == NULL;
}

static inline struct thread *pop_thread(struct thread **list)
{
	struct thread *t;

	t = *list;
	*list = t->next;
	t->next = NULL;
	return t;
}

static inline void push_thread(struct thread **list, struct thread *t)
{
	t->next = *list;
	*list = t;
}

static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}

static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}

static inline struct thread *get_free_thread(void)
{
	struct thread *t;

	if (thread_list_empty(&free_threads))
		return NULL;

	t = pop_thread(&free_threads);

	/* Reset the current stack value to the original. */
	if (!t->stack_orig)
		die("%s: Invalid stack value\n", __func__);

	t->stack_current = t->stack_orig;

	return t;
}

static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}

/* The idle thread is run whenever there isn't anything else that is runnable.
 * Its sole responsibility is to ensure progress is made by running the timer
 * callbacks. */
__noreturn static enum cb_err idle_thread(void *unused)
{
	/* This thread never voluntarily yields. */
	thread_coop_disable();
	while (1)
		timers_run();
}

static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL we need to find a new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}

	if (t->handle)
		t->handle->state = THREAD_STARTED;

	set_current_thread(t);

	switch_to_thread(t->stack_current, &current->stack_current);
}

static void terminate_thread(struct thread *t, enum cb_err error)
{
	if (t->handle) {
		t->handle->error = error;
		t->handle->state = THREAD_DONE;
	}

	free_thread(t);
	schedule(NULL);
}

static asmlinkage void call_wrapper(void *unused)
{
	struct thread *current = current_thread();
	enum cb_err error;

	error = current->entry(current->entry_arg);

	terminate_thread(current, error);
}

struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};

/* Block the provided state until the thread is complete. */
static asmlinkage void call_wrapper_block_state(void *arg)
{
	struct block_boot_state *bbs = arg;
	struct thread *current = current_thread();
	enum cb_err error;

	boot_state_block(bbs->state, bbs->seq);
	error = current->entry(current->entry_arg);
	boot_state_unblock(bbs->state, bbs->seq);
	terminate_thread(current, error);
}

/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
 * Within thread_entry() it will call func(arg). */
static void prepare_thread(struct thread *t, struct thread_handle *handle,
			   enum cb_err (*func)(void *), void *arg,
			   asmlinkage void (*thread_entry)(void *), void *thread_arg)
{
	/* Stash the function and argument to run. */
	t->entry = func;
	t->entry_arg = arg;

	/* All new threads can yield by default. */
	t->can_yield = 1;

	/* Pointer used to publish the state of the thread. */
	t->handle = handle;

	arch_prepare_thread(t, thread_entry, thread_arg);
}

static void thread_resume_from_timeout(struct timeout_callback *tocb)
{
	struct thread *to;

	to = tocb->priv;
	schedule(to);
}

static void idle_thread_init(void)
{
	struct thread *t;

	t = get_free_thread();

	if (t == NULL)
		die("No threads available for idle thread!\n");

	/* Queue idle thread to run once all other threads have yielded. */
	prepare_thread(t, NULL, idle_thread, NULL, call_wrapper, NULL);
	push_runnable(t);
}

/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb,
			    unsigned int microsecs)
{
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);
	return 0;
}

static void *thread_alloc_space(struct thread *t, size_t bytes)
{
	/* Allocate the amount of space on the stack keeping the stack
	 * aligned to the pointer size. */
	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

	return (void *)t->stack_current;
}

static void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;

	if (initialized)
		return;

	t = &all_threads[0];

	set_current_thread(t);

	t->stack_orig = 0; /* We never free the main thread */
	t->id = 0;
	t->can_yield = 1;

	stack_top = &thread_stacks[CONFIG_STACK_SIZE];
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		t->id = i;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();

	initialized = 1;
}

int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg)
{
	struct thread *current;
	struct thread *t;

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	prepare_thread(t, handle, func, arg, call_wrapper, NULL);
	schedule(t);

	return 0;
}

int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg,
		     boot_state_t state, boot_state_sequence_t seq)
{
	struct thread *current;
	struct thread *t;
	struct block_boot_state *bbs;

	/* This is a ramstage specific API */
	if (!ENV_RAMSTAGE)
		dead_code();

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	bbs = thread_alloc_space(t, sizeof(*bbs));
	bbs->state = state;
	bbs->seq = seq;
	prepare_thread(t, handle, func, arg, call_wrapper_block_state, bbs);
	schedule(t);

	return 0;
}

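/*
 * Usage sketch (editor's illustration, not part of the original file): like
 * thread_run(), but the boot state machine will not pass the given
 * state/sequence point until the worker finishes.  init_storage() is a
 * hypothetical worker; BS_DEV_ENUMERATE/BS_ON_ENTRY are existing boot state
 * identifiers used here only as an example barrier.
 *
 *	static struct thread_handle storage_handle;
 *
 *	thread_run_until(&storage_handle, init_storage, NULL,
 *			 BS_DEV_ENUMERATE, BS_ON_ENTRY);
 */
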
int thread_yield(void)
{
	return thread_yield_microseconds(0);
}

int thread_yield_microseconds(unsigned int microsecs)
{
	struct thread *current;
	struct timeout_callback tocb;

	current = current_thread();

	if (!thread_can_yield(current))
		return -1;

	if (thread_yield_timed_callback(&tocb, microsecs))
		return -1;

	return 0;
}

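/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * worker passed to thread_run() can poll slow hardware and yield the CPU
 * back to the boot flow while waiting.  device_ready() is a hypothetical
 * readiness check; the stopwatch helpers come from <timer.h>.
 *
 *	static enum cb_err wait_for_device(void *arg)
 *	{
 *		struct stopwatch sw;
 *
 *		stopwatch_init_msecs_expire(&sw, 1000);
 *		while (!device_ready(arg)) {
 *			if (stopwatch_expired(&sw))
 *				return CB_ERR;
 *			// Sleep cooperatively; other threads and timers run.
 *			thread_yield_microseconds(100);
 *		}
 *		return CB_SUCCESS;
 *	}
 */
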
void thread_coop_enable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	assert(current->can_yield <= 0);

	current->can_yield++;
}

void thread_coop_disable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	current->can_yield--;
}

enum cb_err thread_join(struct thread_handle *handle)
{
	struct stopwatch sw;
	struct thread *current = current_thread();

	assert(handle);
	assert(current);
	assert(current->handle != handle);

	if (handle->state == THREAD_UNINITIALIZED)
		return CB_ERR_ARG;

	printk(BIOS_SPEW, "waiting for thread\n");

	stopwatch_init(&sw);

	while (handle->state != THREAD_DONE)
		assert(thread_yield() == 0);

	printk(BIOS_SPEW, "took %lld us\n", stopwatch_duration_usecs(&sw));

	return handle->error;
}

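/*
 * Usage sketch (editor's illustration, not part of the original file): start
 * long-running work early in ramstage and join it only once its result is
 * actually needed.  load_payload_in_background() is a hypothetical worker.
 *
 *	static struct thread_handle payload_handle;
 *
 *	// Early in ramstage:
 *	thread_run(&payload_handle, load_payload_in_background, NULL);
 *
 *	// Later, when the result is required:
 *	if (thread_join(&payload_handle) != CB_SUCCESS)
 *		printk(BIOS_ERR, "background payload load failed\n");
 */
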
void thread_mutex_lock(struct thread_mutex *mutex)
{
	struct stopwatch sw;

	stopwatch_init(&sw);

	while (mutex->locked)
		assert(thread_yield() == 0);
	mutex->locked = true;

	printk(BIOS_SPEW, "took %lld us to acquire mutex\n", stopwatch_duration_usecs(&sw));
}

void thread_mutex_unlock(struct thread_mutex *mutex)
{
	assert(mutex->locked);
	mutex->locked = 0;
}
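
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * serialize access to a shared resource between cooperative threads.
 * update_shared_table() is a hypothetical critical section.
 *
 *	static struct thread_mutex table_mutex;
 *
 *	void table_update(void)
 *	{
 *		thread_mutex_lock(&table_mutex);
 *		update_shared_table();
 *		thread_mutex_unlock(&table_mutex);
 *	}
 */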