/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <bootstate.h>
#include <console/console.h>
#include <smp/node.h>
#include <thread.h>
#include <timer.h>
#include <types.h>

static u8 thread_stacks[CONFIG_STACK_SIZE * CONFIG_NUM_THREADS] __aligned(sizeof(uint64_t));
static bool initialized;

static void idle_thread_init(void);

/* There needs to be at least one thread to run the ramstage state machine. */
#define TOTAL_NUM_THREADS (CONFIG_NUM_THREADS + 1)

/* Storage space for the thread structs. */
static struct thread all_threads[TOTAL_NUM_THREADS];

/* All runnable (but not running) and free threads are kept on their
 * respective lists. */
static struct thread *runnable_threads;
static struct thread *free_threads;

static struct thread *active_thread;

static inline int thread_can_yield(const struct thread *t)
{
	return (t != NULL && t->can_yield > 0);
}

static inline void set_current_thread(struct thread *t)
{
	/* Only the boot CPU runs the cooperative threads. */
	assert(boot_cpu());
	active_thread = t;
}

static inline struct thread *current_thread(void)
{
	if (!initialized || !boot_cpu())
		return NULL;

	return active_thread;
}

static inline int thread_list_empty(struct thread **list)
{
	return *list == NULL;
}

static inline struct thread *pop_thread(struct thread **list)
{
	struct thread *t;

	t = *list;
	*list = t->next;
	t->next = NULL;

	return t;
}

static inline void push_thread(struct thread **list, struct thread *t)
{
	t->next = *list;
	*list = t;
}

static inline void push_runnable(struct thread *t)
{
	push_thread(&runnable_threads, t);
}

static inline struct thread *pop_runnable(void)
{
	return pop_thread(&runnable_threads);
}

static inline struct thread *get_free_thread(void)
{
	struct thread *t;

	if (thread_list_empty(&free_threads))
		return NULL;

	t = pop_thread(&free_threads);

	/* Reset the current stack value to the original. */
	if (!t->stack_orig)
		die("%s: Invalid stack value\n", __func__);

	t->stack_current = t->stack_orig;

	return t;
}

static inline void free_thread(struct thread *t)
{
	push_thread(&free_threads, t);
}

/* The idle thread is run whenever there isn't anything else that is runnable.
 * Its sole responsibility is to ensure progress is made by running the timer
 * callbacks. */
__noreturn static enum cb_err idle_thread(void *unused)
{
	/* This thread never voluntarily yields. */
	thread_coop_disable();

	while (1)
		timers_run();
}

static void schedule(struct thread *t)
{
	struct thread *current = current_thread();

	/* If t is NULL need to find new runnable thread. */
	if (t == NULL) {
		if (thread_list_empty(&runnable_threads))
			die("Runnable thread list is empty!\n");
		t = pop_runnable();
	} else {
		/* current is still runnable. */
		push_runnable(current);
	}

	if (t->handle)
		t->handle->state = THREAD_STARTED;

	set_current_thread(t);

	switch_to_thread(t->stack_current, &current->stack_current);
}

static void terminate_thread(struct thread *t, enum cb_err error)
{
	if (t->handle) {
		t->handle->error = error;
		t->handle->state = THREAD_DONE;
	}

	free_thread(t);
	schedule(NULL);
}

static asmlinkage void call_wrapper(void *unused)
{
	struct thread *current = current_thread();
	enum cb_err error;

	error = current->entry(current->entry_arg);

	terminate_thread(current, error);
}

struct block_boot_state {
	boot_state_t state;
	boot_state_sequence_t seq;
};

/* Block the provided state until thread is complete. */
static asmlinkage void call_wrapper_block_state(void *arg)
{
	struct block_boot_state *bbs = arg;
	struct thread *current = current_thread();
	enum cb_err error;

	boot_state_block(bbs->state, bbs->seq);
	error = current->entry(current->entry_arg);
	boot_state_unblock(bbs->state, bbs->seq);
	terminate_thread(current, error);
}

/* Prepare a thread so that it starts by executing thread_entry(thread_arg).
 * Within thread_entry() it will call func(arg). */
static void prepare_thread(struct thread *t, struct thread_handle *handle,
			   enum cb_err (*func)(void *), void *arg,
			   asmlinkage void (*thread_entry)(void *), void *thread_arg)
{
	/* Stash the function and argument to run. */
	t->entry = func;
	t->entry_arg = arg;

	/* All new threads can yield by default. */
	t->can_yield = 1;

	/* Pointer used to publish the state of the thread. */
	t->handle = handle;

	arch_prepare_thread(t, thread_entry, thread_arg);
}

static void thread_resume_from_timeout(struct timeout_callback *tocb)
{
	struct thread *to;

	to = tocb->priv;
	schedule(to);
}

static void idle_thread_init(void)
{
	struct thread *t;

	t = get_free_thread();

	if (t == NULL)
		die("No threads available for idle thread!\n");

	/* Queue idle thread to run once all other threads have yielded. */
	prepare_thread(t, NULL, idle_thread, NULL, call_wrapper, NULL);
	push_runnable(t);
}

/* Don't inline this function so the timeout_callback won't have its storage
 * space on the stack cleaned up before the call to schedule(). */
static int __attribute__((noinline))
thread_yield_timed_callback(struct timeout_callback *tocb, unsigned int microsecs)
{
	tocb->priv = current_thread();
	tocb->callback = thread_resume_from_timeout;

	if (timer_sched_callback(tocb, microsecs))
		return -1;

	/* The timer callback will wake up the current thread. */
	schedule(NULL);

	return 0;
}

static void *thread_alloc_space(struct thread *t, size_t bytes)
{
	/* Allocate the amount of space on the stack keeping the stack
	 * aligned to the pointer size. */
	t->stack_current -= ALIGN_UP(bytes, sizeof(uintptr_t));

	return (void *)t->stack_current;
}

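/*
 * Worked example for thread_alloc_space() above (assuming a 64-bit build,
 * so sizeof(uintptr_t) == 8): a request for 6 bytes moves stack_current
 * down by ALIGN_UP(6, 8) == 8 bytes, reserving the space while keeping the
 * thread's stack pointer aligned to the pointer size.
 */
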
static void threads_initialize(void)
{
	int i;
	struct thread *t;
	u8 *stack_top;

	if (initialized)
		return;

	/* The main (boot) thread is slot 0 and keeps running on its own stack. */
	t = &all_threads[0];

	set_current_thread(t);

	t->stack_orig = (uintptr_t)NULL; /* We never free the main thread */
	t->can_yield = 1;

	/* Carve a CONFIG_STACK_SIZE stack out of thread_stacks for each
	 * remaining thread and place the thread on the free list. */
	stack_top = &thread_stacks[CONFIG_STACK_SIZE];
	for (i = 1; i < TOTAL_NUM_THREADS; i++) {
		t = &all_threads[i];
		t->stack_orig = (uintptr_t)stack_top;
		stack_top += CONFIG_STACK_SIZE;
		free_thread(t);
	}

	idle_thread_init();

	initialized = true;
}

int thread_run(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg)
{
	struct thread *current;
	struct thread *t;

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	prepare_thread(t, handle, func, arg, call_wrapper, NULL);
	schedule(t);

	return 0;
}

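/*
 * Hypothetical usage sketch (worker() and its work are made-up names; only
 * thread_run()/thread_join() are this file's API): spawn a worker and later
 * block on its completion via the handle, which publishes the thread's state
 * and exit code.
 *
 *	static enum cb_err worker(void *arg)
 *	{
 *		// ... slow work, calling thread_yield() as appropriate ...
 *		return CB_SUCCESS;
 *	}
 *
 *	static struct thread_handle worker_handle;
 *
 *	if (thread_run(&worker_handle, worker, NULL) == 0) {
 *		// ... other boot work runs concurrently ...
 *		if (thread_join(&worker_handle) != CB_SUCCESS)
 *			printk(BIOS_ERR, "worker failed\n");
 *	}
 */
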
int thread_run_until(struct thread_handle *handle, enum cb_err (*func)(void *), void *arg,
		     boot_state_t state, boot_state_sequence_t seq)
{
	struct thread *current;
	struct thread *t;
	struct block_boot_state *bbs;

	/* This is a ramstage specific API */
	if (!ENV_RAMSTAGE)
		dead_code();

	/* Lazy initialization */
	threads_initialize();

	current = current_thread();

	if (!thread_can_yield(current)) {
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();

	if (t == NULL) {
		printk(BIOS_ERR, "%s: No more threads!\n", __func__);
		return -1;
	}

	/* Stash the blocked state on the new thread's stack. */
	bbs = thread_alloc_space(t, sizeof(*bbs));
	bbs->state = state;
	bbs->seq = seq;
	prepare_thread(t, handle, func, arg, call_wrapper_block_state, bbs);
	schedule(t);

	return 0;
}

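/*
 * Usage sketch (hypothetical, reusing the worker/handle names from the
 * thread_run() example above): run the worker concurrently while holding
 * the boot state machine back, here blocking entry into BS_DEV_ENUMERATE
 * until the thread has terminated:
 *
 *	thread_run_until(&worker_handle, worker, NULL,
 *			 BS_DEV_ENUMERATE, BS_ON_ENTRY);
 */
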
int thread_yield(void)
{
	return thread_yield_microseconds(0);
}

int thread_yield_microseconds(unsigned int microsecs)
{
	struct thread *current;
	struct timeout_callback tocb;

	current = current_thread();

	if (!thread_can_yield(current))
		return -1;

	if (thread_yield_timed_callback(&tocb, microsecs))
		return -1;

	return 0;
}

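/*
 * Hypothetical polling example (device_ready() and dev are made-up names):
 * instead of busy-waiting on hardware, yield in bounded slices so other
 * threads and timer callbacks can make progress:
 *
 *	while (!device_ready(dev))
 *		thread_yield_microseconds(100);
 */
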
void thread_coop_enable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	assert(current->can_yield <= 0);

	current->can_yield++;
}

void thread_coop_disable(void)
{
	struct thread *current;

	current = current_thread();

	if (current == NULL)
		return;

	current->can_yield--;
}

enum cb_err thread_join(struct thread_handle *handle)
{
	struct stopwatch sw;
	struct thread *current = current_thread();

	assert(handle);
	assert(current);
	/* A thread can't join itself. */
	assert(current->handle != handle);

	if (handle->state == THREAD_UNINITIALIZED)
		return CB_ERR_ARG;

	printk(BIOS_SPEW, "waiting for thread\n");

	stopwatch_init(&sw);

	while (handle->state != THREAD_DONE)
		assert(thread_yield() == 0);

	printk(BIOS_SPEW, "took %lld us\n", stopwatch_duration_usecs(&sw));

	return handle->error;
}

void thread_mutex_lock(struct thread_mutex *mutex)
{
	struct stopwatch sw;

	stopwatch_init(&sw);

	while (mutex->locked)
		assert(thread_yield() == 0);
	mutex->locked = true;

	printk(BIOS_SPEW, "took %lld us to acquire mutex\n", stopwatch_duration_usecs(&sw));
}

void thread_mutex_unlock(struct thread_mutex *mutex)
{
	assert(mutex->locked);
	mutex->locked = false;
}
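
/*
 * Cooperative mutex usage sketch (hypothetical caller code):
 *
 *	static struct thread_mutex state_lock;
 *
 *	thread_mutex_lock(&state_lock);	// yields until the flag clears
 *	// ... touch shared state ...
 *	thread_mutex_unlock(&state_lock);
 *
 * Because scheduling is cooperative and confined to the boot CPU, the plain
 * `locked` flag needs no atomics: a holder only loses the CPU when it
 * explicitly yields.
 */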