2 * Copyright (c) 2005-2007, Kohsuke Ohtani
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * thread.c - thread management routines.
35 * Creating a thread and loading its register state are defined as
36 * separate routine. These two routines are used by fork(), exec(),
37 * and pthread_create() in the POSIX emulation library.
39 * thread_create() thread_load()
40 * --------------- -------------
43 * pthread_create() : O O
57 * An idle thread is the first thread in the system, and it will
58 * be set running when no other thread is active.
/*
 * Statically-initialized bootstrap thread. thread_init() attaches a
 * kernel stack to it, and it becomes the idle thread once
 * thread_idle() is entered after kernel initialization.
 */
60 struct thread idle_thread
= IDLE_THREAD(idle_thread
);
62 /* Thread waiting to be killed */
/*
 * Single deferred-free slot: when __thread_terminate() is asked to
 * kill the *current* thread, its context cannot be released while it
 * is still running, so the thread is parked here and reaped on a
 * later __thread_terminate() call. NOTE(review): one slot only —
 * presumably at most one deferred kill can be pending; confirm
 * against the scheduler locking in the full source.
 */
63 static thread_t zombie_thread
;
66 * Allocate a new thread and attach a new kernel stack.
67 * Returns thread pointer on success, or NULL on failure.
/*
 * NOTE(review): this extract is missing lines (original numbering
 * jumps 69→74→76→78→83); local declarations of `th`/`stack`, the
 * error-path returns, and the kstack assignment are not visible here.
 */
69 static thread_t
thread_alloc(void)
/* Allocate the thread control block; NULL on allocation failure. */
74 if ((th
= kmem_alloc(sizeof(struct thread
))) == NULL
)
/* Zero the whole structure so every field starts in a known state. */
76 memset(th
, 0, sizeof(struct thread
));
/* Allocate a fresh kernel stack; failure path (not visible here)
 * presumably frees `th` before returning NULL — confirm in full source. */
78 if ((stack
= kmem_alloc(KSTACK_SIZE
)) == NULL
) {
/* Magic value used by thread_valid() to detect stale/garbage handles. */
83 th
->magic
= THREAD_MAGIC
;
/* Empty list of mutexes held by this thread (used by priority inheritance). */
84 list_init(&th
->mutexes
);
89 * Deallocate all thread data.
/*
 * Release the thread's kernel stack. NOTE(review): the extract is
 * missing lines (89→91→94 jump); the kmem_free() of the thread
 * structure itself is presumably in the missing span — confirm.
 */
91 static void thread_free(thread_t th
)
94 kmem_free(th
->kstack
);
99 * Create a new thread within the specified task.
101 * The context of a current thread will be copied to the new thread.
102 * The new thread will start from the return address of thread_create()
103 * call in the user mode. Since a new thread will share the user mode
104 * stack with a current thread, user mode applications are responsible
105 * for allocating new user mode stack. The new thread is initially set
106 * to suspend state, and so, thread_resume() must be called to start it.
108 * The following scheduling parameters are reset to default values.
110 * - Scheduling Policy
111 * - Scheduling Priority
/*
 * Syscall wrapper: validates arguments and capability, then delegates
 * the actual work to __thread_create(). NOTE(review): error returns,
 * scheduler lock/unlock, and the final return are in lines missing
 * from this extract.
 */
113 __syscall
int thread_create(task_t task
, thread_t
*pth
)
/* Reject stale or bogus task handles. */
118 if (!task_valid(task
)) {
/* Creating a thread in another task requires CAP_TASK capability. */
122 if (task
!= cur_task() && !task_capable(CAP_TASK
)) {
/* All real work happens in the kernel-internal helper. */
126 err
= __thread_create(task
, pth
);
/*
 * Kernel-internal thread creation: allocates the thread, returns its
 * id to the caller first, then clones the current kernel stack and
 * wires the thread into the task. NOTE(review): error handling,
 * scheduler locking, and sched_start()/state setup are in lines
 * missing from this extract (original numbering jumps).
 */
131 int __thread_create(task_t task
, thread_t
*pth
)
/* Allocate thread + kernel stack; NULL means out of memory. */
135 if ((th
= thread_alloc()) == NULL
)
138 * We copy a new thread id as return value, first. This
139 * is done here to simplify all error recovery for the
/* Kernel callers get the id by plain assignment (branch body not
 * visible here); user callers need a checked copy-out below. */
142 if (cur_task() == &kern_task
) {
143 /* We are called inside kernel */
/* Copy the new thread id out to user space; non-zero = fault. */
146 if (umem_copyout(&th
, pth
, sizeof(thread_t
))) {
152 * We can not return any error from here.
/* Clone the current kernel stack so the new thread resumes from the
 * same kernel return path as the caller. */
154 memcpy(th
->kstack
, cur_thread
->kstack
, KSTACK_SIZE
);
/* Start suspended: task's suspend count plus one extra, so
 * thread_resume() is required before the thread can run. */
156 th
->sus_count
= task
->sus_count
+ 1;
/* Initial context with the stack pointer at the top of the new
 * kernel stack (stacks grow downward). */
157 context_init(&th
->ctx
, th
->kstack
+ KSTACK_SIZE
);
/* Link the thread into its owning task's thread list. */
158 list_insert(&task
->threads
, &th
->task_link
);
164 * Terminate a thread.
166 * Release all resources of the specified thread. However, we can
167 * not release the context of the current thread because our
168 * thread switching always requires current context. So, the thread
169 * termination is deferred until next thread_terminate() called by
171 * If specified thread is current thread, this routine never returns.
/*
 * Syscall wrapper: validates the handle and permissions, then
 * delegates to __thread_terminate(). NOTE(review): error returns and
 * scheduler lock/unlock are in lines missing from this extract.
 */
173 __syscall
int thread_terminate(thread_t th
)
/* Reject stale or bogus thread handles. */
178 if (!thread_valid(th
)) {
/* Kernel threads can never be terminated via this syscall, and
 * killing a thread in another task requires CAP_TASK. */
182 if (th
->task
== &kern_task
||
183 (th
->task
!= cur_task() && !task_capable(CAP_TASK
))) {
187 err
= __thread_terminate(th
);
/*
 * Kernel-internal termination: tears down the thread's resources,
 * unlinks it from its task, and frees it — or defers the free via
 * zombie_thread when the victim is the current thread.
 * NOTE(review): the exception-clear and resource-cleanup calls, the
 * sched_stop() path, and the non-current-thread free are in lines
 * missing from this extract.
 */
192 int __thread_terminate(thread_t th
)
194 /* Clear pending exception */
197 /* Clean up all resources */
/* Detach the thread from its owning task's thread list. */
202 list_remove(&th
->task_link
);
206 /* If previous pending thread exists, kill it now. */
/* Reap an earlier deferred kill — but never free ourselves while we
 * are still executing on that thread's stack. */
207 if (zombie_thread
&& zombie_thread
!= cur_thread
) {
208 thread_free(zombie_thread
);
209 zombie_thread
= NULL
;
/* Self-termination: the current context cannot be freed now, so park
 * the thread in zombie_thread (assignment in missing lines) and let a
 * later caller reap it. Per the header comment, this path never returns. */
211 if (th
== cur_thread
) {
213 * The thread context can not be deallocated for
214 * current thread. So, wait for somebody to kill it.
224 * Load entry/stack address of user mode.
226 * The entry and stack address can be set to NULL. If it is
227 * NULL, old state is just kept.
/*
 * Set (or keep) the user-mode entry point and user stack pointer in a
 * thread's context. Used together with thread_create() by fork/exec/
 * pthread_create in the emulation library. NOTE(review): error
 * returns, scheduler locking, and the NULL-skip branches around the
 * context_set() calls are in lines missing from this extract.
 */
229 __syscall
int thread_load(thread_t th
, void *entry
, void *stack
)
/* For user-mode callers, both addresses must lie in the user area —
 * kernel callers are trusted and skip this check. */
231 if (cur_task() != &kern_task
) {
232 if (((entry
&& !user_area(entry
)) ||
233 (stack
&& !user_area(stack
))))
/* Reject stale or bogus thread handles. */
238 if (!thread_valid(th
)) {
/* Loading another task's thread requires CAP_TASK capability. */
242 if (th
->task
!= cur_task() && !task_capable(CAP_TASK
)) {
/* Program the user-mode entry point into the saved context. */
247 context_set(&th
->ctx
, USER_ENTRY
, (u_long
)entry
);
/* Program the user-mode stack pointer into the saved context. */
249 context_set(&th
->ctx
, USER_STACK
, (u_long
)stack
);
256 * Return id of a current thread.
/* NOTE(review): the body (presumably `return cur_thread;`) is in
 * lines missing from this extract — only the signature is visible. */
258 __syscall thread_t
thread_self(void)
264 * Release current thread for other thread.
/* Voluntary CPU yield. NOTE(review): the body (presumably a
 * sched_yield() call) is in lines missing from this extract. */
266 __syscall
void thread_yield(void)
274 * A thread can be suspended any number of times. And, it does not
275 * start to run again unless the thread is resumed by the same count
276 * of suspend request.
/*
 * Increment a thread's suspend count; the thread actually stops on
 * the 0→1 transition. NOTE(review): error returns, scheduler
 * locking, and the sched_suspend() call in the `== 1` branch are in
 * lines missing from this extract.
 */
278 __syscall
int thread_suspend(thread_t th
)
/* Reject stale or bogus thread handles. */
282 if (!thread_valid(th
)) {
/* Suspending another task's thread requires CAP_TASK capability. */
286 if (th
->task
!= cur_task() && !task_capable(CAP_TASK
)) {
/* Only the first suspend request actually stops the thread; further
 * requests just deepen the count (see header comment). */
290 if (++th
->sus_count
== 1)
300 * A thread does not begin to run, unless both a thread suspend
301 * count and a task suspend count are set to 0.
/*
 * Decrement a thread's suspend count; the thread is made runnable
 * only when both its own and its task's suspend counts reach zero.
 * NOTE(review): the decrement itself, error returns, scheduler
 * locking, and the sched_resume() call are in lines missing from
 * this extract.
 */
303 __syscall
int thread_resume(thread_t th
)
/* A thread cannot resume itself — if it is running, it is not suspended. */
305 ASSERT(th
!= cur_thread
);
/* Reject stale or bogus thread handles. */
309 if (!thread_valid(th
)) {
/* Resuming another task's thread requires CAP_TASK capability. */
313 if (th
->task
!= cur_task() && !task_capable(CAP_TASK
)) {
/* Resume on a non-suspended thread is an error (branch body missing here). */
317 if (th
->sus_count
== 0) {
/* Both counts must hit zero before the thread may actually run. */
322 if (th
->sus_count
== 0 && th
->task
->sus_count
== 0)
330 * Get/set scheduling parameter.
334 * @param: pointer to parameter
/*
 * Get or set a thread's scheduling priority or policy, selected by
 * `op` (the switch statement and its case labels are in lines missing
 * from this extract; the fragments below are the per-op bodies).
 * `param` is copied in/out of user space with umem_copyin/copyout.
 */
336 __syscall
int thread_schedparam(thread_t th
, int op
, int *param
)
338 int prio
, policy
, err
= 0;
/* Reject stale or bogus thread handles. */
342 if (!thread_valid(th
)) {
/* Changing another task's thread requires CAP_NICE capability. */
346 if (th
->task
!= cur_task() && !task_capable(CAP_NICE
)) {
/* --- get-priority op: copy the current priority out to the user. --- */
352 prio
= sched_getprio(th
);
353 err
= umem_copyout(&prio
, param
, sizeof(int));
/* --- set-priority op: copy the requested priority in... --- */
356 if ((err
= umem_copyin(param
, &prio
, sizeof(int))))
/* ...and clamp it above the idle priority so no thread can ever
 * schedule at or below the idle thread. */
360 else if (prio
>= PRIO_IDLE
)
361 prio
= PRIO_IDLE
- 1;
363 * If a current priority is inherited for mutex, we can
364 * not change the priority to lower value. In this case,
365 * only the base priority is changed, and a current
366 * priority will be adjusted to correct value, later.
/* Priority-inheritance in effect and request would lower the current
 * priority: route through mutex_setprio() so only the base priority
 * changes now (see comment above). */
368 if (th
->prio
!= th
->base_prio
&& prio
> th
->prio
)
371 mutex_setprio(th
, prio
);
/* Otherwise set both base and current priority directly. */
372 sched_setprio(th
, prio
, prio
);
/* --- get-policy op: copy the scheduling policy out to the user. --- */
375 policy
= sched_getpolicy(th
);
376 err
= umem_copyout(&policy
, param
, sizeof(int));
/* --- set-policy op: copy the requested policy in and apply it;
 * sched_setpolicy() rejects invalid policies (branch body missing here). --- */
379 if ((err
= umem_copyin(param
, &policy
, sizeof(int))))
381 if (sched_setpolicy(th
, policy
))
395 * This routine is called only once after kernel initialization
396 * is completed. An idle thread runs when no other thread is active.
397 * It has the role of cutting down the power consumption of a system.
398 * An idle thread has FIFO scheduling policy because it does not
/*
 * Turn the bootstrap thread into the idle loop. Never returns.
 * NOTE(review): the sched_unlock() call and the cpu_idle() loop are
 * in lines missing from this extract.
 */
401 void thread_idle(void)
/* The scheduler must still hold exactly the single lock taken during
 * kernel init — anything else means unbalanced lock/unlock. */
403 ASSERT(cur_thread
->lock_count
== 1);
405 /* Unlock scheduler to start scheduling */
416 * Create kernel thread.
418 * Kernel thread does not have user mode context, and its scheduling
419 * policy is set to SCHED_FIFO. Currently, the kernel thread is used
420 * for interrupt threads, timer thread, and an idle thread.
421 * kernel_thread() returns thread ID on success, or NULL on failure.
422 * The scheduler must be locked before calling this routine.
424 * Important: Since sched_switch() will disable interrupts in CPU, the
425 * interrupt is disabled when the kernel thread is started first time.
426 * So, the kernel thread must enable the interrupt by itself when it
/*
 * NOTE(review): the local declaration of `th`, the NULL-failure
 * return, and the final `return th;` are in lines missing from this
 * extract (original numbering jumps 429→435→439).
 */
429 thread_t
kernel_thread(void (*entry
)(u_long
), u_long arg
)
/* Allocate thread + kernel stack; failure branch body not visible here. */
435 if ((th
= thread_alloc()) == NULL
) {
/* Fresh kernel threads get a zeroed stack (no cloned caller state,
 * unlike __thread_create()). */
439 memset(th
->kstack
, 0, KSTACK_SIZE
);
/* Initial context with SP at the top of the kernel stack. */
441 context_init(&th
->ctx
, th
->kstack
+ KSTACK_SIZE
);
/* Kernel entry point and its single u_long argument. */
442 context_set(&th
->ctx
, KERN_ENTRY
, (u_long
)entry
);
443 context_set(&th
->ctx
, KERN_ARG
, arg
);
/* Kernel threads always belong to the kernel task and run FIFO
 * (see header comment). */
444 th
->task
= &kern_task
;
445 th
->policy
= SCHED_FIFO
;
446 list_insert(&kern_task
.threads
, &th
->task_link
);
454 * Return thread information.
/*
 * Fill `info` with the state of the thread selected by info->cookie:
 * walk every task, then every thread in each task, counting until the
 * cookie'th thread is reached. NOTE(review): the do-loop headers,
 * list-head initialization, scheduler locking, and the
 * found/not-found bookkeeping are in lines missing from this extract
 * — only the loop tails and the copy-out assignments are visible.
 */
456 int thread_info(struct info_thread
*info
)
/* cookie is the caller-supplied index of the thread to report. */
458 u_long index
, target
= info
->cookie
;
/* Outer loop body: current task from the global task list. */
468 task
= list_entry(i
, struct task
, link
);
/* Inner loop body: current thread within that task. */
472 th
= list_entry(j
, struct thread
, task_link
);
/* Stop when the running index reaches the requested cookie. */
473 if (index
++ == target
)
476 } while (j
!= &task
->threads
);
478 } while (i
!= &kern_task
.link
);
/* Copy the selected thread's fields into the caller's info struct. */
483 info
->state
= th
->state
;
484 info
->policy
= th
->policy
;
485 info
->prio
= th
->prio
;
486 info
->base_prio
= th
->base_prio
;
487 info
->sus_count
= th
->sus_count
;
488 info
->total_ticks
= th
->total_ticks
;
489 info
->task
= th
->task
;
/* strlcpy guarantees NUL-termination within MAX_TASKNAME. */
490 strlcpy(info
->task_name
, task
->name
, MAX_TASKNAME
);
496 #if defined(DEBUG) && defined(CONFIG_KDUMP)
/*
 * Debug-only console dump of every thread in every task (same
 * task→thread double loop as thread_info()). NOTE(review): loop
 * headers and the #endif are in lines missing from this extract.
 */
497 void thread_dump(void)
/* Fixed-width labels indexed by th->state and th->policy. */
502 char state
[][4] = { "RUN", "SLP", "SUS", "S&S", "EXT" };
503 char pol
[][5] = { "FIFO", "RR " };
505 printk("Thread dump:\n");
506 printk(" mod thread task stat pol prio base ticks total susp sleep event\n");
507 printk(" --- -------- -------- ---- ---- ---- ---- ----- -------- ---- ------------\n");
/* Outer loop body: current task from the global task list. */
511 task
= list_entry(i
, struct task
, link
);
/* Inner loop body: current thread within that task. */
515 th
= list_entry(j
, struct thread
, task_link
);
/* One line per thread; '*' marks the currently running thread. */
516 printk(" %s %08x %08x %s%c %s %3d %3d %3d %8d %4d %s\n",
517 (task
== &kern_task
) ? "Knl" : "Usr", th
,
518 task
, state
[th
->state
],
519 (th
== cur_thread
) ? '*' : ' ',
520 pol
[th
->policy
], th
->prio
, th
->base_prio
,
521 th
->ticks_left
, th
->total_ticks
, th
->sus_count
,
522 th
->sleep_event
? th
->sleep_event
->name
: "-");
524 } while (j
!= &task
->threads
);
526 } while (i
!= &kern_task
.link
);
531 * The first thread in system is created here, and this thread
532 * becomes an idle thread when thread_idle() is called later.
533 * The scheduler is locked until thread_idle() is called, in order
534 * to prevent thread switch during kernel initialization.
/*
 * Boot-time setup of the statically-defined idle_thread: give it a
 * kernel stack, an initial context, and link it into the kernel
 * task's thread list. NOTE(review): the local declaration of `stack`
 * is in lines missing from this extract.
 */
536 void thread_init(void)
541 * Initialize idle thread
/* The idle thread cannot run without a stack — allocation failure
 * this early is unrecoverable, so panic. */
543 if ((stack
= kmem_alloc(KSTACK_SIZE
)) == NULL
)
544 panic("Failed to allocate idle stack");
545 memset(stack
, 0, KSTACK_SIZE
);
546 idle_thread
.kstack
= stack
;
/* Initial context with SP at the top of the new stack. */
547 context_init(&idle_thread
.ctx
, stack
+ KSTACK_SIZE
);
548 list_insert(&kern_task
.threads
, &idle_thread
.task_link
);