/*-
 * Copyright (c) 2005-2007, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * thread.c - thread management routines.
 */

/*-
 * Creating a thread and loading its register state are defined as
 * separate routines. These two routines are used by fork(), exec(),
 * and pthread_create() in the POSIX emulation library.
 *
 *                      thread_create()  thread_load()
 *                      ---------------  -------------
 *  fork()           :        O                X
 *  exec()           :        X                O
 *  pthread_create() :        O                O
 */

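/*
 * For illustration only: a pthread_create() in the POSIX emulation
 * library might combine the two routines roughly as follows. This is
 * a sketch, not code from the emulation library; task_self(),
 * thread_entry, and new_stack are assumed names.
 *
 *      thread_t th;
 *      int err;
 *
 *      if ((err = thread_create(task_self(), &th)) != 0)
 *              return err;
 *      if ((err = thread_load(th, thread_entry, new_stack)) != 0)
 *              return err;
 *      return thread_resume(th);
 */
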
#include <kernel.h>
#include <list.h>
#include <kmem.h>
#include <task.h>
#include <thread.h>
#include <ipc.h>
#include <sched.h>
#include <sync.h>
#include <system.h>

/*
 * An idle thread is the first thread in the system, and it will
 * be set running when no other thread is active.
 */
struct thread idle_thread = IDLE_THREAD(idle_thread);

/* Thread waiting to be killed */
static thread_t zombie_thread;

/*
 * Allocate a new thread and attach a new kernel stack.
 * Returns thread pointer on success, or NULL on failure.
 */
static thread_t thread_alloc(void)
{
        thread_t th;
        void *stack;

        if ((th = kmem_alloc(sizeof(struct thread))) == NULL)
                return NULL;
        memset(th, 0, sizeof(struct thread));

        if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL) {
                kmem_free(th);
                return NULL;
        }
        th->kstack = stack;
        th->magic = THREAD_MAGIC;
        list_init(&th->mutexes);
        return th;
}

/*
 * Deallocate all thread data.
 */
static void thread_free(thread_t th)
{
        th->magic = 0;
        kmem_free(th->kstack);
        kmem_free(th);
}

/*
 * Create a new thread within the specified task.
 *
 * The context of the current thread is copied to the new thread.
 * The new thread starts in user mode from the return address of the
 * thread_create() call. Since the new thread shares the user-mode
 * stack with the current thread, user-mode applications are
 * responsible for allocating a new user-mode stack. The new thread
 * is initially set to the suspended state, so thread_resume() must
 * be called to start it.
 *
 * The following scheduling parameters are reset to default values:
 *  - Thread State
 *  - Scheduling Policy
 *  - Scheduling Priority
 */
__syscall int thread_create(task_t task, thread_t *pth)
{
        int err;

        sched_lock();
        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (task != cur_task() && !task_capable(CAP_TASK)) {
                sched_unlock();
                return EPERM;
        }
        err = __thread_create(task, pth);
        sched_unlock();
        return err;
}

int __thread_create(task_t task, thread_t *pth)
{
        thread_t th;

        if ((th = thread_alloc()) == NULL)
                return ENOMEM;
        /*
         * We copy the new thread ID out as the return value first.
         * This is done here to simplify all error recovery in the
         * subsequent code.
         */
        if (cur_task() == &kern_task) {
                /* We are called inside the kernel */
                *pth = th;
        } else {
                if (umem_copyout(&th, pth, sizeof(thread_t))) {
                        thread_free(th);
                        return EFAULT;
                }
        }
        /*
         * No error can be returned from here on.
         */
        memcpy(th->kstack, cur_thread->kstack, KSTACK_SIZE);
        th->task = task;
        th->sus_count = task->sus_count + 1;
        context_init(&th->ctx, th->kstack + KSTACK_SIZE);
        list_insert(&task->threads, &th->task_link);
        sched_start(th);
        return 0;
}

/*
 * Terminate a thread.
 *
 * Release all resources of the specified thread. However, we cannot
 * release the context of the current thread because our thread
 * switching always requires the current context. So, termination of
 * the current thread is deferred until the next thread_terminate()
 * call made by another thread.
 * If the specified thread is the current thread, this routine never
 * returns.
 */
__syscall int thread_terminate(thread_t th)
{
        int err;

        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (th->task == &kern_task ||
            (th->task != cur_task() && !task_capable(CAP_TASK))) {
                sched_unlock();
                return EPERM;
        }
        err = __thread_terminate(th);
        sched_unlock();
        return err;
}

int __thread_terminate(thread_t th)
{
        /* Clear pending exceptions */
        th->exc_bitmap = 0;

        /* Clean up all resources */
        msg_cleanup(th);
        timer_cleanup(th);
        mutex_cleanup(th);

        list_remove(&th->task_link);
        sched_stop(th);
        th->magic = 0;

        /* If a previously deferred thread is pending, kill it now. */
        if (zombie_thread && zombie_thread != cur_thread) {
                thread_free(zombie_thread);
                zombie_thread = NULL;
        }
        if (th == cur_thread) {
                /*
                 * The context of the current thread cannot be
                 * deallocated here. So, wait for somebody else
                 * to kill it.
                 */
                zombie_thread = th;
        } else {
                thread_free(th);
        }
        return 0;
}

/*
 * Load the user-mode entry point and stack address.
 *
 * Either the entry or the stack address may be NULL; a NULL
 * argument leaves the previous value unchanged.
 */
__syscall int thread_load(thread_t th, void *entry, void *stack)
{
        if (cur_task() != &kern_task) {
                if ((entry && !user_area(entry)) ||
                    (stack && !user_area(stack)))
                        return EINVAL;
        }
        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (th->task != cur_task() && !task_capable(CAP_TASK)) {
                sched_unlock();
                return EPERM;
        }
        if (entry != NULL)
                context_set(&th->ctx, USER_ENTRY, (u_long)entry);
        if (stack != NULL)
                context_set(&th->ctx, USER_STACK, (u_long)stack);

        sched_unlock();
        return 0;
}

/*
 * Return the ID of the current thread.
 */
__syscall thread_t thread_self(void)
{
        return cur_thread;
}

/*
 * Release the processor so that another thread can run.
 */
__syscall void thread_yield(void)
{
        sched_yield();
}

/*
 * Suspend a thread.
 *
 * A thread can be suspended any number of times, and it does not
 * run again until it has been resumed by the same number of
 * requests that suspended it.
 */
__syscall int thread_suspend(thread_t th)
{
        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (th->task != cur_task() && !task_capable(CAP_TASK)) {
                sched_unlock();
                return EPERM;
        }
        if (++th->sus_count == 1)
                sched_suspend(th);
        sched_unlock();
        return 0;
}

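/*
 * Example of the counted-suspend semantics above (a sketch; th is
 * assumed to be a valid thread in the caller's task):
 *
 *      thread_suspend(th);     sus_count 0 -> 1, thread stops
 *      thread_suspend(th);     sus_count 1 -> 2
 *      thread_resume(th);      sus_count 2 -> 1, still suspended
 *      thread_resume(th);      sus_count 1 -> 0, eligible to run
 */
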
/*
 * Resume a thread.
 *
 * A thread does not begin to run until both its own suspend count
 * and its task's suspend count are zero.
 */
__syscall int thread_resume(thread_t th)
{
        ASSERT(th != cur_thread);

        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (th->task != cur_task() && !task_capable(CAP_TASK)) {
                sched_unlock();
                return EPERM;
        }
        if (th->sus_count == 0) {
                sched_unlock();
                return EINVAL;
        }
        th->sus_count--;
        if (th->sus_count == 0 && th->task->sus_count == 0)
                sched_resume(th);
        sched_unlock();
        return 0;
}

/*
 * Get/set a scheduling parameter.
 *
 * @th:    target thread
 * @op:    operation ID
 * @param: pointer to the parameter
 */
__syscall int thread_schedparam(thread_t th, int op, int *param)
{
        int prio, policy, err = 0;

        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (th->task != cur_task() && !task_capable(CAP_NICE)) {
                sched_unlock();
                return EPERM;
        }
        switch (op) {
        case OP_GETPRIO:
                prio = sched_getprio(th);
                err = umem_copyout(&prio, param, sizeof(int));
                break;
        case OP_SETPRIO:
                if ((err = umem_copyin(param, &prio, sizeof(int))))
                        break;
                if (prio < 0)
                        prio = 0;
                else if (prio >= PRIO_IDLE)
                        prio = PRIO_IDLE - 1;
                /*
                 * If the current priority has been inherited through
                 * a mutex, we cannot lower it now. In that case, only
                 * the base priority is changed, and the current
                 * priority will be adjusted to the correct value
                 * later.
                 */
                if (th->prio != th->base_prio && prio > th->prio)
                        prio = th->prio;
                mutex_setprio(th, prio);
                sched_setprio(th, prio, prio);
                break;
        case OP_GETPOLICY:
                policy = sched_getpolicy(th);
                err = umem_copyout(&policy, param, sizeof(int));
                break;
        case OP_SETPOLICY:
                if ((err = umem_copyin(param, &policy, sizeof(int))))
                        break;
                if (sched_setpolicy(th, policy))
                        err = EINVAL;
                break;
        default:
                err = EINVAL;
                break;
        }
        sched_unlock();
        return err;
}

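/*
 * Usage sketch: setting a thread's priority with OP_SETPRIO. The
 * value 100 is an arbitrary example; when called from a user task,
 * param must point to user memory.
 *
 *      int prio = 100;
 *      int err;
 *
 *      err = thread_schedparam(th, OP_SETPRIO, &prio);
 */
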
/*
 * Idle loop.
 *
 * This routine is called only once after kernel initialization
 * is completed. The idle thread runs when no other thread is
 * active; its role is to cut down the power consumption of the
 * system. The idle thread has the FIFO scheduling policy because
 * it has no time quantum.
 */
void thread_idle(void)
{
        ASSERT(cur_thread->lock_count == 1);

        /* Unlock the scheduler to start scheduling */
        sched_unlock();

        for (;;) {
                cpu_idle();
                sched_yield();
        }
        /* NOTREACHED */
}

/*
 * Create a kernel thread.
 *
 * A kernel thread has no user-mode context, and its scheduling
 * policy is set to SCHED_FIFO. Currently, kernel threads are used
 * for interrupt threads, the timer thread, and the idle thread.
 * kernel_thread() returns the thread ID on success, or NULL on
 * failure.
 *
 * Important: since sched_switch() disables CPU interrupts,
 * interrupts are still disabled when a kernel thread is started
 * for the first time. So, the kernel thread must enable interrupts
 * by itself when it first runs.
 */
thread_t kernel_thread(void (*entry)(u_long), u_long arg)
{
        thread_t th;

        sched_lock();
        if ((th = thread_alloc()) == NULL) {
                sched_unlock();
                return NULL;
        }
        memset(th->kstack, 0, KSTACK_SIZE);
        context_init(&th->ctx, th->kstack + KSTACK_SIZE);
        context_set(&th->ctx, KERN_ENTRY, (u_long)entry);
        context_set(&th->ctx, KERN_ARG, arg);
        th->task = &kern_task;
        th->policy = SCHED_FIFO;
        list_insert(&kern_task.threads, &th->task_link);

        sched_unlock();
        sched_start(th);
        return th;
}

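/*
 * Per the note above, a kernel thread must enable interrupts itself
 * when it first runs. A sketch, where interrupt_enable() stands in
 * for the port's interrupt-enable primitive and do_work() is a
 * placeholder; both are assumed names:
 *
 *      static void my_kthread(u_long arg)
 *      {
 *              interrupt_enable();     (IRQs are off on first run)
 *              for (;;)
 *                      do_work();
 *      }
 *
 *      th = kernel_thread(my_kthread, 0);
 */
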
/*
 * Return thread information.
 */
int thread_info(struct info_thread *info)
{
        u_long index, target = info->cookie;
        list_t i, j;
        thread_t th;
        task_t task;

        sched_lock();

        index = 0;
        i = &kern_task.link;
        do {
                task = list_entry(i, struct task, link);
                j = &task->threads;
                j = list_first(j);
                do {
                        th = list_entry(j, struct thread, task_link);
                        if (index++ == target)
                                goto found;
                        j = list_next(j);
                } while (j != &task->threads);
                i = list_next(i);
        } while (i != &kern_task.link);

        sched_unlock();
        return ESRCH;
 found:
        info->state = th->state;
        info->policy = th->policy;
        info->prio = th->prio;
        info->base_prio = th->base_prio;
        info->sus_count = th->sus_count;
        info->total_ticks = th->total_ticks;
        info->task = th->task;
        strlcpy(info->task_name, task->name, MAX_TASKNAME);

        sched_unlock();
        return 0;
}

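/*
 * Usage sketch: enumerating all threads by incrementing the cookie
 * until ESRCH is returned (field names as used above):
 *
 *      struct info_thread info;
 *
 *      for (info.cookie = 0; thread_info(&info) == 0; info.cookie++)
 *              printk("%s prio=%d\n", info.task_name, info.prio);
 */
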
#if defined(DEBUG) && defined(CONFIG_KDUMP)
void thread_dump(void)
{
        list_t i, j;
        thread_t th;
        task_t task;
        char state[][4] = { "RUN", "SLP", "SUS", "S&S", "EXT" };
        char pol[][5] = { "FIFO", "RR " };

        printk("Thread dump:\n");
        printk(" mod thread   task     stat pol  prio base ticks total    susp sleep event\n");
        printk(" --- -------- -------- ---- ---- ---- ---- ----- -------- ---- ------------\n");

        i = &kern_task.link;
        do {
                task = list_entry(i, struct task, link);
                j = &task->threads;
                j = list_first(j);
                do {
                        th = list_entry(j, struct thread, task_link);
                        printk(" %s %08x %08x %s%c %s %3d %3d %3d %8d %4d %s\n",
                               (task == &kern_task) ? "Knl" : "Usr", th,
                               task, state[th->state],
                               (th == cur_thread) ? '*' : ' ',
                               pol[th->policy], th->prio, th->base_prio,
                               th->ticks_left, th->total_ticks, th->sus_count,
                               th->sleep_event ? th->sleep_event->name : "-");
                        j = list_next(j);
                } while (j != &task->threads);
                i = list_next(i);
        } while (i != &kern_task.link);
}
#endif

/*
 * The first thread in the system is created here, and this thread
 * becomes the idle thread when thread_idle() is called later.
 * The scheduler is kept locked until thread_idle() is called, in
 * order to prevent a thread switch during kernel initialization.
 */
void thread_init(void)
{
        void *stack;

        /*
         * Initialize the idle thread.
         */
        if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL)
                panic("Failed to allocate idle stack");
        memset(stack, 0, KSTACK_SIZE);
        idle_thread.kstack = stack;
        context_init(&idle_thread.ctx, stack + KSTACK_SIZE);
        list_insert(&kern_task.threads, &idle_thread.task_link);
}