/*
** Copyright 2001-2004, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/debug.h>
#include <kernel/console.h>
#include <kernel/thread.h>
#include <kernel/arch/thread.h>
#include <kernel/khash.h>
#include <kernel/int.h>
#include <kernel/smp.h>
#include <kernel/timer.h>
#include <kernel/time.h>
#include <kernel/cpu.h>
#include <kernel/arch/cpu.h>
#include <kernel/arch/int.h>
#include <kernel/arch/vm.h>
#include <kernel/sem.h>
#include <kernel/port.h>
#include <kernel/vfs.h>
#include <kernel/elf.h>
#include <kernel/heap.h>
#include <kernel/signal.h>
#include <kernel/list.h>
#include <newos/user_runtime.h>
#include <newos/errors.h>
#include <boot/stage2.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>

struct proc_key {
	proc_id id;
};

struct thread_key {
	thread_id id;
};

struct proc_arg {
	char *path;
	char **args;
	unsigned int argc;
};

static void insert_proc_into_parent(struct proc *parent, struct proc *p);
static void remove_proc_from_parent(struct proc *parent, struct proc *p);
static struct proc *create_proc_struct(const char *name, bool kernel);
static int proc_struct_compare(void *_p, const void *_key);
static unsigned int proc_struct_hash(void *_p, const void *_key, unsigned int range);
static void proc_reparent_children(struct proc *p);

// global
spinlock_t thread_spinlock = 0;
const int fault_handler_offset = (addr_t)&(((struct thread *)0)->fault_handler) - (addr_t)0;
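// NOTE: the expression above is a hand-rolled offsetof(struct thread, fault_handler);
// low-level fault-handling code presumably reads the handler through this byte offset.
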
// proc list
static void *proc_hash = NULL;
static struct proc *kernel_proc = NULL;
static proc_id next_proc_id = 1;
static spinlock_t proc_spinlock = 0;
// NOTE: PROC lock can be held over a THREAD lock acquisition,
// but not the other way (to avoid deadlock)
#define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
#define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)
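// In other words, the only legal nesting is:
//	GRAB_PROC_LOCK();
//	GRAB_THREAD_LOCK();
//	...
//	RELEASE_THREAD_LOCK();
//	RELEASE_PROC_LOCK();
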
// process groups
struct pgid_node {
	pgrp_id id;
	struct list_node node;
	struct list_node list;
};

static void *pgid_hash = NULL;
static int pgid_node_compare(void *_p, const void *_key);
static unsigned int pgid_node_hash(void *_p, const void *_key, unsigned int range);
static int add_proc_to_pgroup(struct proc *p, pgrp_id pgid);
static int remove_proc_from_pgroup(struct proc *p, pgrp_id pgid);
static struct pgid_node *create_pgroup_struct(pgrp_id pgid);
static int send_pgrp_signal_etc_locked(pgrp_id pgid, uint signal, uint32 flags);

// session groups
struct sid_node {
	sess_id id;
	struct list_node node;
	struct list_node list;
};

static void *sid_hash = NULL;
static int sid_node_compare(void *_s, const void *_key);
static unsigned int sid_node_hash(void *_s, const void *_key, unsigned int range);
static int add_proc_to_session(struct proc *p, sess_id sid);
static int remove_proc_from_session(struct proc *p, sess_id sid);
static struct sid_node *create_session_struct(sess_id sid);

// thread list
static struct thread *idle_threads[_MAX_CPUS];
static void *thread_hash = NULL;
static thread_id next_thread_id = 1;

static sem_id snooze_sem = -1;

// death stacks
// used temporarily as a thread cleans itself up
struct death_stack {
	region_id rid;
	addr_t address;
	bool in_use;
};

static struct death_stack *death_stacks;
static unsigned int num_death_stacks;
static unsigned int volatile death_stack_bitmap;
static sem_id death_stack_sem;
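// Lifecycle sketch (see thread_exit/thread_exit2 below): a dying thread acquires
// one of these stacks, switches onto it, deletes its own kernel stack region,
// and then reschedules away for the last time. The bitmap tracks which death
// stacks are in use; the semaphore throttles exiting threads when none are free.
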
// thread queues
static struct list_node run_q[THREAD_NUM_PRIORITY_LEVELS] = { { NULL, NULL }, };
static struct list_node dead_q;

static int _rand(void);
//static struct proc *proc_get_proc_struct(proc_id id); // unused
static struct proc *proc_get_proc_struct_locked(proc_id id);

// insert a thread onto the tail of a queue
void thread_enqueue(struct thread *t, struct list_node *q)
{
	list_add_tail(q, &t->q_node);
}

struct thread *thread_lookat_queue(struct list_node *q)
{
	return list_peek_head_type(q, struct thread, q_node);
}

struct thread *thread_dequeue(struct list_node *q)
{
	return list_remove_head_type(q, struct thread, q_node);
}

void thread_dequeue_thread(struct thread *t)
{
	list_delete(&t->q_node);
}

struct thread *thread_lookat_run_q(int priority)
{
	return thread_lookat_queue(&run_q[priority]);
}

void thread_enqueue_run_q(struct thread *t)
{
	// these shouldn't exist
	if(t->priority > THREAD_MAX_PRIORITY)
		t->priority = THREAD_MAX_PRIORITY;
	if(t->priority < 0)
		t->priority = 0;

	thread_enqueue(t, &run_q[t->priority]);
}

static struct thread *thread_dequeue_run_q(int priority)
{
	return thread_dequeue(&run_q[priority]);
}

static void insert_thread_into_proc(struct proc *p, struct thread *t)
{
	list_add_head(&p->thread_list, &t->proc_node);
	p->num_threads++;
	if(p->num_threads == 1) {
		// this was the first thread
		p->main_thread = t;
	}
	t->proc = p;
}

static void remove_thread_from_proc(struct proc *p, struct thread *t)
{
	list_delete(&t->proc_node);
	p->num_threads--;
}

static int thread_struct_compare(void *_t, const void *_key)
{
	struct thread *t = _t;
	const struct thread_key *key = _key;

	if(t->id == key->id) return 0;
	else return 1;
}

// Frees the argument list
// Parameters
//	args	argument list
//	argc	number of arguments
static void free_arg_list(char **args, int argc)
{
	int cnt = argc;

	if(args != NULL) {
		for(cnt = 0; cnt < argc; cnt++){
			kfree(args[cnt]);
		}
		kfree(args);
	}
}

// Copy the argument list from user space to kernel space
// Parameters
//	args	userspace parameters
//	argc	number of parameters
//	kargs	receives the kernel-space copy of the parameters
//	return	< 0 on error and **kargs = NULL
static int user_copy_arg_list(char **args, int argc, char ***kargs)
{
	char **largs;
	int err;
	int cnt;
	char *source;
	char buf[SYS_THREAD_ARG_LENGTH_MAX];

	*kargs = NULL;

	if(is_kernel_address(args))
		return ERR_VM_BAD_USER_MEMORY;

	largs = kmalloc((argc + 1) * sizeof(char *));
	if(largs == NULL){
		return ERR_NO_MEMORY;
	}

	// scan all parameters and copy them to kernel space
	for(cnt = 0; cnt < argc; cnt++) {
		err = user_memcpy(&source, &(args[cnt]), sizeof(char *));
		if(err < 0)
			goto error;

		if(is_kernel_address(source)){
			err = ERR_VM_BAD_USER_MEMORY;
			goto error;
		}

		err = user_strncpy(buf, source, SYS_THREAD_ARG_LENGTH_MAX - 1);
		if(err < 0)
			goto error;
		buf[SYS_THREAD_ARG_LENGTH_MAX - 1] = 0;

		largs[cnt] = kstrdup(buf);
		if(largs[cnt] == NULL){
			err = ERR_NO_MEMORY;
			goto error;
		}
	}

	largs[argc] = NULL;

	*kargs = largs;
	return NO_ERROR;

error:
	free_arg_list(largs, cnt);
	dprintf("user_copy_arg_list failed %d\n", err);
	return err;
}

static unsigned int thread_struct_hash(void *_t, const void *_key, unsigned int range)
{
	struct thread *t = _t;
	const struct thread_key *key = _key;

	if(t != NULL)
		return (t->id % range);
	else
		return (key->id % range);
}

static struct thread *create_thread_struct(const char *name)
{
	struct thread *t;

	int_disable_interrupts();
	GRAB_THREAD_LOCK();
	t = thread_dequeue(&dead_q);
	RELEASE_THREAD_LOCK();
	int_restore_interrupts();

	if(t == NULL) {
		t = (struct thread *)kmalloc(sizeof(struct thread));
		if(t == NULL)
			goto err;
	}

	strncpy(&t->name[0], name, SYS_MAX_OS_NAME_LEN-1);
	t->name[SYS_MAX_OS_NAME_LEN-1] = 0;

	t->id = atomic_add(&next_thread_id, 1);
	t->proc = NULL;
	t->cpu = NULL;
	t->fpu_cpu = NULL;
	t->fpu_state_saved = true;
	t->sem_blocking = -1;
	t->fault_handler = 0;
	t->kernel_stack_region_id = -1;
	t->kernel_stack_base = 0;
	t->user_stack_region_id = -1;
	t->user_stack_base = 0;
	list_clear_node(&t->proc_node);
	t->priority = -1;
	t->args = NULL;
	t->sig_pending = 0;
	t->sig_block_mask = 0;
	memset(t->sig_action, 0, 32 * sizeof(struct sigaction));
	memset(&t->alarm_event, 0, sizeof(t->alarm_event));
	t->in_kernel = true;
	t->int_disable_level = 0;
	t->user_time = 0;
	t->kernel_time = 0;
	t->last_time = 0;
	t->last_time_type = KERNEL_TIME;
	{
		char temp[64];

		sprintf(temp, "thread_0x%x_retcode_sem", t->id);
		t->return_code_sem = sem_create(0, temp);
		if(t->return_code_sem < 0)
			goto err1;
	}

	if(arch_thread_init_thread_struct(t) < 0)
		goto err2;

	return t;

err2:
	sem_delete_etc(t->return_code_sem, -1);
err1:
	kfree(t);
err:
	return NULL;
}

static void delete_thread_struct(struct thread *t)
{
	if(t->return_code_sem >= 0)
		sem_delete_etc(t->return_code_sem, -1);
	kfree(t);
}

static int _create_user_thread_kentry(void)
{
	struct thread *t;

	// simulates the thread spinlock release that would occur if the thread had been
	// rescheduled from. The resched didn't happen because the thread is new.
	RELEASE_THREAD_LOCK();
	int_restore_interrupts(); // this essentially simulates a return-from-interrupt

	t = thread_get_current_thread();

	// start tracking kernel time
	t->last_time = system_time();
	t->last_time_type = KERNEL_TIME;

	// a signal may have been delivered here
	thread_atkernel_exit();

	// jump to the entry point in user space
	arch_thread_enter_uspace(t, (addr_t)t->entry, t->args, t->user_stack_base + STACK_SIZE);

	// never gets here; the thread will exit by calling the thread_exit syscall
	return 0;
}

static int _create_kernel_thread_kentry(void)
{
	int (*func)(void *args);
	struct thread *t;
	int retcode;

	// simulates the thread spinlock release that would occur if the thread had been
	// rescheduled from. The resched didn't happen because the thread is new.
	RELEASE_THREAD_LOCK();
	int_restore_interrupts(); // this essentially simulates a return-from-interrupt

	// start tracking kernel time
	t = thread_get_current_thread();
	t->last_time = system_time();
	t->last_time_type = KERNEL_TIME;

	// call the entry function with the appropriate args
	func = (void *)t->entry;
	retcode = func(t->args);

	// we're done, exit
	thread_exit(retcode);

	// shouldn't get here
	return 0;
}

static thread_id _create_thread(const char *name, proc_id pid, addr_t entry, void *args, bool kernel)
{
	struct thread *t;
	struct proc *p;
	char stack_name[64];
	bool abort = false;

	t = create_thread_struct(name);
	if(t == NULL)
		return ERR_NO_MEMORY;

	t->priority = THREAD_MEDIUM_PRIORITY;
	t->state = THREAD_STATE_BIRTH;
	t->next_state = THREAD_STATE_SUSPENDED;

	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	// insert into global list
	hash_insert(thread_hash, t);
	RELEASE_THREAD_LOCK();

	GRAB_PROC_LOCK();
	// look at the proc, make sure it's not being deleted
	p = proc_get_proc_struct_locked(pid);
	if(p != NULL && p->state != PROC_STATE_DEATH) {
		insert_thread_into_proc(p, t);
	} else {
		abort = true;
	}
	RELEASE_PROC_LOCK();
	if(abort) {
		GRAB_THREAD_LOCK();
		hash_remove(thread_hash, t);
		RELEASE_THREAD_LOCK();
	}
	int_restore_interrupts();
	if(abort) {
		delete_thread_struct(t);
		return ERR_TASK_PROC_DELETED;
	}

	sprintf(stack_name, "%s_kstack", name);
	t->kernel_stack_region_id = vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name,
		(void **)&t->kernel_stack_base, REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE,
		REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
	if(t->kernel_stack_region_id < 0)
		panic("_create_thread: error creating kernel stack!\n");

	t->args = args;
	t->entry = entry;

	if(kernel) {
		// this sets up an initial kthread stack that runs the entry
		arch_thread_initialize_kthread_stack(t, &_create_kernel_thread_kentry);
	} else {
		// create user stack
		// XXX make this better. For now just keep trying to create a stack
		// until we find a spot.
		t->user_stack_base = (USER_STACK_REGION - STACK_SIZE) + USER_STACK_REGION_SIZE;
		while(t->user_stack_base > USER_STACK_REGION) {
			sprintf(stack_name, "%s_stack%d", p->name, t->id);
			t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, stack_name,
				(void **)&t->user_stack_base,
				REGION_ADDR_ANY_ADDRESS, STACK_SIZE, REGION_WIRING_LAZY, LOCK_RW);
			if(t->user_stack_region_id < 0) {
				t->user_stack_base -= STACK_SIZE;
			} else {
				// we created a region
				break;
			}
		}
		if(t->user_stack_region_id < 0)
			panic("_create_thread: unable to create user stack!\n");

		// copy the user entry over to the args field in the thread struct
		// the function this will call will immediately switch the thread into
		// user space.
		arch_thread_initialize_kthread_stack(t, &_create_user_thread_kentry);
	}

	// set the interrupt disable level of the new thread to one (as if it had had int_disable_interrupts called)
	t->int_disable_level = 1;

	// set the initial state of the thread to suspended
	t->state = THREAD_STATE_SUSPENDED;

	return t->id;
}

thread_id user_thread_create_user_thread(char *uname, addr_t entry, void *args)
{
	char name[SYS_MAX_OS_NAME_LEN];
	int rc;
	proc_id pid = thread_get_current_thread()->proc->id;

	if(is_kernel_address(uname))
		return ERR_VM_BAD_USER_MEMORY;
	if(is_kernel_address(entry))
		return ERR_VM_BAD_USER_MEMORY;

	rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
	if(rc < 0)
		return rc;
	name[SYS_MAX_OS_NAME_LEN-1] = 0;

	return thread_create_user_thread(name, pid, entry, args);
}

thread_id thread_create_user_thread(char *name, proc_id pid, addr_t entry, void *args)
{
	return _create_thread(name, pid, entry, args, false);
}

thread_id thread_create_kernel_thread(const char *name, int (*func)(void *), void *args)
{
	return _create_thread(name, proc_get_kernel_proc()->id, (addr_t)func, args, true);
}

static thread_id thread_create_kernel_thread_etc(const char *name, int (*func)(void *), void *args, struct proc *p)
{
	return _create_thread(name, p->id, (addr_t)func, args, true);
}
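
// Usage sketch (kernel side): new threads come back suspended, so a typical
// caller resumes them explicitly. 'worker' here is a hypothetical entry point.
//
//	thread_id tid = thread_create_kernel_thread("worker", &worker, NULL);
//	if(tid >= 0)
//		thread_resume_thread(tid);
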
int thread_suspend_thread(thread_id id)
{
	return send_signal_etc(id, SIGSTOP, SIG_FLAG_NO_RESCHED);
}

thread_id thread_get_current_thread_id(void)
{
	struct thread *t = thread_get_current_thread();

	return t ? t->id : 0;
}

int thread_resume_thread(thread_id id)
{
	return send_signal_etc(id, SIGCONT, SIG_FLAG_NO_RESCHED);
}

int thread_set_priority(thread_id id, int priority)
{
	struct thread *t;
	int retval;

	// make sure the passed in priority is within bounds
	if(priority > THREAD_MAX_RT_PRIORITY)
		priority = THREAD_MAX_RT_PRIORITY;
	if(priority < THREAD_MIN_PRIORITY)
		priority = THREAD_MIN_PRIORITY;

	t = thread_get_current_thread();
	if(t->id == id) {
		// it's ourself, so we know we aren't in a run queue, and we can manipulate
		// our structure directly
		t->priority = priority;
		retval = NO_ERROR;
	} else {
		int_disable_interrupts();
		GRAB_THREAD_LOCK();

		t = thread_get_thread_struct_locked(id);
		if(t) {
			if(t->state == THREAD_STATE_READY && t->priority != priority) {
				// this thread is in a ready queue right now, so it needs to be reinserted
				thread_dequeue_thread(t);
				t->priority = priority;
				thread_enqueue_run_q(t);
			} else {
				t->priority = priority;
			}
			retval = NO_ERROR;
		} else {
			retval = ERR_INVALID_HANDLE;
		}

		RELEASE_THREAD_LOCK();
		int_restore_interrupts();
	}

	return retval;
}

int user_thread_set_priority(thread_id id, int priority)
{
	// clamp the priority levels the user can set their threads to
	if(priority > THREAD_MAX_PRIORITY)
		priority = THREAD_MAX_PRIORITY;
	return thread_set_priority(id, priority);
}

int thread_get_thread_info(thread_id id, struct thread_info *outinfo)
{
	struct thread *t;
	struct thread_info info;
	int err;

	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(!t) {
		err = ERR_INVALID_HANDLE;
		goto out;
	}

	/* found the thread, copy the data out */
	info.id = id;
	info.owner_proc_id = t->proc->id;
	strncpy(info.name, t->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = t->state;
	info.priority = t->priority;
	info.user_stack_base = t->user_stack_base;
	info.user_time = t->user_time;
	info.kernel_time = t->kernel_time;

	err = NO_ERROR;

out:
	RELEASE_THREAD_LOCK();
	int_restore_interrupts();

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_thread_get_thread_info(thread_id id, struct thread_info *uinfo)
{
	struct thread_info info;
	int err, err2;

	if(is_kernel_address(uinfo)) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err = thread_get_thread_info(id, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	return err;
}

int thread_get_next_thread_info(uint32 *_cookie, proc_id pid, struct thread_info *outinfo)
{
	struct thread *t;
	struct proc *p;
	struct thread_info info;
	int err;
	thread_id cookie;

	cookie = (thread_id)*_cookie;

	int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(pid);
	if(!p) {
		err = ERR_INVALID_HANDLE;
		goto out;
	}

	/* find the next thread in the list of threads in the proc structure */
	t = NULL;
	if(cookie == 0) {
		t = list_peek_head_type(&p->thread_list, struct thread, proc_node);
	} else {
		list_for_every_entry(&p->thread_list, t, struct thread, proc_node) {
			if(t->id == cookie) {
				/* we found what the last search got us, walk one past the last search */
				t = list_next_type(&p->thread_list, &t->proc_node, struct thread, proc_node);
				break;
			}
		}
	}

	if(!t) {
		err = ERR_NOT_FOUND;
		goto out;
	}

	/* found the thread, copy the data out */
	info.id = t->id;
	info.owner_proc_id = t->proc->id;
	strncpy(info.name, t->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = t->state;
	info.priority = t->priority;
	info.user_stack_base = t->user_stack_base;
	info.user_time = t->user_time;
	info.kernel_time = t->kernel_time;

	err = NO_ERROR;

	*_cookie = (uint32)t->id;

out:
	RELEASE_PROC_LOCK();
	int_restore_interrupts();

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_thread_get_next_thread_info(uint32 *ucookie, proc_id pid, struct thread_info *uinfo)
{
	struct thread_info info;
	uint32 cookie;
	int err, err2;

	if(is_kernel_address(ucookie)) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	if(is_kernel_address(uinfo)) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err2 = user_memcpy(&cookie, ucookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	err = thread_get_next_thread_info(&cookie, pid, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	err2 = user_memcpy(ucookie, &cookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	return err;
}

static void _dump_proc_info(struct proc *p)
{
	dprintf("PROC: %p\n", p);
	dprintf("id: 0x%x\n", p->id);
	dprintf("pgid: 0x%x\n", p->pgid);
	dprintf("sid: 0x%x\n", p->sid);
	dprintf("name: '%s'\n", p->name);
	dprintf("next: %p\n", p->next);
	dprintf("parent: %p (0x%x)\n", p->parent, p->parent ? p->parent->id : -1);
	dprintf("children.next: %p\n", p->children.next);
	dprintf("siblings.prev: %p\n", p->siblings_node.prev);
	dprintf("siblings.next: %p\n", p->siblings_node.next);
	dprintf("num_threads: %d\n", p->num_threads);
	dprintf("state: %d\n", p->state);
	dprintf("ioctx: %p\n", p->ioctx);
	dprintf("aspace_id: 0x%x\n", p->aspace_id);
	dprintf("aspace: %p\n", p->aspace);
	dprintf("kaspace: %p\n", p->kaspace);
	dprintf("main_thread: %p\n", p->main_thread);
	dprintf("thread_list.next: %p\n", p->thread_list.next);
}

static void dump_proc_info(int argc, char **argv)
{
	struct proc *p;
	int id = -1;
	unsigned long num;
	struct hash_iterator i;

	if(argc < 2) {
		dprintf("proc: not enough arguments\n");
		return;
	}

	// if the argument looks like a hex number, treat it as such
	if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
		num = atoul(argv[1]);
		if(num > vm_get_kernel_aspace()->virtual_map.base) {
			// XXX semi-hack
			_dump_proc_info((struct proc*)num);
			return;
		} else {
			id = num;
		}
	}

	// walk through the process list, trying to match name or id
	hash_open(proc_hash, &i);
	while((p = hash_next(proc_hash, &i)) != NULL) {
		if((p->name && strcmp(argv[1], p->name) == 0) || p->id == id) {
			_dump_proc_info(p);
			break;
		}
	}
	hash_close(proc_hash, &i, false);
}

static const char *state_to_text(int state)
{
	switch(state) {
		case THREAD_STATE_READY:
			return "READY";
		case THREAD_STATE_RUNNING:
			return "RUNNING";
		case THREAD_STATE_WAITING:
			return "WAITING";
		case THREAD_STATE_SUSPENDED:
			return "SUSPEND";
		case THREAD_STATE_FREE_ON_RESCHED:
			return "DEATH";
		case THREAD_STATE_BIRTH:
			return "BIRTH";
		default:
			return "UNKNOWN";
	}
}

static struct thread *last_thread_dumped = NULL;

static void _dump_thread_info(struct thread *t)
{
	dprintf("THREAD: %p\n", t);
	dprintf("id: 0x%x\n", t->id);
	dprintf("name: '%s'\n", t->name);
	dprintf("next: %p\nproc_node.prev: %p\nproc_node.next: %p\nq_node.prev: %p\nq_node.next: %p\n",
		t->next, t->proc_node.prev, t->proc_node.next, t->q_node.prev, t->q_node.next);
	dprintf("priority: 0x%x\n", t->priority);
	dprintf("state: %s\n", state_to_text(t->state));
	dprintf("next_state: %s\n", state_to_text(t->next_state));
	dprintf("cpu: %p ", t->cpu);
	if(t->cpu)
		dprintf("(%d)\n", t->cpu->cpu_num);
	else
		dprintf("\n");
	dprintf("sig_pending: 0x%lx\n", t->sig_pending);
	dprintf("sig_block_mask: 0x%lx\n", t->sig_block_mask);
	dprintf("in_kernel: %d\n", t->in_kernel);
	dprintf("int_disable_level: %d\n", t->int_disable_level);
	dprintf("sem_blocking: 0x%x\n", t->sem_blocking);
	dprintf("sem_count: 0x%x\n", t->sem_count);
	dprintf("sem_deleted_retcode: 0x%x\n", t->sem_deleted_retcode);
	dprintf("sem_errcode: 0x%x\n", t->sem_errcode);
	dprintf("sem_flags: 0x%x\n", t->sem_flags);
	dprintf("fault_handler: 0x%lx\n", t->fault_handler);
	dprintf("args: %p\n", t->args);
	dprintf("entry: 0x%lx\n", t->entry);
	dprintf("proc: %p\n", t->proc);
	dprintf("return_code_sem: 0x%x\n", t->return_code_sem);
	dprintf("kernel_stack_region_id: 0x%x\n", t->kernel_stack_region_id);
	dprintf("kernel_stack_base: 0x%lx\n", t->kernel_stack_base);
	dprintf("user_stack_region_id: 0x%x\n", t->user_stack_region_id);
	dprintf("user_stack_base: 0x%lx\n", t->user_stack_base);
	dprintf("kernel_time: %Ld\n", t->kernel_time);
	dprintf("user_time: %Ld\n", t->user_time);
	dprintf("architecture dependent section:\n");
	arch_thread_dump_info(&t->arch_info);

	last_thread_dumped = t;
}

static void dump_thread_info(int argc, char **argv)
{
	struct thread *t;
	int id = -1;
	unsigned long num;
	struct hash_iterator i;

	if(argc < 2) {
		dprintf("thread: not enough arguments\n");
		return;
	}

	// if the argument looks like a hex number, treat it as such
	if(strlen(argv[1]) > 2 && argv[1][0] == '0' && argv[1][1] == 'x') {
		num = atoul(argv[1]);
		if(num > vm_get_kernel_aspace()->virtual_map.base) {
			// XXX semi-hack
			_dump_thread_info((struct thread *)num);
			return;
		} else {
			id = num;
		}
	}

	// walk through the thread list, trying to match name or id
	hash_open(thread_hash, &i);
	while((t = hash_next(thread_hash, &i)) != NULL) {
		if((t->name && strcmp(argv[1], t->name) == 0) || t->id == id) {
			_dump_thread_info(t);
			break;
		}
	}
	hash_close(thread_hash, &i, false);
}

static void dump_thread_list(int argc, char **argv)
{
	struct thread *t;
	struct hash_iterator i;

	hash_open(thread_hash, &i);
	while((t = hash_next(thread_hash, &i)) != NULL) {
		dprintf("%p", t);
		if(t->name != NULL)
			dprintf("\t%32s", t->name);
		else
			dprintf("\t%32s", "<NULL>");
		dprintf("\t0x%x", t->id);
		dprintf("\t%16s", state_to_text(t->state));
		if(t->cpu)
			dprintf("\t%d", t->cpu->cpu_num);
		else
			dprintf("\tNOCPU");
		dprintf("\t0x%lx\n", t->kernel_stack_base);
	}
	hash_close(thread_hash, &i, false);
}

static void dump_next_thread_in_q(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in queue after thread @ %p\n", t);
	if(t->q_node.next != NULL) {
		_dump_thread_info(containerof(t->q_node.next, struct thread, q_node)); // XXX fixme
	} else {
		dprintf("NULL\n");
	}
}

static void dump_next_thread_in_all_list(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in global list after thread @ %p\n", t);
	if(t->next != NULL) {
		_dump_thread_info(t->next);
	} else {
		dprintf("NULL\n");
	}
}

static void dump_next_thread_in_proc(int argc, char **argv)
{
	struct thread *t = last_thread_dumped;

	if(t == NULL) {
		dprintf("no thread previously dumped. Examine a thread first.\n");
		return;
	}

	dprintf("next thread in proc after thread @ %p\n", t);

	t = list_next_type(&t->proc->thread_list, &t->proc_node, struct thread, proc_node);
	if(t)
		_dump_thread_info(t);
	else
		dprintf("NULL\n");
}
static int get_death_stack(void)
{
	int i;
	unsigned int bit;

	sem_acquire(death_stack_sem, 1);

	// grab the thread lock, find a free spot and release
	int_disable_interrupts();
	GRAB_THREAD_LOCK();
	bit = death_stack_bitmap;
	bit = (~bit)&~((~bit)-1);
	death_stack_bitmap |= bit;
	RELEASE_THREAD_LOCK();

	// sanity checks
	if( !bit ) {
		panic("get_death_stack: couldn't find free stack!\n");
	}
	if( bit & (bit-1)) {
		panic("get_death_stack: impossible bitmap result!\n");
	}

	// bit to number
	i = -1;
	while(bit) {
		bit >>= 1;
		i += 1;
	}

	// dprintf("get_death_stack: returning 0x%lx\n", death_stacks[i].address);

	return i;
}

static void put_death_stack_and_reschedule(unsigned int index)
{
	// dprintf("put_death_stack...: passed %d\n", index);

	if(index >= num_death_stacks)
		panic("put_death_stack: passed invalid stack index %d\n", index);

	if(!(death_stack_bitmap & (1 << index)))
		panic("put_death_stack: passed invalid stack index %d\n", index);

	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	death_stack_bitmap &= ~(1 << index);

	sem_release_etc(death_stack_sem, 1, SEM_FLAG_NO_RESCHED);

	thread_resched();
}
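
// Note that the function above never returns to its caller in practice: the
// caller is a dying thread whose next_state is THREAD_STATE_FREE_ON_RESCHED,
// so thread_resched() switches away for the last time and the thread
// structure goes back onto the dead queue.
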
int thread_init(kernel_args *ka)
{
	struct thread *t;
	struct pgid_node *pgnode;
	struct sid_node *snode;
	unsigned int i;

	dprintf("thread_init: entry\n");
	kprintf("initializing threading system...\n");

	// create the process hash table
	proc_hash = hash_init(15, offsetof(struct proc, next), &proc_struct_compare, &proc_struct_hash);

	// create the pgroup hash table
	pgid_hash = hash_init(15, offsetof(struct pgid_node, node), &pgid_node_compare, &pgid_node_hash);

	// create the session hash table
	sid_hash = hash_init(15, offsetof(struct sid_node, node), &sid_node_compare, &sid_node_hash);

	// create the kernel process
	kernel_proc = create_proc_struct("kernel", true);
	if(kernel_proc == NULL)
		panic("could not create kernel proc!\n");
	kernel_proc->state = PROC_STATE_NORMAL;

	// the kernel_proc is its own parent
	kernel_proc->parent = kernel_proc;

	// it's part of the kernel process group
	pgnode = create_pgroup_struct(kernel_proc->id);
	hash_insert(pgid_hash, pgnode);
	add_proc_to_pgroup(kernel_proc, kernel_proc->id);

	// ditto with the session
	snode = create_session_struct(kernel_proc->id);
	hash_insert(sid_hash, snode);
	add_proc_to_session(kernel_proc, kernel_proc->id);

	kernel_proc->ioctx = vfs_new_ioctx(NULL);
	if(kernel_proc->ioctx == NULL)
		panic("could not create ioctx for kernel proc!\n");

	// stick it in the process hash
	hash_insert(proc_hash, kernel_proc);

	// create the thread hash table
	thread_hash = hash_init(15, offsetof(struct thread, next),
		&thread_struct_compare, &thread_struct_hash);

	// zero out the run queues
	for(i = 0; i < THREAD_NUM_PRIORITY_LEVELS; i++) {
		list_initialize(&run_q[i]);
	}

	// zero out the dead thread structure q
	list_initialize(&dead_q);

	// allocate a snooze sem
	snooze_sem = sem_create(0, "snooze sem");
	if(snooze_sem < 0) {
		panic("error creating snooze sem\n");
		return snooze_sem;
	}

	// create an idle thread for each cpu
	for(i=0; i<ka->num_cpus; i++) {
		char temp[64];
		vm_region *region;

		sprintf(temp, "idle_thread%d", i);
		t = create_thread_struct(temp);
		if(t == NULL) {
			panic("error creating idle thread struct\n");
			return ERR_NO_MEMORY;
		}
		t->proc = proc_get_kernel_proc();
		t->priority = THREAD_IDLE_PRIORITY;
		t->state = THREAD_STATE_RUNNING;
		t->next_state = THREAD_STATE_READY;
		t->int_disable_level = 1; // ints are disabled until the int_restore_interrupts in main()
		t->last_time = system_time();
		sprintf(temp, "idle_thread%d_kstack", i);
		t->kernel_stack_region_id = vm_find_region_by_name(vm_get_kernel_aspace_id(), temp);
		region = vm_get_region_by_id(t->kernel_stack_region_id);
		if(!region) {
			panic("error finding idle kstack region\n");
		}
		t->kernel_stack_base = region->base;
		vm_put_region(region);
		hash_insert(thread_hash, t);
		insert_thread_into_proc(t->proc, t);
		idle_threads[i] = t;
		if(i == 0)
			arch_thread_set_current_thread(t);
		t->cpu = &cpu[i];
	}

	// create a set of death stacks
	num_death_stacks = smp_get_num_cpus();
	if(num_death_stacks > 8*sizeof(death_stack_bitmap)) {
		/*
		 * clamp values for really beefy machines
		 */
		num_death_stacks = 8*sizeof(death_stack_bitmap);
	}
	death_stack_bitmap = 0;
	death_stacks = (struct death_stack *)kmalloc(num_death_stacks * sizeof(struct death_stack));
	if(death_stacks == NULL) {
		panic("error creating death stacks\n");
		return ERR_NO_MEMORY;
	}
	{
		char temp[64];

		for(i=0; i<num_death_stacks; i++) {
			sprintf(temp, "death_stack%d", i);
			death_stacks[i].rid = vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp,
				(void **)&death_stacks[i].address,
				REGION_ADDR_ANY_ADDRESS, KSTACK_SIZE, REGION_WIRING_WIRED, LOCK_RW|LOCK_KERNEL);
			if(death_stacks[i].rid < 0) {
				panic("error creating death stacks\n");
				return death_stacks[i].rid;
			}
			death_stacks[i].in_use = false;
		}
	}
	death_stack_sem = sem_create(num_death_stacks, "death_stack_noavail_sem");

	// set up some debugger commands
	dbg_add_command(dump_thread_list, "threads", "list all threads");
	dbg_add_command(dump_thread_info, "thread", "list info about a particular thread");
	dbg_add_command(dump_next_thread_in_q, "next_q", "dump the next thread in the queue of last thread viewed");
	dbg_add_command(dump_next_thread_in_all_list, "next_all", "dump the next thread in the global list of the last thread viewed");
	dbg_add_command(dump_next_thread_in_proc, "next_proc", "dump the next thread in the process of the last thread viewed");
	dbg_add_command(dump_proc_info, "proc", "list info about a particular process");

	// initialize the architecture-specific thread routines
	arch_thread_init(ka);

	return 0;
}

int thread_init_percpu(int cpu_num)
{
	arch_thread_set_current_thread(idle_threads[cpu_num]);
	return 0;
}

// this starts the scheduler. Must be run under the context of
// the initial idle thread.
void thread_start_threading(void)
{
	// XXX may not be the best place for this
	// invalidate all of the other processors' TLB caches
	int_disable_interrupts();
	arch_cpu_global_TLB_invalidate();
	smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE, 0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
	int_restore_interrupts();

	// start the other processors
	smp_send_broadcast_ici(SMP_MSG_RESCHEDULE, 0, 0, 0, NULL, SMP_MSG_FLAG_ASYNC);

	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	thread_resched();

	RELEASE_THREAD_LOCK();
	int_restore_interrupts();
}

int user_thread_snooze(bigtime_t time)
{
	thread_snooze(time);
	return NO_ERROR;
}

int thread_snooze(bigtime_t time)
{
	return sem_acquire_etc(snooze_sem, 1, SEM_FLAG_TIMEOUT|SEM_FLAG_INTERRUPTABLE, time, NULL);
}

int user_thread_yield(void)
{
	thread_yield();
	return NO_ERROR;
}

void thread_yield(void)
{
	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	thread_resched();

	RELEASE_THREAD_LOCK();
	int_restore_interrupts();
}

// NOTE: PROC_LOCK must be held
static bool check_for_pgrp_connection(pgrp_id pgid, pgrp_id check_for, struct proc *ignore_proc)
{
	struct pgid_node *node;
	struct proc *temp_proc;
	bool connection = false;

	if(ignore_proc)
		dprintf("check_for_pgrp_connection: pgid %d check for %d ignore_proc %d\n", pgid, check_for, ignore_proc->id);
	else
		dprintf("check_for_pgrp_connection: pgid %d check for %d\n", pgid, check_for);

	node = hash_lookup(pgid_hash, &pgid);
	if(node) {
		list_for_every_entry(&node->list, temp_proc, struct proc, pg_node) {
			ASSERT(temp_proc->pgid == pgid);
			dprintf(" looking at %d, pgid %d, ppgid %d\n", temp_proc->id, temp_proc->pgid, temp_proc->parent->pgid);
			if(temp_proc != ignore_proc && temp_proc->parent->pgid == check_for) {
				connection = true;
				break;
			}
		}
	}
	return connection;
}
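
// This is the POSIX orphaned-process-group test used by thread_exit below: a
// group stays "connected" while some member (other than the exiting process)
// has a parent in the candidate group; once the last connection disappears,
// the group is orphaned and gets SIGHUP followed by SIGCONT.
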
// used to pass messages between thread_exit and thread_exit2
struct thread_exit_args {
	struct thread *t;
	region_id old_kernel_stack;
	unsigned int death_stack;
};

static void thread_exit2(void *_args)
{
	struct thread_exit_args args;

	// copy the arguments over, since the source is probably on the kernel stack we're about to delete
	memcpy(&args, _args, sizeof(struct thread_exit_args));

	// restore the interrupts
	int_restore_interrupts();

	// dprintf("thread_exit2, running on death stack 0x%lx\n", args.t->kernel_stack_base);

	// delete the old kernel stack region
	// dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args.old_kernel_stack, args.t->id);
	vm_delete_region(vm_get_kernel_aspace_id(), args.old_kernel_stack);

	// dprintf("thread_exit2: removing thread 0x%x from global lists\n", args.t->id);

	// remove this thread from all of the global lists
	int_disable_interrupts();
	GRAB_PROC_LOCK();
	remove_thread_from_proc(kernel_proc, args.t);
	RELEASE_PROC_LOCK();
	GRAB_THREAD_LOCK();
	hash_remove(thread_hash, args.t);
	RELEASE_THREAD_LOCK();

	// dprintf("thread_exit2: done removing thread from lists\n");

	// set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
	args.t->next_state = THREAD_STATE_FREE_ON_RESCHED;

	// throw away our fpu context
	if(args.t->fpu_cpu) {
		args.t->fpu_cpu->fpu_state_thread = NULL;
		args.t->fpu_cpu = NULL;
		args.t->fpu_state_saved = true; // a lie, actually
	}

	// return the death stack and reschedule one last time
	put_death_stack_and_reschedule(args.death_stack);

	// never get to here
	panic("thread_exit2: made it where it shouldn't have!\n");
}

void thread_exit(int retcode)
{
	struct thread *t = thread_get_current_thread();
	struct proc *p = t->proc;
	proc_id parent_pid = -1;
	bool delete_proc = false;
	unsigned int death_stack;

	dprintf("thread 0x%x exiting w/return code 0x%x\n", t->id, retcode);

	if(!kernel_startup && !int_are_interrupts_enabled())
		panic("thread_exit called with ints disabled\n");

	// boost our priority to get this over with
	thread_set_priority(t->id, THREAD_HIGH_PRIORITY);

	// cancel any pending alarms
	timer_cancel_event(&t->alarm_event);

	// delete the user stack region first
	if(p->aspace_id >= 0 && t->user_stack_region_id >= 0) {
		region_id rid = t->user_stack_region_id;
		t->user_stack_region_id = -1;
		vm_delete_region(p->aspace_id, rid);
	}

	if(p != kernel_proc) {
		// remove this thread from the current process and add it to the kernel
		// put the thread into the kernel proc until it dies
		int_disable_interrupts();
		GRAB_PROC_LOCK();
		remove_thread_from_proc(p, t);
		insert_thread_into_proc(kernel_proc, t);
		if(p->main_thread == t) {
			// this was the main thread in this process
			delete_proc = true;
			p->state = PROC_STATE_DEATH;
		}
		RELEASE_PROC_LOCK();
		// swap address spaces, to make sure we're running on the kernel's pgdir
		vm_aspace_swap(kernel_proc->kaspace);
		int_restore_interrupts();
	}

	// delete the process
	if(delete_proc) {
		if(p->num_threads > 0) {
			// there are other threads still in this process,
			// cycle through and signal kill on each of the threads
			// XXX this can be optimized. There's got to be a better solution.
			struct thread *temp_thread;

			int_disable_interrupts();
			GRAB_PROC_LOCK();
			// we can safely walk the list because of the lock. no new threads can be created
			// because of the PROC_STATE_DEATH flag on the process
			list_for_every_entry(&p->thread_list, temp_thread, struct thread, proc_node) {
				thread_kill_thread_nowait(temp_thread->id);
			}
			RELEASE_PROC_LOCK();
			int_restore_interrupts();

			// Now wait for all of the threads to die
			// XXX block on a semaphore
			while((volatile int)p->num_threads > 0) {
				thread_snooze(10000); // 10 ms
			}
		}

		int_disable_interrupts();
		GRAB_PROC_LOCK();

		// see if the process group we are in is going to be orphaned
		// it's orphaned if no parent of any other process in the group is in the
		// same process group as our parent
		if(p->sid == p->parent->sid && p->pgid != p->parent->pgid) {
			if(!check_for_pgrp_connection(p->pgid, p->parent->pgid, p)) {
				dprintf("thread_exit: killing process %d orphans process group %d\n", p->id, p->pgid);
				send_pgrp_signal_etc_locked(p->pgid, SIGHUP, SIG_FLAG_NO_RESCHED);
				send_pgrp_signal_etc_locked(p->pgid, SIGCONT, SIG_FLAG_NO_RESCHED);
			}
		}

		// remove us from the process list
		hash_remove(proc_hash, p);

		// reparent each of our children
		proc_reparent_children(p);

		// we're not part of our process group and session anymore
		remove_proc_from_pgroup(p, p->pgid);
		remove_proc_from_session(p, p->sid);

		// remember who our parent was so we can send a signal
		parent_pid = p->parent->id;

		// remove us from our parent
		remove_proc_from_parent(p->parent, p);

		RELEASE_PROC_LOCK();
		int_restore_interrupts();

		// clean up resources owned by the process
		vm_put_aspace(p->aspace);
		vm_delete_aspace(p->aspace_id);
		port_delete_owned_ports(p->id);
		sem_delete_owned_sems(p->id);
		vfs_free_ioctx(p->ioctx);
		kfree(p);
	}

	// send a signal to the parent
	send_proc_signal_etc(parent_pid, SIGCHLD, SIG_FLAG_NO_RESCHED);

	// delete the sem that others will use to wait on us and get the retcode
	{
		sem_id s = t->return_code_sem;

		t->return_code_sem = -1;
		sem_delete_etc(s, retcode);
	}

	// get_death_stack leaves interrupts disabled
	death_stack = get_death_stack();
	{
		struct thread_exit_args args;

		args.t = t;
		args.old_kernel_stack = t->kernel_stack_region_id;
		args.death_stack = death_stack;

		// set the new kernel stack officially to the death stack, it won't really be switched until
		// the next function is called. This bookkeeping must be done now before a context switch
		// happens, or the processor will interrupt to the old stack
		t->kernel_stack_region_id = death_stacks[death_stack].rid;
		t->kernel_stack_base = death_stacks[death_stack].address;

		// we will continue in thread_exit2(), on the new stack
		arch_thread_switch_kstack_and_call(t->kernel_stack_base + KSTACK_SIZE, thread_exit2, &args);
	}

	panic("never can get here\n");
}

int thread_kill_thread(thread_id id)
{
	int status = send_signal_etc(id, SIGKILLTHR, SIG_FLAG_NO_RESCHED);
	if (status < 0)
		return status;

	if (id != thread_get_current_thread()->id)
		thread_wait_on_thread(id, NULL);

	return status;
}

int thread_kill_thread_nowait(thread_id id)
{
	return send_signal_etc(id, SIGKILLTHR, SIG_FLAG_NO_RESCHED);
}

int user_thread_wait_on_thread(thread_id id, int *uretcode)
{
	int retcode;
	int rc, rc2;

	if(is_kernel_address(uretcode))
		return ERR_VM_BAD_USER_MEMORY;

	rc = thread_wait_on_thread(id, &retcode);
	if(rc < 0)
		return rc;

	rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
	if(rc2 < 0)
		return rc2;

	return rc;
}

int thread_wait_on_thread(thread_id id, int *retcode)
{
	sem_id sem;
	struct thread *t;
	int rc;

	rc = send_signal_etc(id, SIGCONT, 0);
	if (rc < NO_ERROR)
		return rc;

	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);
	if(t != NULL) {
		sem = t->return_code_sem;
	} else {
		sem = ERR_INVALID_HANDLE;
	}

	RELEASE_THREAD_LOCK();
	int_restore_interrupts();

	rc = sem_acquire_etc(sem, 1, SEM_FLAG_INTERRUPTABLE, 0, retcode);

	/* This thread died the way it should, don't ripple a non-error up */
	if (rc == ERR_SEM_DELETED)
		rc = NO_ERROR;

	return rc;
}

int user_proc_wait_on_proc(proc_id id, int *uretcode)
{
	int retcode;
	int rc, rc2;

	if(is_kernel_address(uretcode))
		return ERR_VM_BAD_USER_MEMORY;

	rc = proc_wait_on_proc(id, &retcode);
	if(rc < 0)
		return rc;

	rc2 = user_memcpy(uretcode, &retcode, sizeof(retcode));
	if(rc2 < 0)
		return rc2;

	return rc;
}

int proc_wait_on_proc(proc_id id, int *retcode)
{
	struct proc *p;
	thread_id tid;

	int_disable_interrupts();
	GRAB_PROC_LOCK();
	p = proc_get_proc_struct_locked(id);
	if(p && p->main_thread) {
		tid = p->main_thread->id;
	} else {
		tid = ERR_INVALID_HANDLE;
	}
	RELEASE_PROC_LOCK();
	int_restore_interrupts();

	if(tid < 0)
		return tid;

	return thread_wait_on_thread(tid, retcode);
}

struct thread *thread_get_thread_struct(thread_id id)
{
	struct thread *t;

	int_disable_interrupts();
	GRAB_THREAD_LOCK();

	t = thread_get_thread_struct_locked(id);

	RELEASE_THREAD_LOCK();
	int_restore_interrupts();

	return t;
}

struct thread *thread_get_thread_struct_locked(thread_id id)
{
	struct thread_key key;

	key.id = id;

	return hash_lookup(thread_hash, &key);
}

// unused
#if 0
static struct proc *proc_get_proc_struct(proc_id id)
{
	struct proc *p;

	int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(id);

	RELEASE_PROC_LOCK();
	int_restore_interrupts();

	return p;
}
#endif

static struct proc *proc_get_proc_struct_locked(proc_id id)
{
	struct proc_key key;

	key.id = id;

	return hash_lookup(proc_hash, &key);
}

static void thread_context_switch(struct thread *t_from, struct thread *t_to)
{
	vm_translation_map *new_tmap;

	// track kernel time
	bigtime_t now = system_time();
	if(t_from->last_time_type == KERNEL_TIME)
		t_from->kernel_time += now - t_from->last_time;
	else
		t_from->user_time += now - t_from->last_time;
	t_to->last_time = now;

	// XXX remove this?

	// remember that this cpu will hold the current fpu state if
	// a) it's not already saved in the thread structure
	// b) it's not on another cpu
	if(!t_from->fpu_state_saved) {
		if(t_from->fpu_cpu == NULL) { // does another cpu "own" our state?
			cpu_ent *cpu = get_curr_cpu_struct();

			// the current cpu *has* to own our state
			ASSERT(cpu->fpu_state_thread == t_from);
		}
	}

	// set the current cpu and thread pointer
	t_to->cpu = t_from->cpu;
	arch_thread_set_current_thread(t_to);
	t_from->cpu = NULL;

	// decide if we need to switch to a new mmu context
	if(t_from->proc->aspace_id >= 0 && t_to->proc->aspace_id >= 0) {
		// they are both uspace threads
		if(t_from->proc->aspace_id == t_to->proc->aspace_id) {
			// same address space
			new_tmap = NULL;
		} else {
			// switching to a new address space
			new_tmap = &t_to->proc->aspace->translation_map;
		}
	} else if(t_from->proc->aspace_id < 0 && t_to->proc->aspace_id < 0) {
		// they must both be kspace threads
		new_tmap = NULL;
	} else if(t_to->proc->aspace_id < 0) {
		// the one we're switching to is kspace
		new_tmap = &t_to->proc->kaspace->translation_map;
	} else {
		new_tmap = &t_to->proc->aspace->translation_map;
	}

	// do the architecture specific context switch
	arch_thread_context_switch(t_from, t_to, new_tmap);
}
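
// _rand() below is a minimal linear congruential generator (the constants are
// the ones from the C standard's sample rand() implementation), returning a
// value in [0, 0x7FFF]. It only feeds the scheduler's probabilistic skip in
// thread_resched, so statistical quality hardly matters here.
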
static int _rand(void)
{
	static int next = 0;

	if(next == 0)
		next = system_time();

	next = next * 1103515245 + 12345;
	return((next >> 16) & 0x7FFF);
}

static int reschedule_event(void *unused)
{
	// this function is called as a result of the timer event set by the scheduler
	// returning this causes a reschedule on the timer event
	thread_get_current_thread()->cpu->preempted = 1;
	return INT_RESCHEDULE;
}
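
// Scheduling policy sketch for thread_resched below: real-time queues are
// strict priority, highest first. For the regular queues, the head of the
// highest non-empty queue is taken with probability P(_rand() > 0x3000),
// roughly 5/8; otherwise the scan falls through to lower priorities,
// remembering the highest level seen so it can fall back to it (or to the
// idle queue) if nothing else gets picked.
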
// NOTE: expects thread_spinlock to be held
void thread_resched(void)
{
	struct thread *next_thread = NULL;
	int last_thread_pri = -1;
	struct thread *old_thread = thread_get_current_thread();
	int i;
	bigtime_t quantum;
	struct timer_event *quantum_timer;

	// dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread());

	switch(old_thread->next_state) {
		case THREAD_STATE_RUNNING:
		case THREAD_STATE_READY:
			// dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
			thread_enqueue_run_q(old_thread);
			break;
		case THREAD_STATE_SUSPENDED:
			dprintf("suspending thread 0x%x\n", old_thread->id);
			break;
		case THREAD_STATE_FREE_ON_RESCHED:
			thread_enqueue(old_thread, &dead_q);
			break;
		default:
			// dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
			break;
	}
	old_thread->state = old_thread->next_state;

	// search the real-time queue
	for(i = THREAD_MAX_RT_PRIORITY; i >= THREAD_MIN_RT_PRIORITY; i--) {
		next_thread = thread_dequeue_run_q(i);
		if(next_thread)
			goto found_thread;
	}

	// search the regular queue
	for(i = THREAD_MAX_PRIORITY; i > THREAD_IDLE_PRIORITY; i--) {
		next_thread = thread_lookat_run_q(i);
		if(next_thread != NULL) {
			// skip it sometimes
			if(_rand() > 0x3000) {
				next_thread = thread_dequeue_run_q(i);
				goto found_thread;
			}
			last_thread_pri = i;
			next_thread = NULL;
		}
	}
	if(next_thread == NULL) {
		if(last_thread_pri != -1) {
			next_thread = thread_dequeue_run_q(last_thread_pri);
			if(next_thread == NULL)
				panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri);
		} else {
			next_thread = thread_dequeue_run_q(THREAD_IDLE_PRIORITY);
			if(next_thread == NULL)
				panic("next_thread == NULL! no idle priorities!\n");
		}
	}

found_thread:
	next_thread->state = THREAD_STATE_RUNNING;
	next_thread->next_state = THREAD_STATE_READY;

	// XXX should only reset the quantum timer if we are switching to a new thread,
	// or we got here as a result of a quantum expire.

	// XXX calculate quantum
	quantum = 10000;

	// get the quantum timer for this cpu
	quantum_timer = &old_thread->cpu->quantum_timer;
	if(!old_thread->cpu->preempted) {
		_local_timer_cancel_event(old_thread->cpu->cpu_num, quantum_timer);
	}
	old_thread->cpu->preempted = 0;
	timer_setup_timer(&reschedule_event, NULL, quantum_timer);
	timer_set_event(quantum, TIMER_MODE_ONESHOT, quantum_timer);

	if(next_thread != old_thread) {
		// dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
		//	smp_get_current_cpu(), old_thread->id, next_thread->id);
		thread_context_switch(old_thread, next_thread);
	}
}

static void insert_proc_into_parent(struct proc *parent, struct proc *p)
{
	list_add_head(&parent->children, &p->siblings_node);
	p->parent = parent;
}

static void remove_proc_from_parent(struct proc *parent, struct proc *p)
{
	list_delete(&p->siblings_node);
	p->parent = NULL;
}

static int proc_struct_compare(void *_p, const void *_key)
{
	struct proc *p = _p;
	const struct proc_key *key = _key;

	if(p->id == key->id) return 0;
	else return 1;
}

static unsigned int proc_struct_hash(void *_p, const void *_key, unsigned int range)
{
	struct proc *p = _p;
	const struct proc_key *key = _key;

	if(p != NULL)
		return (p->id % range);
	else
		return (key->id % range);
}

struct proc *proc_get_kernel_proc(void)
{
	return kernel_proc;
}

proc_id proc_get_kernel_proc_id(void)
{
	if(!kernel_proc)
		return 0;
	else
		return kernel_proc->id;
}

proc_id proc_get_current_proc_id(void)
{
	return thread_get_current_thread()->proc->id;
}

struct proc *proc_get_current_proc(void)
{
	return thread_get_current_thread()->proc;
}

static struct proc *create_proc_struct(const char *name, bool kernel)
{
	struct proc *p;

	p = (struct proc *)kmalloc(sizeof(struct proc));
	if(p == NULL)
		goto error;
	p->next = NULL;
	list_clear_node(&p->siblings_node);
	list_initialize(&p->children);
	p->parent = NULL;
	p->id = atomic_add(&next_proc_id, 1);
	p->pgid = -1;
	p->sid = -1;
	list_clear_node(&p->pg_node);
	list_clear_node(&p->session_node);
	strncpy(&p->name[0], name, SYS_MAX_OS_NAME_LEN-1);
	p->name[SYS_MAX_OS_NAME_LEN-1] = 0;
	p->num_threads = 0;
	p->ioctx = NULL;
	p->aspace_id = -1;
	p->aspace = NULL;
	p->kaspace = vm_get_kernel_aspace();
	vm_put_aspace(p->kaspace);
	list_initialize(&p->thread_list);
	p->main_thread = NULL;
	p->state = PROC_STATE_BIRTH;

	if(arch_proc_init_proc_struct(p, kernel) < 0)
		goto error1;

	return p;

error1:
	kfree(p);
error:
	return NULL;
}

static void delete_proc_struct(struct proc *p)
{
	kfree(p);
}

int proc_get_proc_info(proc_id id, struct proc_info *outinfo)
{
	struct proc *p;
	struct proc_info info;
	int err;

	int_disable_interrupts();
	GRAB_PROC_LOCK();

	p = proc_get_proc_struct_locked(id);
	if(!p) {
		err = ERR_INVALID_HANDLE;
		goto out;
	}

	/* found the proc, copy the data out */
	info.pid = id;
	info.ppid = p->parent->id;
	info.pgid = p->pgid;
	info.sid = p->sid;
	strncpy(info.name, p->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = p->state;
	info.num_threads = p->num_threads;

	err = NO_ERROR;

out:
	RELEASE_PROC_LOCK();
	int_restore_interrupts();

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_proc_get_proc_info(proc_id id, struct proc_info *uinfo)
{
	struct proc_info info;
	int err, err2;

	if(is_kernel_address(uinfo)) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err = proc_get_proc_info(id, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	return err;
}

int proc_get_next_proc_info(uint32 *cookie, struct proc_info *outinfo)
{
	struct proc *p;
	struct proc_info info;
	int err;
	struct hash_iterator i;
	proc_id id = (proc_id)*cookie;

	int_disable_interrupts();
	GRAB_PROC_LOCK();

	hash_open(proc_hash, &i);
	while((p = hash_next(proc_hash, &i)) != NULL) {
		if(id == 0)
			break; // initial search, return the first proc
		if(p->id == id) {
			// we found the last proc that was looked at, increment to the next one
			p = hash_next(proc_hash, &i);
			break;
		}
	}
	if(p == NULL) {
		err = ERR_NO_MORE_HANDLES;
		goto out;
	}

	// we have the proc structure, copy the data out of it
	info.pid = p->id;
	info.ppid = p->parent->id;
	info.pgid = p->pgid;
	info.sid = p->sid;
	strncpy(info.name, p->name, SYS_MAX_OS_NAME_LEN-1);
	info.name[SYS_MAX_OS_NAME_LEN-1] = '\0';
	info.state = p->state;
	info.num_threads = p->num_threads;

	err = 0;

	*cookie = (uint32)p->id;

out:
	RELEASE_PROC_LOCK();
	int_restore_interrupts();

	if(err >= 0)
		memcpy(outinfo, &info, sizeof(info));

	return err;
}

int user_proc_get_next_proc_info(uint32 *ucookie, struct proc_info *uinfo)
{
	struct proc_info info;
	uint32 cookie;
	int err, err2;

	if(is_kernel_address(ucookie)) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	if(is_kernel_address(uinfo)) {
		return ERR_VM_BAD_USER_MEMORY;
	}

	err2 = user_memcpy(&cookie, ucookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	err = proc_get_next_proc_info(&cookie, &info);
	if(err < 0)
		return err;

	err2 = user_memcpy(uinfo, &info, sizeof(info));
	if(err2 < 0)
		return err2;

	err2 = user_memcpy(ucookie, &cookie, sizeof(cookie));
	if(err2 < 0)
		return err2;

	return err;
}

static int get_arguments_data_size(char **args, int argc)
{
	int cnt;
	int tot_size = 0;

	for(cnt = 0; cnt < argc; cnt++)
		tot_size += strlen(args[cnt]) + 1;
	tot_size += (argc + 1) * sizeof(char *);

	return tot_size + sizeof(struct uspace_prog_args_t);
}
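
// Stack-top layout built by proc_create_proc2 below, growing up from
// user_stack_base (the sizes are exactly what get_arguments_data_size
// accounts for):
//
//	[ stack (STACK_SIZE) ]
//	[ struct uspace_prog_args_t ]		<- uspa, at user_stack_base + STACK_SIZE
//	[ argv array, argc+1 pointers ]		<- uargs
//	[ argument string data ]		<- udest
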
static int proc_create_proc2(void *args)
{
	int err;
	struct thread *t;
	struct proc *p;
	struct proc_arg *pargs = args;
	char *path;
	addr_t entry;
	char ustack_name[128];
	int tot_top_size;
	char **uargs;
	char *udest;
	struct uspace_prog_args_t *uspa;
	unsigned int cnt;

	t = thread_get_current_thread();
	p = t->proc;

	dprintf("proc_create_proc2: entry thread %d\n", t->id);

	// create an initial primary stack region

	tot_top_size = STACK_SIZE + PAGE_ALIGN(get_arguments_data_size(pargs->args, pargs->argc));
	t->user_stack_base = ((USER_STACK_REGION - tot_top_size) + USER_STACK_REGION_SIZE);
	sprintf(ustack_name, "%s_primary_stack", p->name);
	t->user_stack_region_id = vm_create_anonymous_region(p->aspace_id, ustack_name, (void **)&t->user_stack_base,
		REGION_ADDR_EXACT_ADDRESS, tot_top_size, REGION_WIRING_LAZY, LOCK_RW);
	if(t->user_stack_region_id < 0) {
		panic("proc_create_proc2: could not create default user stack region\n");
		return t->user_stack_region_id;
	}

	uspa = (struct uspace_prog_args_t *)(t->user_stack_base + STACK_SIZE);
	uargs = (char **)(uspa + 1);
	udest = (char *)(uargs + pargs->argc + 1);
	// dprintf("addr: stack base=0x%x uargs = 0x%x udest=0x%x tot_top_size=%d\n\n", t->user_stack_base, uargs, udest, tot_top_size);

	for(cnt = 0; cnt < pargs->argc; cnt++){
		uargs[cnt] = udest;
		user_strcpy(udest, pargs->args[cnt]);
		udest += strlen(pargs->args[cnt]) + 1;
	}
	uargs[cnt] = NULL;

	user_memcpy(uspa->prog_name, p->name, sizeof(uspa->prog_name));
	user_memcpy(uspa->prog_path, pargs->path, sizeof(uspa->prog_path));
	uspa->argc = cnt;
	uspa->argv = uargs;
	uspa->envc = 0;
	uspa->envp = 0;

	if(pargs->args != NULL)
		free_arg_list(pargs->args, pargs->argc);

	path = pargs->path;
	dprintf("proc_create_proc2: loading elf binary '%s'\n", path);

	err = elf_load_uspace("/boot/libexec/rld.so", p, 0, &entry);
	if(err < 0){
		// XXX clean up proc
		return err;
	}

	// free the args
	kfree(pargs->path);
	kfree(pargs);

	dprintf("proc_create_proc2: loaded elf. entry = 0x%lx\n", entry);

	p->state = PROC_STATE_NORMAL;

	// jump to the entry point in user space
	arch_thread_enter_uspace(t, entry, uspa, t->user_stack_base + STACK_SIZE);

	// never gets here
	return 0;
}

proc_id proc_create_proc(const char *path, const char *name, char **args, int argc, int priority, int flags)
{
    struct proc *p;
    struct proc *curr_proc;
    thread_id tid;
    proc_id pid;
    proc_id curr_proc_id;
    int err;
    struct proc_arg *pargs;
    struct sid_node *snode = NULL;
    struct pgid_node *pgnode = NULL;

    dprintf("proc_create_proc: entry '%s', name '%s' args = %p argc = %d, flags = 0x%x\n", path, name, args, argc, flags);

    p = create_proc_struct(name, false);
    if(p == NULL)
        return ERR_NO_MEMORY;

    pid = p->id;
    curr_proc_id = proc_get_current_proc_id();

    // preallocate a process group and session node if we need it
    if(flags & PROC_FLAG_NEW_SESSION) {
        snode = create_session_struct(p->id);
        flags |= PROC_FLAG_NEW_PGROUP; // creating your own session implies your own pgroup
    }
    if(flags & PROC_FLAG_NEW_PGROUP)
        pgnode = create_pgroup_struct(p->id);

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    // insert this proc into the global list
    hash_insert(proc_hash, p);

    // add it to the parent's list
    curr_proc = proc_get_proc_struct_locked(curr_proc_id);
    insert_proc_into_parent(curr_proc, p);

    if(flags & PROC_FLAG_NEW_SESSION) {
        hash_insert(sid_hash, snode);
        add_proc_to_session(p, p->id);
    } else {
        // inherit the parent's session
        p->sid = curr_proc->sid;
        add_proc_to_session(p, curr_proc->sid);
    }

    if(flags & PROC_FLAG_NEW_PGROUP) {
        hash_insert(pgid_hash, pgnode);
        add_proc_to_pgroup(p, p->id);
    } else {
        // inherit the creating process's process group
        p->pgid = curr_proc->pgid;
        add_proc_to_pgroup(p, curr_proc->pgid);
    }

    RELEASE_PROC_LOCK();
    int_restore_interrupts();

    // copy the args over
    pargs = kmalloc(sizeof(struct proc_arg));
    if(pargs == NULL) {
        err = ERR_NO_MEMORY;
        goto err1;
    }
    pargs->path = kstrdup(path);
    if(pargs->path == NULL) {
        err = ERR_NO_MEMORY;
        goto err2;
    }
    pargs->argc = argc;
    pargs->args = args;

    // create a new ioctx for this process
    p->ioctx = vfs_new_ioctx(thread_get_current_thread()->proc->ioctx);
    if(!p->ioctx) {
        err = ERR_NO_MEMORY;
        goto err3;
    }

    // create an address space for this process
    p->aspace_id = vm_create_aspace(p->name, USER_BASE, USER_BASE, USER_SIZE, false);
    if(p->aspace_id < 0) {
        err = p->aspace_id;
        goto err4;
    }
    p->aspace = vm_get_aspace_by_id(p->aspace_id);

    // create a kernel thread, but under the context of the new process
    tid = thread_create_kernel_thread_etc(name, proc_create_proc2, pargs, p);
    if(tid < 0) {
        err = tid;
        goto err5;
    }

    if((flags & PROC_FLAG_SUSPENDED) == 0)
        thread_resume_thread(tid);

    return pid;

err5:
    vm_put_aspace(p->aspace);
    vm_delete_aspace(p->aspace_id);
err4:
    vfs_free_ioctx(p->ioctx);
err3:
    kfree(pargs->path);
err2:
    kfree(pargs);
err1:
    // remove the proc structure from the proc hash table and delete the proc structure
    int_disable_interrupts();
    GRAB_PROC_LOCK();
    hash_remove(proc_hash, p);
    RELEASE_PROC_LOCK();
    int_restore_interrupts();
    delete_proc_struct(p);
//err:
    return err;
}

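/*
** Syscall wrapper around proc_create_proc: rejects kernel-space pointers and
** copies the path, name, and argument list into kernel buffers up front, so
** nothing below this layer has to dereference user memory for these
** parameters.
*/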
proc_id user_proc_create_proc(const char *upath, const char *uname, char **args, int argc, int priority, int flags)
{
    char path[SYS_MAX_PATH_LEN];
    char name[SYS_MAX_OS_NAME_LEN];
    char **kargs = NULL;
    int rc;

    dprintf("user_proc_create_proc: argc = %d\n", argc);

    if(is_kernel_address(upath))
        return ERR_VM_BAD_USER_MEMORY;
    if(is_kernel_address(uname))
        return ERR_VM_BAD_USER_MEMORY;

    rc = user_copy_arg_list(args, argc, &kargs);
    if(rc < 0)
        goto error;

    rc = user_strncpy(path, upath, SYS_MAX_PATH_LEN-1);
    if(rc < 0)
        goto error;
    path[SYS_MAX_PATH_LEN-1] = 0;

    rc = user_strncpy(name, uname, SYS_MAX_OS_NAME_LEN-1);
    if(rc < 0)
        goto error;
    name[SYS_MAX_OS_NAME_LEN-1] = 0;

    return proc_create_proc(path, name, kargs, argc, priority, flags);

error:
    // kargs starts out NULL so a failed user_copy_arg_list doesn't feed
    // garbage to free_arg_list
    if(kargs)
        free_arg_list(kargs, argc);
    return rc;
}

int proc_kill_proc(proc_id id)
{
    struct proc *p;
    thread_id tid = -1;
    int retval = 0;

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    p = proc_get_proc_struct_locked(id);
    if(p != NULL) {
        tid = p->main_thread->id;
    } else {
        retval = ERR_INVALID_HANDLE;
    }

    RELEASE_PROC_LOCK();
    int_restore_interrupts();
    if(retval < 0)
        return retval;

    // just kill the main thread in the process. The cleanup code there will
    // take care of the process
    return thread_kill_thread(tid);
}

thread_id proc_get_main_thread(proc_id id)
{
    struct proc *p;
    thread_id tid;

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    p = proc_get_proc_struct_locked(id);
    if(p != NULL) {
        tid = p->main_thread->id;
    } else {
        tid = ERR_INVALID_HANDLE;
    }

    RELEASE_PROC_LOCK();
    int_restore_interrupts();

    return tid;
}

// reparent each of our children
// NOTE: must have PROC lock held
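// If reparenting leaves a child's process group with no remaining connection
// to the rest of its session, the group is orphaned; per the usual POSIX
// rule it is sent SIGHUP followed by SIGCONT so that stopped jobs in it do
// not linger unreachable.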
static void proc_reparent_children(struct proc *p)
{
    struct proc *child, *next;

    list_for_every_entry_safe(&p->children, child, next, struct proc, siblings_node) {
        // remove the child from the current proc and add to the parent
        remove_proc_from_parent(p, child);
        insert_proc_into_parent(p->parent, child);

        // check to see if this orphans the process group the child is in
        if(p->sid == child->sid && p->pgid != child->pgid) {
            if(!check_for_pgrp_connection(child->pgid, p->pgid, NULL)) {
                dprintf("thread_exit: killing process %d orphans process group %d\n", p->id, child->pgid);
                send_pgrp_signal_etc_locked(child->pgid, SIGHUP, SIG_FLAG_NO_RESCHED);
                send_pgrp_signal_etc_locked(child->pgid, SIGCONT, SIG_FLAG_NO_RESCHED);
            }
        }
    }
}

// called in the int handler code when a thread enters the kernel from user space (via syscall)
void thread_atkernel_entry(void)
{
    struct thread *t;
    bigtime_t now;

    // dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);

    t = thread_get_current_thread();

    int_disable_interrupts();

    // track user time
    now = system_time();
    t->user_time += now - t->last_time;
    t->last_time = now;
    t->last_time_type = KERNEL_TIME;

    GRAB_THREAD_LOCK();

    t->in_kernel = true;

    RELEASE_THREAD_LOCK();
    int_restore_interrupts();
}

// called when a thread exits kernel space to user space
void thread_atkernel_exit(void)
{
    struct thread *t;
    int resched;
    bigtime_t now;

    // dprintf("thread_atkernel_exit: entry\n");

    t = thread_get_current_thread();

    int_disable_interrupts();
    GRAB_THREAD_LOCK();

    resched = handle_signals(t);
    if(resched)
        thread_resched();

    t->in_kernel = false;

    RELEASE_THREAD_LOCK();

    // track kernel time
    now = system_time();
    t->kernel_time += now - t->last_time;
    t->last_time = now;
    t->last_time_type = USER_TIME;

    int_restore_interrupts();
}

// called at the end of an interrupt routine, tries to deliver signals
int thread_atinterrupt_exit(void)
{
    int resched;
    struct thread *t;

    t = thread_get_current_thread();
    if(!t)
        return INT_NO_RESCHEDULE;

    GRAB_THREAD_LOCK();

    resched = handle_signals(t);

    RELEASE_THREAD_LOCK();

    return resched ? INT_RESCHEDULE : INT_NO_RESCHEDULE;
}

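/*
** Resource limits. The user_* entry points validate and copy user memory;
** the plain getrlimit()/setrlimit() below are the kernel-side workers, which
** currently delegate only RLIMIT_NOFILE to the VFS and reject every other
** resource.
*/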
int user_getrlimit(int resource, struct rlimit * urlp)
{
    int ret;
    struct rlimit rl;

    if (urlp == NULL) {
        return ERR_INVALID_ARGS;
    }
    if(is_kernel_address(urlp)) {
        return ERR_VM_BAD_USER_MEMORY;
    }

    ret = getrlimit(resource, &rl);

    if (ret == 0) {
        ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
        if (ret < 0) {
            return ret;
        }
        return 0;
    }

    return ret;
}

int getrlimit(int resource, struct rlimit * rlp)
{
    if (!rlp) {
        return -1;
    }

    switch(resource) {
        case RLIMIT_NOFILE:
            return vfs_getrlimit(resource, rlp);

        default:
            return -1;
    }

    return 0;
}

int user_setrlimit(int resource, const struct rlimit * urlp)
{
    int err;
    struct rlimit rl;

    if (urlp == NULL) {
        return ERR_INVALID_ARGS;
    }
    if(is_kernel_address(urlp)) {
        return ERR_VM_BAD_USER_MEMORY;
    }

    err = user_memcpy(&rl, urlp, sizeof(struct rlimit));
    if (err < 0) {
        return err;
    }

    return setrlimit(resource, &rl);
}

int setrlimit(int resource, const struct rlimit * rlp)
{
    if (!rlp) {
        return -1;
    }

    switch(resource) {
        case RLIMIT_NOFILE:
            return vfs_setrlimit(resource, rlp);

        default:
            return -1;
    }

    return 0;
}

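/*
** Hash-table callbacks for the process group table. Following the khash
** convention used throughout this file, the hash callback receives either an
** existing element (_p != NULL) or a bare key, and both paths must map to
** the same bucket so lookups find previously inserted nodes.
*/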
static int pgid_node_compare(void *_p, const void *_key)
{
    struct pgid_node *p = _p;
    const pgrp_id *key = _key;

    if(p->id == *key) return 0;
    else return 1;
}

static unsigned int pgid_node_hash(void *_p, const void *_key, unsigned int range)
{
    struct pgid_node *p = _p;
    const pgrp_id *key = _key;

    if(p != NULL)
        return (p->id % range);
    else
        return (*key % range);
}

// assumes PROC_LOCK is held
static int add_proc_to_pgroup(struct proc *p, pgrp_id pgid)
{
    struct pgid_node *node = hash_lookup(pgid_hash, &pgid);

    if(!node)
        return ERR_NOT_FOUND;

    p->pgid = pgid;
    ASSERT(p->pg_node.next == NULL && p->pg_node.prev == NULL);
    list_add_head(&node->list, &p->pg_node);

    return 0;
}

static int remove_proc_from_pgroup(struct proc *p, pgrp_id pgid)
{
    struct pgid_node *node = hash_lookup(pgid_hash, &pgid);

    if(!node)
        return ERR_NOT_FOUND;

    ASSERT(p->pgid == pgid);
    list_delete(&p->pg_node);

    return 0;
}

static struct pgid_node *create_pgroup_struct(pgrp_id pgid)
{
    struct pgid_node *node = kmalloc(sizeof(struct pgid_node));
    if(!node)
        return NULL;

    node->id = pgid;
    list_clear_node(&node->node);
    list_initialize(&node->list);

    return node;
}

static int send_pgrp_signal_etc_locked(pgrp_id pgid, uint signal, uint32 flags)
{
    struct pgid_node *node;
    struct proc *p;
    int err = NO_ERROR;

    node = hash_lookup(pgid_hash, &pgid);
    if(!node) {
        err = ERR_NOT_FOUND;
        goto out;
    }

    list_for_every_entry(&node->list, p, struct proc, pg_node) {
        dprintf("send_pgrp_signal_etc: sending sig %d to proc %d in pgid %d\n", signal, p->id, pgid);
        send_signal_etc(p->main_thread->id, signal, flags | SIG_FLAG_NO_RESCHED);
    }

out:
    return err;
}

int send_pgrp_signal_etc(pgrp_id pgid, uint signal, uint32 flags)
{
    int err;

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    err = send_pgrp_signal_etc_locked(pgid, signal, flags);

    RELEASE_PROC_LOCK();
    int_restore_interrupts();

    return err;
}

static int sid_node_compare(void *_s, const void *_key)
{
    struct sid_node *s = _s;
    const sess_id *key = _key;

    if(s->id == *key) return 0;
    else return 1;
}

static unsigned int sid_node_hash(void *_s, const void *_key, unsigned int range)
{
    struct sid_node *s = _s;
    const sess_id *key = _key;

    if(s != NULL)
        return (s->id % range);
    else
        return (*key % range);
}

// assumes PROC_LOCK is held
static int add_proc_to_session(struct proc *p, sess_id sid)
{
    struct sid_node *node = hash_lookup(sid_hash, &sid);
    if(!node)
        return ERR_NOT_FOUND;

    p->sid = sid;
    ASSERT(p->session_node.next == NULL && p->session_node.prev == NULL);
    list_add_head(&node->list, &p->session_node);

    return 0;
}

static int remove_proc_from_session(struct proc *p, sess_id sid)
{
    struct sid_node *node = hash_lookup(sid_hash, &sid);
    if(!node)
        return ERR_NOT_FOUND;

    ASSERT(p->sid == sid);
    list_delete(&p->session_node);

    return 0;
}

static struct sid_node *create_session_struct(sess_id sid)
{
    struct sid_node *node = kmalloc(sizeof(struct sid_node));
    if(!node)
        return NULL;

    node->id = sid;
    list_clear_node(&node->node);
    list_initialize(&node->list);

    return node;
}

int send_session_signal_etc(sess_id sid, uint signal, uint32 flags)
{
    struct sid_node *node;
    struct proc *p;
    int err = NO_ERROR;

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    node = hash_lookup(sid_hash, &sid);
    if(!node) {
        err = ERR_NOT_FOUND;
        goto out;
    }

    list_for_every_entry(&node->list, p, struct proc, session_node) {
        send_proc_signal_etc(p->main_thread->id, signal, flags | SIG_FLAG_NO_RESCHED);
    }

out:
    RELEASE_PROC_LOCK();
    int_restore_interrupts();

    return err;
}

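/*
** POSIX-style setpgid: pid 0 means the calling process, and pgid 0 means
** "use pid", so setpgid(0, 0) moves the caller into its own group. Since
** creating a missing group requires an allocation that may block, the PROC
** spinlock is dropped around create_pgroup_struct() and the hash lookup is
** repeated afterwards in case another thread created the group meanwhile.
*/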
int setpgid(proc_id pid, pgrp_id pgid)
{
    struct proc *p;
    struct pgid_node *free_node = NULL;
    int err;

    if(pid < 0 || pgid < 0)
        return ERR_INVALID_ARGS;

    if(pid == 0)
        pid = proc_get_current_proc_id();
    if(pgid == 0)
        pgid = pid;

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    p = proc_get_proc_struct_locked(pid);
    if(!p) {
        err = ERR_NOT_FOUND;
        goto out;
    }

    // see if it's already in the target process group
    if(p->pgid == pgid) {
        err = NO_ERROR;
        goto out;
    }

    // see if the target process group exists
    if(hash_lookup(pgid_hash, &pgid) == NULL) {
        // create it
        // NOTE: we need to release the proc spinlock because we might have to
        // block while allocating the node for the process group
        struct pgid_node *node;

        RELEASE_PROC_LOCK();
        int_restore_interrupts();

        node = create_pgroup_struct(pgid);
        if(!node) {
            err = ERR_NO_MEMORY;
            goto out2;
        }

        int_disable_interrupts();
        GRAB_PROC_LOCK();

        // check again before we add the newly created pgroup struct to the hash;
        // it could have been created while we had the PROC lock released.
        if(hash_lookup(pgid_hash, &pgid) != NULL) {
            free_node = node; // free it later and use the pgroup that was already added
        } else {
            // add our new pgroup node to the hash
            hash_insert(pgid_hash, node);
        }
    }

    // remove the process from its current group
    remove_proc_from_pgroup(p, p->pgid);

    // add it to the new group
    add_proc_to_pgroup(p, pgid);

    err = NO_ERROR;

out:
    RELEASE_PROC_LOCK();
    int_restore_interrupts();

    if(free_node)
        kfree(free_node);

out2:
    return err;
}

pgrp_id getpgid(proc_id pid)
{
    struct proc *p;
    pgrp_id retval;

    if(pid < 0)
        return ERR_INVALID_ARGS;

    if(pid == 0)
        pid = proc_get_current_proc_id();

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    p = proc_get_proc_struct_locked(pid);
    if(!p) {
        retval = ERR_NOT_FOUND;
        goto out;
    }

    retval = p->pgid;

out:
    RELEASE_PROC_LOCK();
    int_restore_interrupts();

    return retval;
}

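/*
** setsid: gives the calling process a new session whose id equals its own
** process id, using the same drop-the-lock allocation pattern as setpgid
** above. Note that, unlike a full POSIX setsid(), this leaves the caller's
** process group id unchanged.
*/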
sess_id setsid(void)
{
    struct proc *p;
    struct sid_node *free_node = NULL;
    proc_id pid;
    sess_id sid;
    int err;

    pid = proc_get_current_proc_id();
    sid = pid;

    int_disable_interrupts();
    GRAB_PROC_LOCK();

    p = proc_get_proc_struct_locked(pid);
    if(!p) {
        err = ERR_NOT_FOUND;
        goto out;
    }

    // see if it's already in the target session
    if(p->sid == sid) {
        err = NO_ERROR;
        goto out;
    }

    // see if the target session exists
    if(hash_lookup(sid_hash, &sid) == NULL) {
        // create it
        // NOTE: we need to release the proc spinlock because we might have to
        // block while allocating the node for the session
        struct sid_node *node;

        RELEASE_PROC_LOCK();
        int_restore_interrupts();

        node = create_session_struct(sid);
        if(!node) {
            err = ERR_NO_MEMORY;
            goto out2;
        }

        int_disable_interrupts();
        GRAB_PROC_LOCK();

        // check again before we add the newly created session struct to the hash;
        // it could have been created while we had the PROC lock released.
        if(hash_lookup(sid_hash, &sid) != NULL) {
            free_node = node; // free it later and use the session that was already added
        } else {
            // add our new session node to the hash
            hash_insert(sid_hash, node);
        }
    }

    // remove the process from its current session
    remove_proc_from_session(p, p->sid);

    // add it to the new session
    add_proc_to_session(p, sid);

    err = NO_ERROR;

out:
    RELEASE_PROC_LOCK();
    int_restore_interrupts();

    if(free_node)
        kfree(free_node);

out2:
    return err;
}