/*
** Copyright 2001-2004, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
5 #include <kernel/kernel.h>
6 #include <kernel/debug.h>
7 #include <kernel/console.h>
8 #include <kernel/thread.h>
9 #include <kernel/arch/thread.h>
10 #include <kernel/khash.h>
11 #include <kernel/int.h>
12 #include <kernel/smp.h>
13 #include <kernel/timer.h>
14 #include <kernel/time.h>
15 #include <kernel/cpu.h>
16 #include <kernel/arch/cpu.h>
17 #include <kernel/arch/int.h>
18 #include <kernel/arch/vm.h>
19 #include <kernel/sem.h>
20 #include <kernel/port.h>
21 #include <kernel/vfs.h>
22 #include <kernel/elf.h>
23 #include <kernel/heap.h>
24 #include <kernel/signal.h>
25 #include <kernel/list.h>
26 #include <newos/user_runtime.h>
27 #include <newos/errors.h>
28 #include <boot/stage2.h>
32 #include <sys/resource.h>
48 static void insert_proc_into_parent(struct proc
*parent
, struct proc
*p
);
49 static void remove_proc_from_parent(struct proc
*parent
, struct proc
*p
);
50 static struct proc
*create_proc_struct(const char *name
, bool kernel
);
51 static int proc_struct_compare(void *_p
, const void *_key
);
52 static unsigned int proc_struct_hash(void *_p
, const void *_key
, unsigned int range
);
53 static void proc_reparent_children(struct proc
*p
);
56 spinlock_t thread_spinlock
= 0;
57 const int fault_handler_offset
= (addr_t
)&(((struct thread
*)0)->fault_handler
) - (addr_t
)0;
60 static void *proc_hash
= NULL
;
61 static struct proc
*kernel_proc
= NULL
;
62 static proc_id next_proc_id
= 1;
63 static spinlock_t proc_spinlock
= 0;
64 // NOTE: PROC lock can be held over a THREAD lock acquisition,
65 // but not the other way (to avoid deadlock)
66 #define GRAB_PROC_LOCK() acquire_spinlock(&proc_spinlock)
67 #define RELEASE_PROC_LOCK() release_spinlock(&proc_spinlock)
72 struct list_node node
;
73 struct list_node list
;
75 static void *pgid_hash
= NULL
;
76 static int pgid_node_compare(void *_p
, const void *_key
);
77 static unsigned int pgid_node_hash(void *_p
, const void *_key
, unsigned int range
);
78 static int add_proc_to_pgroup(struct proc
*p
, pgrp_id pgid
);
79 static int remove_proc_from_pgroup(struct proc
*p
, pgrp_id pgid
);
80 static struct pgid_node
*create_pgroup_struct(pgrp_id pgid
);
81 static int send_pgrp_signal_etc_locked(pgrp_id pgid
, uint signal
, uint32 flags
);
86 struct list_node node
;
87 struct list_node list
;
89 static void *sid_hash
= NULL
;
90 static int sid_node_compare(void *_s
, const void *_key
);
91 static unsigned int sid_node_hash(void *_s
, const void *_key
, unsigned int range
);
92 static int add_proc_to_session(struct proc
*p
, sess_id sid
);
93 static int remove_proc_from_session(struct proc
*p
, sess_id sid
);
94 static struct sid_node
*create_session_struct(sess_id sid
);
97 static struct thread
*idle_threads
[_MAX_CPUS
];
98 static void *thread_hash
= NULL
;
99 static thread_id next_thread_id
= 1;
101 static sem_id snooze_sem
= -1;
104 // used temporarily as a thread cleans itself up
110 static struct death_stack
*death_stacks
;
111 static unsigned int num_death_stacks
;
112 static unsigned int volatile death_stack_bitmap
;
113 static sem_id death_stack_sem
;
116 static struct list_node run_q
[THREAD_NUM_PRIORITY_LEVELS
] = { { NULL
, NULL
}, };
117 static struct list_node dead_q
;
119 static int _rand(void);
120 //static struct proc *proc_get_proc_struct(proc_id id); // unused
121 static struct proc
*proc_get_proc_struct_locked(proc_id id
);
123 // insert a thread onto the tail of a queue
124 void thread_enqueue(struct thread
*t
, struct list_node
*q
)
126 list_add_tail(q
, &t
->q_node
);
129 struct thread
*thread_lookat_queue(struct list_node
*q
)
131 return list_peek_head_type(q
, struct thread
, q_node
);
134 struct thread
*thread_dequeue(struct list_node
*q
)
136 return list_remove_head_type(q
, struct thread
, q_node
);
139 void thread_dequeue_thread(struct thread
*t
)
141 list_delete(&t
->q_node
);
144 struct thread
*thread_lookat_run_q(int priority
)
146 return thread_lookat_queue(&run_q
[priority
]);
149 void thread_enqueue_run_q(struct thread
*t
)
151 // these shouldn't exist
152 if(t
->priority
> THREAD_MAX_PRIORITY
)
153 t
->priority
= THREAD_MAX_PRIORITY
;
157 thread_enqueue(t
, &run_q
[t
->priority
]);
160 static struct thread
*thread_dequeue_run_q(int priority
)
162 return thread_dequeue(&run_q
[priority
]);
165 static void insert_thread_into_proc(struct proc
*p
, struct thread
*t
)
167 list_add_head(&p
->thread_list
, &t
->proc_node
);
169 if(p
->num_threads
== 1) {
170 // this was the first thread
176 static void remove_thread_from_proc(struct proc
*p
, struct thread
*t
)
178 list_delete(&t
->proc_node
);
182 static int thread_struct_compare(void *_t
, const void *_key
)
184 struct thread
*t
= _t
;
185 const struct thread_key
*key
= _key
;
187 if(t
->id
== key
->id
) return 0;
191 // Frees the argument list
193 // args argument list.
194 // args number of arguments
196 static void free_arg_list(char **args
, int argc
)
201 for(cnt
= 0; cnt
< argc
; cnt
++){
209 // Copy argument list from userspace to kernel space
211 // args userspace parameters
212 // argc number of parameters
213 // kargs usespace parameters
214 // return < 0 on error and **kargs = NULL
216 static int user_copy_arg_list(char **args
, int argc
, char ***kargs
)
222 char buf
[SYS_THREAD_ARG_LENGTH_MAX
];
226 if(is_kernel_address(args
))
227 return ERR_VM_BAD_USER_MEMORY
;
229 largs
= kmalloc((argc
+ 1) * sizeof(char *));
231 return ERR_NO_MEMORY
;
234 // scan all parameters and copy to kernel space
236 for(cnt
= 0; cnt
< argc
; cnt
++) {
237 err
= user_memcpy(&source
, &(args
[cnt
]), sizeof(char *));
241 if(is_kernel_address(source
)){
242 err
= ERR_VM_BAD_USER_MEMORY
;
246 err
= user_strncpy(buf
,source
, SYS_THREAD_ARG_LENGTH_MAX
- 1);
249 buf
[SYS_THREAD_ARG_LENGTH_MAX
- 1] = 0;
251 largs
[cnt
] = kstrdup(buf
);
252 if(largs
[cnt
] == NULL
){
264 free_arg_list(largs
,cnt
);
265 dprintf("user_copy_arg_list failed %d \n",err
);
269 static unsigned int thread_struct_hash(void *_t
, const void *_key
, unsigned int range
)
271 struct thread
*t
= _t
;
272 const struct thread_key
*key
= _key
;
275 return (t
->id
% range
);
277 return (key
->id
% range
);
280 static struct thread
*create_thread_struct(const char *name
)
284 int_disable_interrupts();
286 t
= thread_dequeue(&dead_q
);
287 RELEASE_THREAD_LOCK();
288 int_restore_interrupts();
291 t
= (struct thread
*)kmalloc(sizeof(struct thread
));
296 strncpy(&t
->name
[0], name
, SYS_MAX_OS_NAME_LEN
-1);
297 t
->name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
299 t
->id
= atomic_add(&next_thread_id
, 1);
303 t
->fpu_state_saved
= true;
304 t
->sem_blocking
= -1;
305 t
->fault_handler
= 0;
306 t
->kernel_stack_region_id
= -1;
307 t
->kernel_stack_base
= 0;
308 t
->user_stack_region_id
= -1;
309 t
->user_stack_base
= 0;
310 list_clear_node(&t
->proc_node
);
314 t
->sig_block_mask
= 0;
315 memset(t
->sig_action
, 0, 32 * sizeof(struct sigaction
));
316 memset(&t
->alarm_event
, 0, sizeof(t
->alarm_event
));
318 t
->int_disable_level
= 0;
322 t
->last_time_type
= KERNEL_TIME
;
326 sprintf(temp
, "thread_0x%x_retcode_sem", t
->id
);
327 t
->return_code_sem
= sem_create(0, temp
);
328 if(t
->return_code_sem
< 0)
332 if(arch_thread_init_thread_struct(t
) < 0)
338 sem_delete_etc(t
->return_code_sem
, -1);
345 static void delete_thread_struct(struct thread
*t
)
347 if(t
->return_code_sem
>= 0)
348 sem_delete_etc(t
->return_code_sem
, -1);
352 static int _create_user_thread_kentry(void)
356 // simulates the thread spinlock release that would occur if the thread had been
357 // rescheded from. The resched didn't happen because the thread is new.
358 RELEASE_THREAD_LOCK();
359 int_restore_interrupts(); // this essentially simulates a return-from-interrupt
361 t
= thread_get_current_thread();
363 // start tracking kernel time
364 t
->last_time
= system_time();
365 t
->last_time_type
= KERNEL_TIME
;
367 // a signal may have been delivered here
368 thread_atkernel_exit();
370 // jump to the entry point in user space
371 arch_thread_enter_uspace(t
, (addr_t
)t
->entry
, t
->args
, t
->user_stack_base
+ STACK_SIZE
);
373 // never get here, the thread will exit by calling the thread_exit syscall
377 static int _create_kernel_thread_kentry(void)
379 int (*func
)(void *args
);
383 // simulates the thread spinlock release that would occur if the thread had been
384 // rescheded from. The resched didn't happen because the thread is new.
385 RELEASE_THREAD_LOCK();
386 int_restore_interrupts(); // this essentially simulates a return-from-interrupt
388 // start tracking kernel time
389 t
= thread_get_current_thread();
390 t
->last_time
= system_time();
391 t
->last_time_type
= KERNEL_TIME
;
393 // call the entry function with the appropriate args
394 func
= (void *)t
->entry
;
395 retcode
= func(t
->args
);
398 thread_exit(retcode
);
400 // shoudn't get to here
404 static thread_id
_create_thread(const char *name
, proc_id pid
, addr_t entry
, void *args
, bool kernel
)
411 t
= create_thread_struct(name
);
413 return ERR_NO_MEMORY
;
415 t
->priority
= THREAD_MEDIUM_PRIORITY
;
416 t
->state
= THREAD_STATE_BIRTH
;
417 t
->next_state
= THREAD_STATE_SUSPENDED
;
419 int_disable_interrupts();
422 // insert into global list
423 hash_insert(thread_hash
, t
);
424 RELEASE_THREAD_LOCK();
427 // look at the proc, make sure it's not being deleted
428 p
= proc_get_proc_struct_locked(pid
);
429 if(p
!= NULL
&& p
->state
!= PROC_STATE_DEATH
) {
430 insert_thread_into_proc(p
, t
);
437 hash_remove(thread_hash
, t
);
438 RELEASE_THREAD_LOCK();
440 int_restore_interrupts();
442 delete_thread_struct(t
);
443 return ERR_TASK_PROC_DELETED
;
446 sprintf(stack_name
, "%s_kstack", name
);
447 t
->kernel_stack_region_id
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), stack_name
,
448 (void **)&t
->kernel_stack_base
, REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
,
449 REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
450 if(t
->kernel_stack_region_id
< 0)
451 panic("_create_thread: error creating kernel stack!\n");
457 // this sets up an initial kthread stack that runs the entry
458 arch_thread_initialize_kthread_stack(t
, &_create_kernel_thread_kentry
);
461 // XXX make this better. For now just keep trying to create a stack
462 // until we find a spot.
463 t
->user_stack_base
= (USER_STACK_REGION
- STACK_SIZE
) + USER_STACK_REGION_SIZE
;
464 while(t
->user_stack_base
> USER_STACK_REGION
) {
465 sprintf(stack_name
, "%s_stack%d", p
->name
, t
->id
);
466 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, stack_name
,
467 (void **)&t
->user_stack_base
,
468 REGION_ADDR_ANY_ADDRESS
, STACK_SIZE
, REGION_WIRING_LAZY
, LOCK_RW
);
469 if(t
->user_stack_region_id
< 0) {
470 t
->user_stack_base
-= STACK_SIZE
;
472 // we created a region
476 if(t
->user_stack_region_id
< 0)
477 panic("_create_thread: unable to create user stack!\n");
479 // copy the user entry over to the args field in the thread struct
480 // the function this will call will immediately switch the thread into
482 arch_thread_initialize_kthread_stack(t
, &_create_user_thread_kentry
);
485 // set the interrupt disable level of the new thread to one (as if it had had int_disable_interrupts called)
486 t
->int_disable_level
= 1;
488 // set the initial state of the thread to suspended
489 t
->state
= THREAD_STATE_SUSPENDED
;
494 thread_id
user_thread_create_user_thread(char *uname
, addr_t entry
, void *args
)
496 char name
[SYS_MAX_OS_NAME_LEN
];
498 proc_id pid
= thread_get_current_thread()->proc
->id
;
500 if(is_kernel_address(uname
))
501 return ERR_VM_BAD_USER_MEMORY
;
502 if(is_kernel_address(entry
))
503 return ERR_VM_BAD_USER_MEMORY
;
505 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
508 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
510 return thread_create_user_thread(name
, pid
, entry
, args
);
513 thread_id
thread_create_user_thread(char *name
, proc_id pid
, addr_t entry
, void *args
)
515 return _create_thread(name
, pid
, entry
, args
, false);
518 thread_id
thread_create_kernel_thread(const char *name
, int (*func
)(void *), void *args
)
520 return _create_thread(name
, proc_get_kernel_proc()->id
, (addr_t
)func
, args
, true);
523 static thread_id
thread_create_kernel_thread_etc(const char *name
, int (*func
)(void *), void *args
, struct proc
*p
)
525 return _create_thread(name
, p
->id
, (addr_t
)func
, args
, true);
528 int thread_suspend_thread(thread_id id
)
530 return send_signal_etc(id
, SIGSTOP
, SIG_FLAG_NO_RESCHED
);
533 thread_id
thread_get_current_thread_id(void)
535 struct thread
*t
= thread_get_current_thread();
537 return t
? t
->id
: 0;
540 int thread_resume_thread(thread_id id
)
542 return send_signal_etc(id
, SIGCONT
, SIG_FLAG_NO_RESCHED
);
545 int thread_set_priority(thread_id id
, int priority
)
550 // make sure the passed in priority is within bounds
551 if(priority
> THREAD_MAX_RT_PRIORITY
)
552 priority
= THREAD_MAX_RT_PRIORITY
;
553 if(priority
< THREAD_MIN_PRIORITY
)
554 priority
= THREAD_MIN_PRIORITY
;
556 t
= thread_get_current_thread();
558 // it's ourself, so we know we aren't in a run queue, and we can manipulate
559 // our structure directly
560 t
->priority
= priority
;
563 int_disable_interrupts();
566 t
= thread_get_thread_struct_locked(id
);
568 if(t
->state
== THREAD_STATE_READY
&& t
->priority
!= priority
) {
569 // this thread is in a ready queue right now, so it needs to be reinserted
570 thread_dequeue_thread(t
);
571 t
->priority
= priority
;
572 thread_enqueue_run_q(t
);
574 t
->priority
= priority
;
578 retval
= ERR_INVALID_HANDLE
;
581 RELEASE_THREAD_LOCK();
582 int_restore_interrupts();
588 int user_thread_set_priority(thread_id id
, int priority
)
590 // clamp the priority levels the user can set their threads to
591 if(priority
> THREAD_MAX_PRIORITY
)
592 priority
= THREAD_MAX_PRIORITY
;
593 return thread_set_priority(id
, priority
);
596 int thread_get_thread_info(thread_id id
, struct thread_info
*outinfo
)
599 struct thread_info info
;
602 int_disable_interrupts();
605 t
= thread_get_thread_struct_locked(id
);
607 err
= ERR_INVALID_HANDLE
;
611 /* found the thread, copy the data out */
613 info
.owner_proc_id
= t
->proc
->id
;
614 strncpy(info
.name
, t
->name
, SYS_MAX_OS_NAME_LEN
-1);
615 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
616 info
.state
= t
->state
;
617 info
.priority
= t
->priority
;
618 info
.user_stack_base
= t
->user_stack_base
;
619 info
.user_time
= t
->user_time
;
620 info
.kernel_time
= t
->kernel_time
;
625 RELEASE_THREAD_LOCK();
626 int_restore_interrupts();
629 memcpy(outinfo
, &info
, sizeof(info
));
634 int user_thread_get_thread_info(thread_id id
, struct thread_info
*uinfo
)
636 struct thread_info info
;
639 if(is_kernel_address(uinfo
)) {
640 return ERR_VM_BAD_USER_MEMORY
;
643 err
= thread_get_thread_info(id
, &info
);
647 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
654 int thread_get_next_thread_info(uint32
*_cookie
, proc_id pid
, struct thread_info
*outinfo
)
658 struct thread_info info
;
662 cookie
= (thread_id
)*_cookie
;
664 int_disable_interrupts();
667 p
= proc_get_proc_struct_locked(pid
);
669 err
= ERR_INVALID_HANDLE
;
673 /* find the next thread in the list of threads in the proc structure */
676 t
= list_peek_head_type(&p
->thread_list
, struct thread
, proc_node
);
678 list_for_every_entry(&p
->thread_list
, t
, struct thread
, proc_node
) {
679 if(t
->id
== cookie
) {
680 /* we found what the last search got us, walk one past the last search */
681 t
= list_next_type(&p
->thread_list
, &t
->proc_node
, struct thread
, proc_node
);
692 /* found the thread, copy the data out */
694 info
.owner_proc_id
= t
->proc
->id
;
695 strncpy(info
.name
, t
->name
, SYS_MAX_OS_NAME_LEN
-1);
696 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
697 info
.state
= t
->state
;
698 info
.priority
= t
->priority
;
699 info
.user_stack_base
= t
->user_stack_base
;
700 info
.user_time
= t
->user_time
;
701 info
.kernel_time
= t
->kernel_time
;
705 *_cookie
= (uint32
)t
->id
;
709 int_restore_interrupts();
712 memcpy(outinfo
, &info
, sizeof(info
));
717 int user_thread_get_next_thread_info(uint32
*ucookie
, proc_id pid
, struct thread_info
*uinfo
)
719 struct thread_info info
;
723 if(is_kernel_address(ucookie
)) {
724 return ERR_VM_BAD_USER_MEMORY
;
727 if(is_kernel_address(uinfo
)) {
728 return ERR_VM_BAD_USER_MEMORY
;
731 err2
= user_memcpy(&cookie
, ucookie
, sizeof(cookie
));
735 err
= thread_get_next_thread_info(&cookie
, pid
, &info
);
739 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
743 err2
= user_memcpy(ucookie
, &cookie
, sizeof(cookie
));
751 static void _dump_proc_info(struct proc
*p
)
753 dprintf("PROC: %p\n", p
);
754 dprintf("id: 0x%x\n", p
->id
);
755 dprintf("pgid: 0x%x\n", p
->pgid
);
756 dprintf("sid: 0x%x\n", p
->sid
);
757 dprintf("name: '%s'\n", p
->name
);
758 dprintf("next: %p\n", p
->next
);
759 dprintf("parent: %p (0x%x)\n", p
->parent
, p
->parent
? p
->parent
->id
: -1);
760 dprintf("children.next: %p\n", p
->children
.next
);
761 dprintf("siblings.prev: %p\n", p
->siblings_node
.prev
);
762 dprintf("siblings.next: %p\n", p
->siblings_node
.next
);
763 dprintf("num_threads: %d\n", p
->num_threads
);
764 dprintf("state: %d\n", p
->state
);
765 dprintf("ioctx: %p\n", p
->ioctx
);
766 dprintf("aspace_id: 0x%x\n", p
->aspace_id
);
767 dprintf("aspace: %p\n", p
->aspace
);
768 dprintf("kaspace: %p\n", p
->kaspace
);
769 dprintf("main_thread: %p\n", p
->main_thread
);
770 dprintf("thread_list.next: %p\n", p
->thread_list
.next
);
773 static void dump_proc_info(int argc
, char **argv
)
778 struct hash_iterator i
;
781 dprintf("proc: not enough arguments\n");
785 // if the argument looks like a hex number, treat it as such
786 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
787 num
= atoul(argv
[1]);
788 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
790 _dump_proc_info((struct proc
*)num
);
797 // walk through the thread list, trying to match name or id
798 hash_open(proc_hash
, &i
);
799 while((p
= hash_next(proc_hash
, &i
)) != NULL
) {
800 if((p
->name
&& strcmp(argv
[1], p
->name
) == 0) || p
->id
== id
) {
805 hash_close(proc_hash
, &i
, false);
809 static const char *state_to_text(int state
)
812 case THREAD_STATE_READY
:
814 case THREAD_STATE_RUNNING
:
816 case THREAD_STATE_WAITING
:
818 case THREAD_STATE_SUSPENDED
:
820 case THREAD_STATE_FREE_ON_RESCHED
:
822 case THREAD_STATE_BIRTH
:
829 static struct thread
*last_thread_dumped
= NULL
;
831 static void _dump_thread_info(struct thread
*t
)
833 dprintf("THREAD: %p\n", t
);
834 dprintf("id: 0x%x\n", t
->id
);
835 dprintf("name: '%s'\n", t
->name
);
836 dprintf("next: %p\nproc_node.prev: %p\nproc_node.next: %p\nq_node.prev: %p\nq_node.next: %p\n",
837 t
->next
, t
->proc_node
.prev
, t
->proc_node
.next
, t
->q_node
.prev
, t
->q_node
.next
);
838 dprintf("priority: 0x%x\n", t
->priority
);
839 dprintf("state: %s\n", state_to_text(t
->state
));
840 dprintf("next_state: %s\n", state_to_text(t
->next_state
));
841 dprintf("cpu: %p ", t
->cpu
);
843 dprintf("(%d)\n", t
->cpu
->cpu_num
);
846 dprintf("sig_pending: 0x%lx\n", t
->sig_pending
);
847 dprintf("sig_block_mask: 0x%lx\n", t
->sig_block_mask
);
848 dprintf("in_kernel: %d\n", t
->in_kernel
);
849 dprintf("int_disable_level: %d\n", t
->int_disable_level
);
850 dprintf("sem_blocking:0x%x\n", t
->sem_blocking
);
851 dprintf("sem_count: 0x%x\n", t
->sem_count
);
852 dprintf("sem_deleted_retcode: 0x%x\n", t
->sem_deleted_retcode
);
853 dprintf("sem_errcode: 0x%x\n", t
->sem_errcode
);
854 dprintf("sem_flags: 0x%x\n", t
->sem_flags
);
855 dprintf("fault_handler: 0x%lx\n", t
->fault_handler
);
856 dprintf("args: %p\n", t
->args
);
857 dprintf("entry: 0x%lx\n", t
->entry
);
858 dprintf("proc: %p\n", t
->proc
);
859 dprintf("return_code_sem: 0x%x\n", t
->return_code_sem
);
860 dprintf("kernel_stack_region_id: 0x%x\n", t
->kernel_stack_region_id
);
861 dprintf("kernel_stack_base: 0x%lx\n", t
->kernel_stack_base
);
862 dprintf("user_stack_region_id: 0x%x\n", t
->user_stack_region_id
);
863 dprintf("user_stack_base: 0x%lx\n", t
->user_stack_base
);
864 dprintf("kernel_time: %Ld\n", t
->kernel_time
);
865 dprintf("user_time: %Ld\n", t
->user_time
);
866 dprintf("architecture dependant section:\n");
867 arch_thread_dump_info(&t
->arch_info
);
869 last_thread_dumped
= t
;
872 static void dump_thread_info(int argc
, char **argv
)
877 struct hash_iterator i
;
880 dprintf("thread: not enough arguments\n");
884 // if the argument looks like a hex number, treat it as such
885 if(strlen(argv
[1]) > 2 && argv
[1][0] == '0' && argv
[1][1] == 'x') {
886 num
= atoul(argv
[1]);
887 if(num
> vm_get_kernel_aspace()->virtual_map
.base
) {
889 _dump_thread_info((struct thread
*)num
);
896 // walk through the thread list, trying to match name or id
897 hash_open(thread_hash
, &i
);
898 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
899 if((t
->name
&& strcmp(argv
[1], t
->name
) == 0) || t
->id
== id
) {
900 _dump_thread_info(t
);
904 hash_close(thread_hash
, &i
, false);
907 static void dump_thread_list(int argc
, char **argv
)
910 struct hash_iterator i
;
912 hash_open(thread_hash
, &i
);
913 while((t
= hash_next(thread_hash
, &i
)) != NULL
) {
916 dprintf("\t%32s", t
->name
);
918 dprintf("\t%32s", "<NULL>");
919 dprintf("\t0x%x", t
->id
);
920 dprintf("\t%16s", state_to_text(t
->state
));
922 dprintf("\t%d", t
->cpu
->cpu_num
);
925 dprintf("\t0x%lx\n", t
->kernel_stack_base
);
927 hash_close(thread_hash
, &i
, false);
930 static void dump_next_thread_in_q(int argc
, char **argv
)
932 struct thread
*t
= last_thread_dumped
;
935 dprintf("no thread previously dumped. Examine a thread first.\n");
939 dprintf("next thread in queue after thread @ %p\n", t
);
940 if(t
->q_node
.next
!= NULL
) {
941 _dump_thread_info(containerof(t
->q_node
.next
, struct thread
, q_node
)); // XXX fixme
947 static void dump_next_thread_in_all_list(int argc
, char **argv
)
949 struct thread
*t
= last_thread_dumped
;
952 dprintf("no thread previously dumped. Examine a thread first.\n");
956 dprintf("next thread in global list after thread @ %p\n", t
);
957 if(t
->next
!= NULL
) {
958 _dump_thread_info(t
->next
);
964 static void dump_next_thread_in_proc(int argc
, char **argv
)
966 struct thread
*t
= last_thread_dumped
;
969 dprintf("no thread previously dumped. Examine a thread first.\n");
973 dprintf("next thread in proc after thread @ %p\n", t
);
975 t
= list_next_type(&t
->proc
->thread_list
, &t
->proc_node
, struct thread
, proc_node
);
977 _dump_thread_info(t
);
982 static int get_death_stack(void)
987 sem_acquire(death_stack_sem
, 1);
989 // grap the thread lock, find a free spot and release
990 int_disable_interrupts();
992 bit
= death_stack_bitmap
;
993 bit
= (~bit
)&~((~bit
)-1);
994 death_stack_bitmap
|= bit
;
995 RELEASE_THREAD_LOCK();
1000 panic("get_death_stack: couldn't find free stack!\n");
1002 if( bit
& (bit
-1)) {
1003 panic("get_death_stack: impossible bitmap result!\n");
1014 // dprintf("get_death_stack: returning 0x%lx\n", death_stacks[i].address);
1019 static void put_death_stack_and_reschedule(unsigned int index
)
1021 // dprintf("put_death_stack...: passed %d\n", index);
1023 if(index
>= num_death_stacks
)
1024 panic("put_death_stack: passed invalid stack index %d\n", index
);
1026 if(!(death_stack_bitmap
& (1 << index
)))
1027 panic("put_death_stack: passed invalid stack index %d\n", index
);
1029 int_disable_interrupts();
1032 death_stack_bitmap
&= ~(1 << index
);
1034 sem_release_etc(death_stack_sem
, 1, SEM_FLAG_NO_RESCHED
);
1039 int thread_init(kernel_args
*ka
)
1042 struct pgid_node
*pgnode
;
1043 struct sid_node
*snode
;
1046 dprintf("thread_init: entry\n");
1047 kprintf("initializing threading system...\n");
1049 // create the process hash table
1050 proc_hash
= hash_init(15, offsetof(struct proc
, next
), &proc_struct_compare
, &proc_struct_hash
);
1052 // create the pgroup hash table
1053 pgid_hash
= hash_init(15, offsetof(struct pgid_node
, node
), &pgid_node_compare
, &pgid_node_hash
);
1055 // create the session hash table
1056 sid_hash
= hash_init(15, offsetof(struct sid_node
, node
), &sid_node_compare
, &sid_node_hash
);
1058 // create the kernel process
1059 kernel_proc
= create_proc_struct("kernel", true);
1060 if(kernel_proc
== NULL
)
1061 panic("could not create kernel proc!\n");
1062 kernel_proc
->state
= PROC_STATE_NORMAL
;
1064 // the kernel_proc is it's own parent
1065 kernel_proc
->parent
= kernel_proc
;
1067 // it's part of the kernel process group
1068 pgnode
= create_pgroup_struct(kernel_proc
->id
);
1069 hash_insert(pgid_hash
, pgnode
);
1070 add_proc_to_pgroup(kernel_proc
, kernel_proc
->id
);
1072 // ditto with session
1073 snode
= create_session_struct(kernel_proc
->id
);
1074 hash_insert(sid_hash
, snode
);
1075 add_proc_to_session(kernel_proc
, kernel_proc
->id
);
1077 kernel_proc
->ioctx
= vfs_new_ioctx(NULL
);
1078 if(kernel_proc
->ioctx
== NULL
)
1079 panic("could not create ioctx for kernel proc!\n");
1081 // stick it in the process hash
1082 hash_insert(proc_hash
, kernel_proc
);
1084 // create the thread hash table
1085 thread_hash
= hash_init(15, offsetof(struct thread
, next
),
1086 &thread_struct_compare
, &thread_struct_hash
);
1088 // zero out the run queues
1089 for(i
= 0; i
< THREAD_NUM_PRIORITY_LEVELS
; i
++) {
1090 list_initialize(&run_q
[i
]);
1093 // zero out the dead thread structure q
1094 list_initialize(&dead_q
);
1096 // allocate a snooze sem
1097 snooze_sem
= sem_create(0, "snooze sem");
1098 if(snooze_sem
< 0) {
1099 panic("error creating snooze sem\n");
1103 // create an idle thread for each cpu
1104 for(i
=0; i
<ka
->num_cpus
; i
++) {
1108 sprintf(temp
, "idle_thread%d", i
);
1109 t
= create_thread_struct(temp
);
1111 panic("error creating idle thread struct\n");
1112 return ERR_NO_MEMORY
;
1114 t
->proc
= proc_get_kernel_proc();
1115 t
->priority
= THREAD_IDLE_PRIORITY
;
1116 t
->state
= THREAD_STATE_RUNNING
;
1117 t
->next_state
= THREAD_STATE_READY
;
1118 t
->int_disable_level
= 1; // ints are disabled until the int_restore_interrupts in main()
1119 t
->last_time
= system_time();
1120 sprintf(temp
, "idle_thread%d_kstack", i
);
1121 t
->kernel_stack_region_id
= vm_find_region_by_name(vm_get_kernel_aspace_id(), temp
);
1122 region
= vm_get_region_by_id(t
->kernel_stack_region_id
);
1124 panic("error finding idle kstack region\n");
1126 t
->kernel_stack_base
= region
->base
;
1127 vm_put_region(region
);
1128 hash_insert(thread_hash
, t
);
1129 insert_thread_into_proc(t
->proc
, t
);
1130 idle_threads
[i
] = t
;
1132 arch_thread_set_current_thread(t
);
1136 // create a set of death stacks
1137 num_death_stacks
= smp_get_num_cpus();
1138 if(num_death_stacks
> 8*sizeof(death_stack_bitmap
)) {
1140 * clamp values for really beefy machines
1142 num_death_stacks
= 8*sizeof(death_stack_bitmap
);
1144 death_stack_bitmap
= 0;
1145 death_stacks
= (struct death_stack
*)kmalloc(num_death_stacks
* sizeof(struct death_stack
));
1146 if(death_stacks
== NULL
) {
1147 panic("error creating death stacks\n");
1148 return ERR_NO_MEMORY
;
1153 for(i
=0; i
<num_death_stacks
; i
++) {
1154 sprintf(temp
, "death_stack%d", i
);
1155 death_stacks
[i
].rid
= vm_create_anonymous_region(vm_get_kernel_aspace_id(), temp
,
1156 (void **)&death_stacks
[i
].address
,
1157 REGION_ADDR_ANY_ADDRESS
, KSTACK_SIZE
, REGION_WIRING_WIRED
, LOCK_RW
|LOCK_KERNEL
);
1158 if(death_stacks
[i
].rid
< 0) {
1159 panic("error creating death stacks\n");
1160 return death_stacks
[i
].rid
;
1162 death_stacks
[i
].in_use
= false;
1165 death_stack_sem
= sem_create(num_death_stacks
, "death_stack_noavail_sem");
1167 // set up some debugger commands
1168 dbg_add_command(dump_thread_list
, "threads", "list all threads");
1169 dbg_add_command(dump_thread_info
, "thread", "list info about a particular thread");
1170 dbg_add_command(dump_next_thread_in_q
, "next_q", "dump the next thread in the queue of last thread viewed");
1171 dbg_add_command(dump_next_thread_in_all_list
, "next_all", "dump the next thread in the global list of the last thread viewed");
1172 dbg_add_command(dump_next_thread_in_proc
, "next_proc", "dump the next thread in the process of the last thread viewed");
1173 dbg_add_command(dump_proc_info
, "proc", "list info about a particular process");
1175 // initialize the architectural specific thread routines
1176 arch_thread_init(ka
);
1181 int thread_init_percpu(int cpu_num
)
1183 arch_thread_set_current_thread(idle_threads
[cpu_num
]);
1187 // this starts the scheduler. Must be run under the context of
1188 // the initial idle thread.
1189 void thread_start_threading(void)
1191 // XXX may not be the best place for this
1192 // invalidate all of the other processors' TLB caches
1193 int_disable_interrupts();
1194 arch_cpu_global_TLB_invalidate();
1195 smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVL_PAGE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_SYNC
);
1196 int_restore_interrupts();
1198 // start the other processors
1199 smp_send_broadcast_ici(SMP_MSG_RESCHEDULE
, 0, 0, 0, NULL
, SMP_MSG_FLAG_ASYNC
);
1201 int_disable_interrupts();
1206 RELEASE_THREAD_LOCK();
1207 int_restore_interrupts();
1210 int user_thread_snooze(bigtime_t time
)
1212 thread_snooze(time
);
1216 int thread_snooze(bigtime_t time
)
1218 return sem_acquire_etc(snooze_sem
, 1, SEM_FLAG_TIMEOUT
|SEM_FLAG_INTERRUPTABLE
, time
, NULL
);
1221 int user_thread_yield(void)
1227 void thread_yield(void)
1229 int_disable_interrupts();
1234 RELEASE_THREAD_LOCK();
1235 int_restore_interrupts();
1238 // NOTE: PROC_LOCK must be held
1239 static bool check_for_pgrp_connection(pgrp_id pgid
, pgrp_id check_for
, struct proc
*ignore_proc
)
1241 struct pgid_node
*node
;
1242 struct proc
*temp_proc
;
1243 bool connection
= false;
1246 dprintf("check_for_pgrp_connection: pgid %d check for %d ignore_proc %d\n", pgid
, check_for
, ignore_proc
->id
);
1248 dprintf("check_for_pgrp_connection: pgid %d check for %d\n", pgid
, check_for
);
1250 node
= hash_lookup(pgid_hash
, &pgid
);
1252 list_for_every_entry(&node
->list
, temp_proc
, struct proc
, pg_node
) {
1253 ASSERT(temp_proc
->pgid
== pgid
);
1254 dprintf(" looking at %d, pgid %d, ppgid %d\n", temp_proc
->id
, temp_proc
->pgid
, temp_proc
->parent
->pgid
);
1255 if(temp_proc
!= ignore_proc
&& temp_proc
->parent
->pgid
== check_for
) {
1264 // used to pass messages between thread_exit and thread_exit2
1265 struct thread_exit_args
{
1267 region_id old_kernel_stack
;
1268 unsigned int death_stack
;
1271 static void thread_exit2(void *_args
)
1273 struct thread_exit_args args
;
1275 // copy the arguments over, since the source is probably on the kernel stack we're about to delete
1276 memcpy(&args
, _args
, sizeof(struct thread_exit_args
));
1278 // restore the interrupts
1279 int_restore_interrupts();
1281 // dprintf("thread_exit2, running on death stack 0x%lx\n", args.t->kernel_stack_base);
1283 // delete the old kernel stack region
1284 // dprintf("thread_exit2: deleting old kernel stack id 0x%x for thread 0x%x\n", args.old_kernel_stack, args.t->id);
1285 vm_delete_region(vm_get_kernel_aspace_id(), args
.old_kernel_stack
);
1287 // dprintf("thread_exit2: removing thread 0x%x from global lists\n", args.t->id);
1289 // remove this thread from all of the global lists
1290 int_disable_interrupts();
1292 remove_thread_from_proc(kernel_proc
, args
.t
);
1293 RELEASE_PROC_LOCK();
1295 hash_remove(thread_hash
, args
.t
);
1296 RELEASE_THREAD_LOCK();
1298 // dprintf("thread_exit2: done removing thread from lists\n");
1300 // set the next state to be gone. Will return the thread structure to a ready pool upon reschedule
1301 args
.t
->next_state
= THREAD_STATE_FREE_ON_RESCHED
;
1303 // throw away our fpu context
1304 if(args
.t
->fpu_cpu
) {
1305 args
.t
->fpu_cpu
->fpu_state_thread
= NULL
;
1306 args
.t
->fpu_cpu
= NULL
;
1307 args
.t
->fpu_state_saved
= true; // a lie actually
1310 // return the death stack and reschedule one last time
1311 put_death_stack_and_reschedule(args
.death_stack
);
1312 // never get to here
1313 panic("thread_exit2: made it where it shouldn't have!\n");
1316 void thread_exit(int retcode
)
1318 struct thread
*t
= thread_get_current_thread();
1319 struct proc
*p
= t
->proc
;
1320 proc_id parent_pid
= -1;
1321 bool delete_proc
= false;
1322 unsigned int death_stack
;
1324 dprintf("thread 0x%x exiting w/return code 0x%x\n", t
->id
, retcode
);
1326 if(!kernel_startup
&& !int_are_interrupts_enabled())
1327 panic("thread_exit called with ints disabled\n");
1329 // boost our priority to get this over with
1330 thread_set_priority(t
->id
, THREAD_HIGH_PRIORITY
);
1332 // cancel any pending alarms
1333 timer_cancel_event(&t
->alarm_event
);
1335 // delete the user stack region first
1336 if(p
->aspace_id
>= 0 && t
->user_stack_region_id
>= 0) {
1337 region_id rid
= t
->user_stack_region_id
;
1338 t
->user_stack_region_id
= -1;
1339 vm_delete_region(p
->aspace_id
, rid
);
1342 if(p
!= kernel_proc
) {
1343 // remove this thread from the current process and add it to the kernel
1344 // put the thread into the kernel proc until it dies
1345 int_disable_interrupts();
1347 remove_thread_from_proc(p
, t
);
1348 insert_thread_into_proc(kernel_proc
, t
);
1349 if(p
->main_thread
== t
) {
1350 // this was main thread in this process
1352 p
->state
= PROC_STATE_DEATH
;
1355 RELEASE_PROC_LOCK();
1356 // swap address spaces, to make sure we're running on the kernel's pgdir
1357 vm_aspace_swap(kernel_proc
->kaspace
);
1358 int_restore_interrupts();
1361 // delete the process
1363 if(p
->num_threads
> 0) {
1364 // there are other threads still in this process,
1365 // cycle through and signal kill on each of the threads
1366 // XXX this can be optimized. There's got to be a better solution.
1367 struct thread
*temp_thread
;
1369 int_disable_interrupts();
1371 // we can safely walk the list because of the lock. no new threads can be created
1372 // because of the PROC_STATE_DEATH flag on the process
1373 list_for_every_entry(&p
->thread_list
, temp_thread
, struct thread
, proc_node
) {
1374 thread_kill_thread_nowait(temp_thread
->id
);
1377 RELEASE_PROC_LOCK();
1378 int_restore_interrupts();
1380 // Now wait for all of the threads to die
1381 // XXX block on a semaphore
1382 while((volatile int)p
->num_threads
> 0) {
1383 thread_snooze(10000); // 10 ms
1387 int_disable_interrupts();
1390 // see if the process group we are in is going to be orphaned
1391 // it's orphaned if no parent of any other process in the group is in the
1392 // same process group as our parent
1393 if(p
->sid
== p
->parent
->sid
&& p
->pgid
!= p
->parent
->pgid
) {
1394 if(!check_for_pgrp_connection(p
->pgid
, p
->parent
->pgid
, p
)) {
1395 dprintf("thread_exit: killing process %d orphans process group %d\n", p
->id
, p
->pgid
);
1396 send_pgrp_signal_etc_locked(p
->pgid
, SIGHUP
, SIG_FLAG_NO_RESCHED
);
1397 send_pgrp_signal_etc_locked(p
->pgid
, SIGCONT
, SIG_FLAG_NO_RESCHED
);
1401 // remove us from the process list
1402 hash_remove(proc_hash
, p
);
1404 // reparent each of our children
1405 proc_reparent_children(p
);
1407 // we're not part of our process groups and session anymore
1408 remove_proc_from_pgroup(p
, p
->pgid
);
1409 remove_proc_from_session(p
, p
->sid
);
1411 // remember who our parent was so we can send a signal
1412 parent_pid
= p
->parent
->id
;
1414 // remove us from our parent
1415 remove_proc_from_parent(p
->parent
, p
);
1417 RELEASE_PROC_LOCK();
1418 int_restore_interrupts();
1420 // clean up resources owned by the process
1421 vm_put_aspace(p
->aspace
);
1422 vm_delete_aspace(p
->aspace_id
);
1423 port_delete_owned_ports(p
->id
);
1424 sem_delete_owned_sems(p
->id
);
1425 vfs_free_ioctx(p
->ioctx
);
1429 // send a signal to the parent
1430 send_proc_signal_etc(parent_pid
, SIGCHLD
, SIG_FLAG_NO_RESCHED
);
1432 // delete the sem that others will use to wait on us and get the retcode
1434 sem_id s
= t
->return_code_sem
;
1436 t
->return_code_sem
= -1;
1437 sem_delete_etc(s
, retcode
);
1440 // get_death_stack leaves interrupts disabled
1441 death_stack
= get_death_stack();
1443 struct thread_exit_args args
;
1446 args
.old_kernel_stack
= t
->kernel_stack_region_id
;
1447 args
.death_stack
= death_stack
;
1449 // set the new kernel stack officially to the death stack, wont be really switched until
1450 // the next function is called. This bookkeeping must be done now before a context switch
1451 // happens, or the processor will interrupt to the old stack
1452 t
->kernel_stack_region_id
= death_stacks
[death_stack
].rid
;
1453 t
->kernel_stack_base
= death_stacks
[death_stack
].address
;
1455 // we will continue in thread_exit2(), on the new stack
1456 arch_thread_switch_kstack_and_call(t
->kernel_stack_base
+ KSTACK_SIZE
, thread_exit2
, &args
);
1459 panic("never can get here\n");
1462 int thread_kill_thread(thread_id id
)
1464 int status
= send_signal_etc(id
, SIGKILLTHR
, SIG_FLAG_NO_RESCHED
);
1468 if (id
!= thread_get_current_thread()->id
)
1469 thread_wait_on_thread(id
, NULL
);
1474 int thread_kill_thread_nowait(thread_id id
)
1476 return send_signal_etc(id
, SIGKILLTHR
, SIG_FLAG_NO_RESCHED
);
1479 int user_thread_wait_on_thread(thread_id id
, int *uretcode
)
1484 if(is_kernel_address(uretcode
))
1485 return ERR_VM_BAD_USER_MEMORY
;
1487 rc
= thread_wait_on_thread(id
, &retcode
);
1489 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1496 int thread_wait_on_thread(thread_id id
, int *retcode
)
1502 rc
= send_signal_etc(id
, SIGCONT
, 0);
1506 int_disable_interrupts();
1509 t
= thread_get_thread_struct_locked(id
);
1511 sem
= t
->return_code_sem
;
1513 sem
= ERR_INVALID_HANDLE
;
1516 RELEASE_THREAD_LOCK();
1517 int_restore_interrupts();
1519 rc
= sem_acquire_etc(sem
, 1, SEM_FLAG_INTERRUPTABLE
, 0, retcode
);
1521 /* This thread died the way it should, dont ripple a non-error up */
1522 if (rc
== ERR_SEM_DELETED
)
1528 int user_proc_wait_on_proc(proc_id id
, int *uretcode
)
1533 if(is_kernel_address(uretcode
))
1534 return ERR_VM_BAD_USER_MEMORY
;
1536 rc
= proc_wait_on_proc(id
, &retcode
);
1540 rc2
= user_memcpy(uretcode
, &retcode
, sizeof(retcode
));
1547 int proc_wait_on_proc(proc_id id
, int *retcode
)
1552 int_disable_interrupts();
1554 p
= proc_get_proc_struct_locked(id
);
1555 if(p
&& p
->main_thread
) {
1556 tid
= p
->main_thread
->id
;
1558 tid
= ERR_INVALID_HANDLE
;
1560 RELEASE_PROC_LOCK();
1561 int_restore_interrupts();
1566 return thread_wait_on_thread(tid
, retcode
);
1569 struct thread
*thread_get_thread_struct(thread_id id
)
1573 int_disable_interrupts();
1576 t
= thread_get_thread_struct_locked(id
);
1578 RELEASE_THREAD_LOCK();
1579 int_restore_interrupts();
1584 struct thread
*thread_get_thread_struct_locked(thread_id id
)
1586 struct thread_key key
;
1590 return hash_lookup(thread_hash
, &key
);
1595 static struct proc
*proc_get_proc_struct(proc_id id
)
1599 int_disable_interrupts();
1602 p
= proc_get_proc_struct_locked(id
);
1604 RELEASE_PROC_LOCK();
1605 int_restore_interrupts();
1611 static struct proc
*proc_get_proc_struct_locked(proc_id id
)
1613 struct proc_key key
;
1617 return hash_lookup(proc_hash
, &key
);
1620 static void thread_context_switch(struct thread
*t_from
, struct thread
*t_to
)
1622 vm_translation_map
*new_tmap
;
1624 // track kernel time
1625 bigtime_t now
= system_time();
1626 if(t_from
->last_time_type
== KERNEL_TIME
)
1627 t_from
->kernel_time
+= now
- t_from
->last_time
;
1629 t_from
->user_time
+= now
- t_from
->last_time
;
1630 t_to
->last_time
= now
;
1634 // remember that this cpu will hold the current fpu state if
1635 // a) it's not already saved in the thread structure
1636 // b) it's not on another cpu
1637 if(!t_from
->fpu_state_saved
) {
1638 if(t_from
->fpu_cpu
== NULL
) { // does another cpu "own" our state?
1639 cpu_ent
*cpu
= get_curr_cpu_struct();
1641 // the current cpu *has* to own our state
1642 ASSERT(cpu
->fpu_state_thread
== t_from
);
1646 // set the current cpu and thread pointer
1647 t_to
->cpu
= t_from
->cpu
;
1648 arch_thread_set_current_thread(t_to
);
1651 // decide if we need to switch to a new mmu context
1652 if(t_from
->proc
->aspace_id
>= 0 && t_to
->proc
->aspace_id
>= 0) {
1653 // they are both uspace threads
1654 if(t_from
->proc
->aspace_id
== t_to
->proc
->aspace_id
) {
1655 // same address space
1658 // switching to a new address space
1659 new_tmap
= &t_to
->proc
->aspace
->translation_map
;
1661 } else if(t_from
->proc
->aspace_id
< 0 && t_to
->proc
->aspace_id
< 0) {
1662 // they must both be kspace threads
1664 } else if(t_to
->proc
->aspace_id
< 0) {
1665 // the one we're switching to is kspace
1666 new_tmap
= &t_to
->proc
->kaspace
->translation_map
;
1668 new_tmap
= &t_to
->proc
->aspace
->translation_map
;
1671 // do the architecture specific context switch
1672 arch_thread_context_switch(t_from
, t_to
, new_tmap
);
// Cheap LCG pseudo-random generator (15-bit result), seeded lazily from
// system_time(). Used by the scheduler for probabilistic queue skipping.
static int _rand(void)
{
    static int next = 0;

    if(next == 0)
        next = system_time();

    next = next * 1103515245 + 12345;
    return((next >> 16) & 0x7FFF);
}
1686 static int reschedule_event(void *unused
)
1688 // this function is called as a result of the timer event set by the scheduler
1689 // returning this causes a reschedule on the timer event
1690 thread_get_current_thread()->cpu
->preempted
= 1;
1691 return INT_RESCHEDULE
;
1694 // NOTE: expects thread_spinlock to be held
1695 void thread_resched(void)
1697 struct thread
*next_thread
= NULL
;
1698 int last_thread_pri
= -1;
1699 struct thread
*old_thread
= thread_get_current_thread();
1702 struct timer_event
*quantum_timer
;
1704 // dprintf("top of thread_resched: cpu %d, cur_thread = 0x%x\n", smp_get_current_cpu(), thread_get_current_thread());
1706 switch(old_thread
->next_state
) {
1707 case THREAD_STATE_RUNNING
:
1708 case THREAD_STATE_READY
:
1709 // dprintf("enqueueing thread 0x%x into run q. pri = %d\n", old_thread, old_thread->priority);
1710 thread_enqueue_run_q(old_thread
);
1712 case THREAD_STATE_SUSPENDED
:
1713 dprintf("suspending thread 0x%x\n", old_thread
->id
);
1715 case THREAD_STATE_FREE_ON_RESCHED
:
1716 thread_enqueue(old_thread
, &dead_q
);
1719 // dprintf("not enqueueing thread 0x%x into run q. next_state = %d\n", old_thread, old_thread->next_state);
1722 old_thread
->state
= old_thread
->next_state
;
1724 // search the real-time queue
1725 for(i
= THREAD_MAX_RT_PRIORITY
; i
>= THREAD_MIN_RT_PRIORITY
; i
--) {
1726 next_thread
= thread_dequeue_run_q(i
);
1731 // search the regular queue
1732 for(i
= THREAD_MAX_PRIORITY
; i
> THREAD_IDLE_PRIORITY
; i
--) {
1733 next_thread
= thread_lookat_run_q(i
);
1734 if(next_thread
!= NULL
) {
1735 // skip it sometimes
1736 if(_rand() > 0x3000) {
1737 next_thread
= thread_dequeue_run_q(i
);
1740 last_thread_pri
= i
;
1744 if(next_thread
== NULL
) {
1745 if(last_thread_pri
!= -1) {
1746 next_thread
= thread_dequeue_run_q(last_thread_pri
);
1747 if(next_thread
== NULL
)
1748 panic("next_thread == NULL! last_thread_pri = %d\n", last_thread_pri
);
1750 next_thread
= thread_dequeue_run_q(THREAD_IDLE_PRIORITY
);
1751 if(next_thread
== NULL
)
1752 panic("next_thread == NULL! no idle priorities!\n");
1757 next_thread
->state
= THREAD_STATE_RUNNING
;
1758 next_thread
->next_state
= THREAD_STATE_READY
;
1760 // XXX should only reset the quantum timer if we are switching to a new thread,
1761 // or we got here as a result of a quantum expire.
1763 // XXX calculate quantum
1766 // get the quantum timer for this cpu
1767 quantum_timer
= &old_thread
->cpu
->quantum_timer
;
1768 if(!old_thread
->cpu
->preempted
) {
1769 _local_timer_cancel_event(old_thread
->cpu
->cpu_num
, quantum_timer
);
1771 old_thread
->cpu
->preempted
= 0;
1772 timer_setup_timer(&reschedule_event
, NULL
, quantum_timer
);
1773 timer_set_event(quantum
, TIMER_MODE_ONESHOT
, quantum_timer
);
1775 if(next_thread
!= old_thread
) {
1776 // dprintf("thread_resched: cpu %d switching from thread %d to %d\n",
1777 // smp_get_current_cpu(), old_thread->id, next_thread->id);
1778 thread_context_switch(old_thread
, next_thread
);
1782 static void insert_proc_into_parent(struct proc
*parent
, struct proc
*p
)
1784 list_add_head(&parent
->children
, &p
->siblings_node
);
1788 static void remove_proc_from_parent(struct proc
*parent
, struct proc
*p
)
1790 list_delete(&p
->siblings_node
);
1794 static int proc_struct_compare(void *_p
, const void *_key
)
1796 struct proc
*p
= _p
;
1797 const struct proc_key
*key
= _key
;
1799 if(p
->id
== key
->id
) return 0;
1803 static unsigned int proc_struct_hash(void *_p
, const void *_key
, unsigned int range
)
1805 struct proc
*p
= _p
;
1806 const struct proc_key
*key
= _key
;
1809 return (p
->id
% range
);
1811 return (key
->id
% range
);
1814 struct proc
*proc_get_kernel_proc(void)
1819 proc_id
proc_get_kernel_proc_id(void)
1824 return kernel_proc
->id
;
1827 proc_id
proc_get_current_proc_id(void)
1829 return thread_get_current_thread()->proc
->id
;
1832 struct proc
*proc_get_current_proc(void)
1834 return thread_get_current_thread()->proc
;
1837 static struct proc
*create_proc_struct(const char *name
, bool kernel
)
1841 p
= (struct proc
*)kmalloc(sizeof(struct proc
));
1845 list_clear_node(&p
->siblings_node
);
1846 list_initialize(&p
->children
);
1848 p
->id
= atomic_add(&next_proc_id
, 1);
1851 list_clear_node(&p
->pg_node
);
1852 list_clear_node(&p
->session_node
);
1853 strncpy(&p
->name
[0], name
, SYS_MAX_OS_NAME_LEN
-1);
1854 p
->name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
1859 p
->kaspace
= vm_get_kernel_aspace();
1860 vm_put_aspace(p
->kaspace
);
1861 list_initialize(&p
->thread_list
);
1862 p
->main_thread
= NULL
;
1863 p
->state
= PROC_STATE_BIRTH
;
1865 if(arch_proc_init_proc_struct(p
, kernel
) < 0)
// Free a proc structure previously allocated by create_proc_struct().
static void delete_proc_struct(struct proc *p)
{
    kfree(p);
}
1881 int proc_get_proc_info(proc_id id
, struct proc_info
*outinfo
)
1884 struct proc_info info
;
1887 int_disable_interrupts();
1890 p
= proc_get_proc_struct_locked(id
);
1892 err
= ERR_INVALID_HANDLE
;
1896 /* found the proc, copy the data out */
1898 info
.ppid
= p
->parent
->id
;
1899 info
.pgid
= p
->pgid
;
1901 strncpy(info
.name
, p
->name
, SYS_MAX_OS_NAME_LEN
-1);
1902 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
1903 info
.state
= p
->state
;
1904 info
.num_threads
= p
->num_threads
;
1909 RELEASE_PROC_LOCK();
1910 int_restore_interrupts();
1913 memcpy(outinfo
, &info
, sizeof(info
));
1918 int user_proc_get_proc_info(proc_id id
, struct proc_info
*uinfo
)
1920 struct proc_info info
;
1923 if(is_kernel_address(uinfo
)) {
1924 return ERR_VM_BAD_USER_MEMORY
;
1927 err
= proc_get_proc_info(id
, &info
);
1931 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
1938 int proc_get_next_proc_info(uint32
*cookie
, struct proc_info
*outinfo
)
1941 struct proc_info info
;
1943 struct hash_iterator i
;
1944 proc_id id
= (proc_id
)*cookie
;
1946 int_disable_interrupts();
1949 hash_open(proc_hash
, &i
);
1950 while((p
= hash_next(proc_hash
, &i
)) != NULL
) {
1952 break; // initial search, return the first proc
1954 // we found the last proc that was looked at, increment to the next one
1955 p
= hash_next(proc_hash
, &i
);
1960 err
= ERR_NO_MORE_HANDLES
;
1964 // we have the proc structure, copy the data out of it
1966 info
.ppid
= p
->parent
->id
;
1967 info
.pgid
= p
->pgid
;
1969 strncpy(info
.name
, p
->name
, SYS_MAX_OS_NAME_LEN
-1);
1970 info
.name
[SYS_MAX_OS_NAME_LEN
-1] = '\0';
1971 info
.state
= p
->state
;
1972 info
.num_threads
= p
->num_threads
;
1976 *cookie
= (uint32
)p
->id
;
1979 RELEASE_PROC_LOCK();
1980 int_restore_interrupts();
1983 memcpy(outinfo
, &info
, sizeof(info
));
1988 int user_proc_get_next_proc_info(uint32
*ucookie
, struct proc_info
*uinfo
)
1990 struct proc_info info
;
1994 if(is_kernel_address(ucookie
)) {
1995 return ERR_VM_BAD_USER_MEMORY
;
1998 if(is_kernel_address(uinfo
)) {
1999 return ERR_VM_BAD_USER_MEMORY
;
2002 err2
= user_memcpy(&cookie
, ucookie
, sizeof(cookie
));
2006 err
= proc_get_next_proc_info(&cookie
, &info
);
2010 err2
= user_memcpy(uinfo
, &info
, sizeof(info
));
2014 err2
= user_memcpy(ucookie
, &cookie
, sizeof(cookie
));
2021 static int get_arguments_data_size(char **args
,int argc
)
2026 for(cnt
= 0; cnt
< argc
; cnt
++)
2027 tot_size
+= strlen(args
[cnt
]) + 1;
2028 tot_size
+= (argc
+ 1) * sizeof(char *);
2030 return tot_size
+ sizeof(struct uspace_prog_args_t
);
2033 static int proc_create_proc2(void *args
)
2038 struct proc_arg
*pargs
= args
;
2041 char ustack_name
[128];
2045 struct uspace_prog_args_t
*uspa
;
2048 t
= thread_get_current_thread();
2051 dprintf("proc_create_proc2: entry thread %d\n", t
->id
);
2053 // create an initial primary stack region
2055 tot_top_size
= STACK_SIZE
+ PAGE_ALIGN(get_arguments_data_size(pargs
->args
,pargs
->argc
));
2056 t
->user_stack_base
= ((USER_STACK_REGION
- tot_top_size
) + USER_STACK_REGION_SIZE
);
2057 sprintf(ustack_name
, "%s_primary_stack", p
->name
);
2058 t
->user_stack_region_id
= vm_create_anonymous_region(p
->aspace_id
, ustack_name
, (void **)&t
->user_stack_base
,
2059 REGION_ADDR_EXACT_ADDRESS
, tot_top_size
, REGION_WIRING_LAZY
, LOCK_RW
);
2060 if(t
->user_stack_region_id
< 0) {
2061 panic("proc_create_proc2: could not create default user stack region\n");
2062 return t
->user_stack_region_id
;
2065 uspa
= (struct uspace_prog_args_t
*)(t
->user_stack_base
+ STACK_SIZE
);
2066 uargs
= (char **)(uspa
+ 1);
2067 udest
= (char *)(uargs
+ pargs
->argc
+ 1);
2068 // dprintf("addr: stack base=0x%x uargs = 0x%x udest=0x%x tot_top_size=%d \n\n",t->user_stack_base,uargs,udest,tot_top_size);
2070 for(cnt
= 0;cnt
< pargs
->argc
;cnt
++){
2072 user_strcpy(udest
, pargs
->args
[cnt
]);
2073 udest
+= strlen(pargs
->args
[cnt
]) + 1;
2077 user_memcpy(uspa
->prog_name
, p
->name
, sizeof(uspa
->prog_name
));
2078 user_memcpy(uspa
->prog_path
, pargs
->path
, sizeof(uspa
->prog_path
));
2084 if(pargs
->args
!= NULL
)
2085 free_arg_list(pargs
->args
,pargs
->argc
);
2088 dprintf("proc_create_proc2: loading elf binary '%s'\n", path
);
2090 err
= elf_load_uspace("/boot/libexec/rld.so", p
, 0, &entry
);
2092 // XXX clean up proc
2100 dprintf("proc_create_proc2: loaded elf. entry = 0x%lx\n", entry
);
2102 p
->state
= PROC_STATE_NORMAL
;
2104 // jump to the entry point in user space
2105 arch_thread_enter_uspace(t
, entry
, uspa
, t
->user_stack_base
+ STACK_SIZE
);
2111 proc_id
proc_create_proc(const char *path
, const char *name
, char **args
, int argc
, int priority
, int flags
)
2114 struct proc
*curr_proc
;
2117 proc_id curr_proc_id
;
2119 struct proc_arg
*pargs
;
2120 struct sid_node
*snode
= NULL
;
2121 struct pgid_node
*pgnode
= NULL
;
2123 dprintf("proc_create_proc: entry '%s', name '%s' args = %p argc = %d, flags = 0x%x\n", path
, name
, args
, argc
, flags
);
2125 p
= create_proc_struct(name
, false);
2127 return ERR_NO_MEMORY
;
2130 curr_proc_id
= proc_get_current_proc_id();
2132 // preallocate a process group and session node if we need it
2133 if(flags
& PROC_FLAG_NEW_SESSION
) {
2134 snode
= create_session_struct(p
->id
);
2135 flags
|= PROC_FLAG_NEW_PGROUP
; // creating your own session implies your own pgroup
2137 if(flags
& PROC_FLAG_NEW_PGROUP
)
2138 pgnode
= create_pgroup_struct(p
->id
);
2140 int_disable_interrupts();
2143 // insert this proc into the global list
2144 hash_insert(proc_hash
, p
);
2146 // add it to the parent's list
2147 curr_proc
= proc_get_proc_struct_locked(curr_proc_id
);
2148 insert_proc_into_parent(curr_proc
, p
);
2150 if(flags
& PROC_FLAG_NEW_SESSION
) {
2151 hash_insert(sid_hash
, snode
);
2152 add_proc_to_session(p
, p
->id
);
2154 // inheirit the parent's session
2155 p
->sid
= curr_proc
->sid
;
2156 add_proc_to_session(p
, curr_proc
->sid
);
2159 if(flags
& PROC_FLAG_NEW_PGROUP
) {
2160 hash_insert(pgid_hash
, pgnode
);
2161 add_proc_to_pgroup(p
, p
->id
);
2163 // inheirit the creating processes's process group
2164 p
->pgid
= curr_proc
->pgid
;
2165 add_proc_to_pgroup(p
, curr_proc
->pgid
);
2168 RELEASE_PROC_LOCK();
2169 int_restore_interrupts();
2171 // copy the args over
2172 pargs
= kmalloc(sizeof(struct proc_arg
));
2174 err
= ERR_NO_MEMORY
;
2177 pargs
->path
= kstrdup(path
);
2178 if(pargs
->path
== NULL
){
2179 err
= ERR_NO_MEMORY
;
2185 // create a new ioctx for this process
2186 p
->ioctx
= vfs_new_ioctx(thread_get_current_thread()->proc
->ioctx
);
2188 err
= ERR_NO_MEMORY
;
2192 // create an address space for this process
2193 p
->aspace_id
= vm_create_aspace(p
->name
, USER_BASE
, USER_BASE
, USER_SIZE
, false);
2194 if(p
->aspace_id
< 0) {
2198 p
->aspace
= vm_get_aspace_by_id(p
->aspace_id
);
2200 // create a kernel thread, but under the context of the new process
2201 tid
= thread_create_kernel_thread_etc(name
, proc_create_proc2
, pargs
, p
);
2207 if((flags
& PROC_FLAG_SUSPENDED
) == 0)
2208 thread_resume_thread(tid
);
2213 vm_put_aspace(p
->aspace
);
2214 vm_delete_aspace(p
->aspace_id
);
2216 vfs_free_ioctx(p
->ioctx
);
2222 // remove the proc structure from the proc hash table and delete the proc structure
2223 int_disable_interrupts();
2225 hash_remove(proc_hash
, p
);
2226 RELEASE_PROC_LOCK();
2227 int_restore_interrupts();
2228 delete_proc_struct(p
);
2233 proc_id
user_proc_create_proc(const char *upath
, const char *uname
, char **args
, int argc
, int priority
, int flags
)
2235 char path
[SYS_MAX_PATH_LEN
];
2236 char name
[SYS_MAX_OS_NAME_LEN
];
2240 dprintf("user_proc_create_proc : argc=%d \n",argc
);
2242 if(is_kernel_address(upath
))
2243 return ERR_VM_BAD_USER_MEMORY
;
2244 if(is_kernel_address(uname
))
2245 return ERR_VM_BAD_USER_MEMORY
;
2247 rc
= user_copy_arg_list(args
, argc
, &kargs
);
2251 rc
= user_strncpy(path
, upath
, SYS_MAX_PATH_LEN
-1);
2255 path
[SYS_MAX_PATH_LEN
-1] = 0;
2257 rc
= user_strncpy(name
, uname
, SYS_MAX_OS_NAME_LEN
-1);
2261 name
[SYS_MAX_OS_NAME_LEN
-1] = 0;
2263 return proc_create_proc(path
, name
, kargs
, argc
, priority
, flags
);
2265 free_arg_list(kargs
,argc
);
2269 int proc_kill_proc(proc_id id
)
2275 int_disable_interrupts();
2278 p
= proc_get_proc_struct_locked(id
);
2280 tid
= p
->main_thread
->id
;
2282 retval
= ERR_INVALID_HANDLE
;
2285 RELEASE_PROC_LOCK();
2286 int_restore_interrupts();
2290 // just kill the main thread in the process. The cleanup code there will
2291 // take care of the process
2292 return thread_kill_thread(tid
);
2295 thread_id
proc_get_main_thread(proc_id id
)
2300 int_disable_interrupts();
2303 p
= proc_get_proc_struct_locked(id
);
2305 tid
= p
->main_thread
->id
;
2307 tid
= ERR_INVALID_HANDLE
;
2310 RELEASE_PROC_LOCK();
2311 int_restore_interrupts();
2316 // reparent each of our children
2317 // NOTE: must have PROC lock held
2318 static void proc_reparent_children(struct proc
*p
)
2320 struct proc
*child
, *next
;
2322 list_for_every_entry_safe(&p
->children
, child
, next
, struct proc
, siblings_node
) {
2323 // remove the child from the current proc and add to the parent
2324 remove_proc_from_parent(p
, child
);
2325 insert_proc_into_parent(p
->parent
, child
);
2327 // check to see if this orphans the process group the child is in
2328 if(p
->sid
== child
->sid
&& p
->pgid
!= child
->pgid
) {
2329 if(!check_for_pgrp_connection(child
->pgid
, p
->pgid
, NULL
)) {
2330 dprintf("thread_exit: killing process %d orphans process group %d\n", p
->id
, child
->pgid
);
2331 send_pgrp_signal_etc_locked(child
->pgid
, SIGHUP
, SIG_FLAG_NO_RESCHED
);
2332 send_pgrp_signal_etc_locked(child
->pgid
, SIGCONT
, SIG_FLAG_NO_RESCHED
);
2338 // called in the int handler code when a thread enters the kernel from user space (via syscall)
2339 void thread_atkernel_entry(void)
2344 // dprintf("thread_atkernel_entry: entry thread 0x%x\n", t->id);
2346 t
= thread_get_current_thread();
2348 int_disable_interrupts();
2351 now
= system_time();
2352 t
->user_time
+= now
- t
->last_time
;
2354 t
->last_time_type
= KERNEL_TIME
;
2358 t
->in_kernel
= true;
2360 RELEASE_THREAD_LOCK();
2361 int_restore_interrupts();
2364 // called when a thread exits kernel space to user space
2365 void thread_atkernel_exit(void)
2371 // dprintf("thread_atkernel_exit: entry\n");
2373 t
= thread_get_current_thread();
2375 int_disable_interrupts();
2378 resched
= handle_signals(t
);
2383 t
->in_kernel
= false;
2385 RELEASE_THREAD_LOCK();
2387 // track kernel time
2388 now
= system_time();
2389 t
->kernel_time
+= now
- t
->last_time
;
2391 t
->last_time_type
= USER_TIME
;
2393 int_restore_interrupts();
2397 // called at the end of an interrupt routine, tries to deliver signals
2398 int thread_atinterrupt_exit(void)
2403 t
= thread_get_current_thread();
2405 return INT_NO_RESCHEDULE
;
2409 resched
= handle_signals(t
);
2411 RELEASE_THREAD_LOCK();
2413 return resched
? INT_RESCHEDULE
: INT_NO_RESCHEDULE
;
2416 int user_getrlimit(int resource
, struct rlimit
* urlp
)
2422 return ERR_INVALID_ARGS
;
2424 if(is_kernel_address(urlp
)) {
2425 return ERR_VM_BAD_USER_MEMORY
;
2428 ret
= getrlimit(resource
, &rl
);
2431 ret
= user_memcpy(urlp
, &rl
, sizeof(struct rlimit
));
2441 int getrlimit(int resource
, struct rlimit
* rlp
)
2449 return vfs_getrlimit(resource
, rlp
);
2458 int user_setrlimit(int resource
, const struct rlimit
* urlp
)
2464 return ERR_INVALID_ARGS
;
2466 if(is_kernel_address(urlp
)) {
2467 return ERR_VM_BAD_USER_MEMORY
;
2470 err
= user_memcpy(&rl
, urlp
, sizeof(struct rlimit
));
2475 return setrlimit(resource
, &rl
);
2478 int setrlimit(int resource
, const struct rlimit
* rlp
)
2486 return vfs_setrlimit(resource
, rlp
);
2495 static int pgid_node_compare(void *_p
, const void *_key
)
2497 struct pgid_node
*p
= _p
;
2498 const pgrp_id
*key
= _key
;
2500 if(p
->id
== *key
) return 0;
2504 static unsigned int pgid_node_hash(void *_p
, const void *_key
, unsigned int range
)
2506 struct pgid_node
*p
= _p
;
2507 const pgrp_id
*key
= _key
;
2510 return (p
->id
% range
);
2512 return (*key
% range
);
2515 // assumes PROC_LOCK is held
2516 static int add_proc_to_pgroup(struct proc
*p
, pgrp_id pgid
)
2518 struct pgid_node
*node
= hash_lookup(pgid_hash
, &pgid
);
2521 return ERR_NOT_FOUND
;
2524 ASSERT(p
->pg_node
.next
== NULL
&& p
->pg_node
.prev
== NULL
);
2525 list_add_head(&node
->list
, &p
->pg_node
);
2530 static int remove_proc_from_pgroup(struct proc
*p
, pgrp_id pgid
)
2532 struct pgid_node
*node
= hash_lookup(pgid_hash
, &pgid
);
2535 return ERR_NOT_FOUND
;
2537 ASSERT(p
->pgid
== pgid
);
2538 list_delete(&p
->pg_node
);
2543 static struct pgid_node
*create_pgroup_struct(pgrp_id pgid
)
2545 struct pgid_node
*node
= kmalloc(sizeof(struct pgid_node
));
2550 list_clear_node(&node
->node
);
2551 list_initialize(&node
->list
);
2556 static int send_pgrp_signal_etc_locked(pgrp_id pgid
, uint signal
, uint32 flags
)
2558 struct pgid_node
*node
;
2562 node
= hash_lookup(pgid_hash
, &pgid
);
2564 err
= ERR_NOT_FOUND
;
2568 list_for_every_entry(&node
->list
, p
, struct proc
, pg_node
) {
2569 dprintf("send_pgrp_signal_etc: sending sig %d to proc %d in pgid %d\n", signal
, p
->id
, pgid
);
2570 send_signal_etc(p
->main_thread
->id
, signal
, flags
| SIG_FLAG_NO_RESCHED
);
2577 int send_pgrp_signal_etc(pgrp_id pgid
, uint signal
, uint32 flags
)
2581 int_disable_interrupts();
2584 err
= send_pgrp_signal_etc_locked(pgid
, signal
, flags
);
2586 RELEASE_PROC_LOCK();
2587 int_restore_interrupts();
2592 static int sid_node_compare(void *_s
, const void *_key
)
2594 struct sid_node
*s
= _s
;
2595 const sess_id
*key
= _key
;
2597 if(s
->id
== *key
) return 0;
2601 static unsigned int sid_node_hash(void *_s
, const void *_key
, unsigned int range
)
2603 struct sid_node
*s
= _s
;
2604 const sess_id
*key
= _key
;
2607 return (s
->id
% range
);
2609 return (*key
% range
);
2612 // assumes PROC_LOCK is held
2613 static int add_proc_to_session(struct proc
*p
, sess_id sid
)
2615 struct sid_node
*node
= hash_lookup(sid_hash
, &sid
);
2617 return ERR_NOT_FOUND
;
2620 ASSERT(p
->session_node
.next
== NULL
&& p
->session_node
.prev
== NULL
);
2621 list_add_head(&node
->list
, &p
->session_node
);
2626 static int remove_proc_from_session(struct proc
*p
, sess_id sid
)
2628 struct sid_node
*node
= hash_lookup(sid_hash
, &sid
);
2630 return ERR_NOT_FOUND
;
2632 ASSERT(p
->sid
== sid
);
2633 list_delete(&p
->session_node
);
2638 static struct sid_node
*create_session_struct(sess_id sid
)
2640 struct sid_node
*node
= kmalloc(sizeof(struct sid_node
));
2645 list_clear_node(&node
->node
);
2646 list_initialize(&node
->list
);
2651 int send_session_signal_etc(sess_id sid
, uint signal
, uint32 flags
)
2653 struct sid_node
*node
;
2657 int_disable_interrupts();
2660 node
= hash_lookup(sid_hash
, &sid
);
2662 err
= ERR_NOT_FOUND
;
2666 list_for_every_entry(&node
->list
, p
, struct proc
, session_node
) {
2667 send_proc_signal_etc(p
->main_thread
->id
, signal
, flags
| SIG_FLAG_NO_RESCHED
);
2671 RELEASE_PROC_LOCK();
2672 int_restore_interrupts();
2677 int setpgid(proc_id pid
, pgrp_id pgid
)
2680 struct pgid_node
*free_node
= NULL
;
2683 if(pid
< 0 || pgid
< 0)
2684 return ERR_INVALID_ARGS
;
2687 pid
= proc_get_current_proc_id();
2692 int_disable_interrupts();
2695 p
= proc_get_proc_struct_locked(pid
);
2697 err
= ERR_NOT_FOUND
;
2701 // see if it's already in the target process group
2702 if(p
->pgid
== pgid
) {
2707 // see if the target process group exists
2708 if(hash_lookup(pgid_hash
, &pgid
) == NULL
) {
2710 // NOTE, we need to release the proc spinlock because we might have to
2711 // block while allocating the node for the process group
2712 struct pgid_node
*node
;
2714 RELEASE_PROC_LOCK();
2715 int_restore_interrupts();
2717 node
= create_pgroup_struct(pgid
);
2719 err
= ERR_NO_MEMORY
;
2723 int_disable_interrupts();
2726 // check before we add the newly created pgroup struct to the hash.
2727 // it could have been created while we had the PROC_LOCK released.
2728 if(hash_lookup(pgid_hash
, &pgid
) != NULL
) {
2729 free_node
= node
; // erase it later and use the pgroup that was already added
2731 // add our new pgroup node to the list
2732 hash_insert(pgid_hash
, node
);
2736 // remove the process from it's current group
2737 remove_proc_from_pgroup(p
, p
->pgid
);
2739 // add it to the new group
2740 add_proc_to_pgroup(p
, pgid
);
2745 RELEASE_PROC_LOCK();
2746 int_restore_interrupts();
2755 pgrp_id
getpgid(proc_id pid
)
2761 return ERR_INVALID_ARGS
;
2764 pid
= proc_get_current_proc_id();
2766 int_disable_interrupts();
2769 p
= proc_get_proc_struct_locked(pid
);
2771 retval
= ERR_NOT_FOUND
;
2778 RELEASE_PROC_LOCK();
2779 int_restore_interrupts();
2784 sess_id
setsid(void)
2787 struct sid_node
*free_node
= NULL
;
2792 pid
= proc_get_current_proc_id();
2795 int_disable_interrupts();
2798 p
= proc_get_proc_struct_locked(pid
);
2800 err
= ERR_NOT_FOUND
;
2804 // see if it's already in the target session
2810 // see if the target session exists
2811 if(hash_lookup(sid_hash
, &sid
) == NULL
) {
2813 // NOTE, we need to release the proc spinlock because we might have to
2814 // block while allocating the node for the session
2815 struct sid_node
*node
;
2817 RELEASE_PROC_LOCK();
2818 int_restore_interrupts();
2820 node
= create_session_struct(sid
);
2822 err
= ERR_NO_MEMORY
;
2826 int_disable_interrupts();
2829 // check before we add the newly created pgroup struct to the hash.
2830 // it could have been created while we had the PROC_LOCK released.
2831 if(hash_lookup(sid_hash
, &sid
) != NULL
) {
2832 free_node
= node
; // erase it later and use the pgroup that was already added
2834 // add our new pgroup node to the list
2835 hash_insert(sid_hash
, node
);
2839 // remove the process from it's current group
2840 remove_proc_from_session(p
, p
->sid
);
2842 // add it to the new group
2843 add_proc_to_session(p
, sid
);
2848 RELEASE_PROC_LOCK();
2849 int_restore_interrupts();