/* This file contains essentially all of the process and message handling.
 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
 * There is one entry point from the outside:
 *
 *   sys_call:	a system call, i.e., the kernel is trapped with an INT
 *
 * Changes:
 *   Aug 19, 2005     rewrote scheduling code  (Jorrit N. Herder)
 *   Jul 25, 2005     rewrote system call handling  (Jorrit N. Herder)
 *   May 26, 2005     rewrote message passing functions  (Jorrit N. Herder)
 *   May 24, 2005     new notification system call  (Jorrit N. Herder)
 *   Oct 28, 2004     nonblocking send and receive calls  (Jorrit N. Herder)
 *
 * The code here is critical to make everything work and is important for the
 * overall performance of the system. A large fraction of the code deals with
 * list manipulation. To make this both easy to understand and fast to execute
 * pointer pointers are used throughout the code. Pointer pointers prevent
 * exceptions for the head or tail of a linked list.
 *
 *   node_t *queue, *new_node;   // assume these as global variables
 *   node_t **xpp = &queue;      // get pointer pointer to head of queue
 *   while (*xpp != NULL)        // find last pointer of the linked list
 *       xpp = &(*xpp)->next;    // get pointer to next pointer
 *   *xpp = new_node;            // now replace the end (the NULL pointer)
 *   new_node->next = NULL;      // and mark the new end of the list
 *
 * For example, when adding a new node to the end of the list, one normally
 * makes an exception for an empty list and looks up the end of the list for
 * nonempty lists. As shown above, this is not required with pointer pointers.
 */
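
/* For illustration only: the complementary operation, unlinking a node, uses
 * the same idiom and likewise needs no special case for the head of the list
 * (dequeue() below does exactly this for the run queues):
 *
 *   node_t **xpp = &queue;          // start at the head pointer
 *   while (*xpp != NULL) {          // walk the next pointers themselves
 *       if (*xpp == target) {       // found the node to unlink
 *           *xpp = target->next;    // bypass it; works for the head too
 *           break;
 *       }
 *       xpp = &(*xpp)->next;
 *   }
 */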
#include <minix/com.h>
#include <minix/ipcconst.h>
#include "arch_proto.h"
#include <minix/syslib.h>
/* Scheduling and message passing functions */
static void idle(void);
/* Made public for use in clock.c (for user-space scheduling) */
static int mini_send(struct proc *caller_ptr, endpoint_t dst_e, message
	*m_ptr, int flags);
static int mini_receive(struct proc *caller_ptr, endpoint_t src,
	message *m_ptr, int flags);
static int mini_senda(struct proc *caller_ptr, asynmsg_t *table, size_t
	size);
static int deadlock(int function, register struct proc *caller,
	endpoint_t src_dst_e);
static int try_async(struct proc *caller_ptr);
static int try_one(struct proc *src_ptr, struct proc *dst_ptr);
static struct proc * pick_proc(void);
static void enqueue_head(struct proc *rp);
/* all idles share the same idle_priv structure */
static struct priv idle_priv;
static void set_idle_name(char * name, int n)
{
	int i, c;
	int p_z = 0;

	if (n > 999)
		n = 999;

	name[0] = 'i';
	name[1] = 'd';
	name[2] = 'l';
	name[3] = 'e';

	for (i = 4, c = 100; c > 0; c /= 10) {
		int digit;

		digit = n / c;
		n -= digit * c;

		if (p_z || digit != 0 || c == 1) {
			p_z = 1;
			name[i++] = '0' + digit;
		}
	}

	name[i] = '\0';
}
#define PICK_HIGHERONLY	2
#define BuildNotifyMessage(m_ptr, src, dst_ptr) \
	(m_ptr)->m_type = NOTIFY_MESSAGE;				\
	(m_ptr)->NOTIFY_TIMESTAMP = get_uptime();			\
	switch (src) {							\
	case HARDWARE:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending;	\
		priv(dst_ptr)->s_int_pending = 0;			\
		break;							\
	case SYSTEM:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending;	\
		priv(dst_ptr)->s_sig_pending = 0;			\
		break;							\
	}
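
/* Usage sketch: this is how the macro is invoked at its call sites in
 * mini_receive() and mini_notify() below; dst_ptr must point to a valid
 * process slot and src must be a kernel process number:
 *
 *   BuildNotifyMessage(&dst_ptr->p_delivermsg, proc_nr(caller_ptr), dst_ptr);
 *   dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
 *   dst_ptr->p_misc_flags |= MF_DELIVERMSG;
 */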
/*===========================================================================*
 *				proc_init				     *
 *===========================================================================*/
void proc_init(void)
{
  struct proc * rp;
  struct priv *sp;
  int i;

  /* Clear the process table. Announce each slot as empty and set up
   * mappings for proc_addr() and proc_nr() macros. Do the same for the
   * table with privilege structures for the system processes.
   */
  for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
	rp->p_rts_flags = RTS_SLOT_FREE;	/* initialize free slot */
	rp->p_magic = PMAGIC;
	rp->p_nr = i;				/* proc number from ptr */
	rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
	rp->p_scheduler = NULL;			/* no user space scheduler */
	rp->p_priority = 0;			/* no priority */
	rp->p_quantum_size_ms = 0;		/* no quantum size */

	/* arch-specific initialization */
	arch_proc_reset(rp);
  }
  for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
	sp->s_proc_nr = NONE;			/* initialize as free */
	sp->s_id = (sys_id_t) i;		/* priv structure index */
	ppriv_addr[i] = sp;			/* priv ptr from number */
	sp->s_sig_mgr = NONE;			/* clear signal managers */
	sp->s_bak_sig_mgr = NONE;
  }
  idle_priv.s_flags = IDL_F;
  /* initialize IDLE structures for every CPU */
  for (i = 0; i < CONFIG_MAX_CPUS; i++) {
	struct proc * ip = get_cpu_var_ptr(i, idle_proc);
	ip->p_endpoint = IDLE;
	ip->p_priv = &idle_priv;
	/* must not let idle ever get scheduled */
	ip->p_rts_flags |= RTS_PROC_STOP;
	set_idle_name(ip->p_name, i);
  }
}
static void switch_address_space_idle(void)
{
#ifdef CONFIG_SMP
	/*
	 * currently we bet that VM is always alive and its pages available so
	 * when the CPU wakes up the kernel is mapped and no surprises happen.
	 * This is only a problem if more than one CPU is available
	 */
	switch_address_space(proc_addr(VM_PROC_NR));
#endif
}
/*===========================================================================*
 *				idle					     *
 *===========================================================================*/
static void idle(void)
{
	struct proc * p;

	/* This function is called whenever there is no work to do.
	 * Halt the CPU, and measure how many timestamp counter ticks are
	 * spent not doing anything. This allows test setups to measure
	 * the CPU utilization of certain workloads with high precision.
	 */

	p = get_cpulocal_var(proc_ptr) = get_cpulocal_var_ptr(idle_proc);
	if (priv(p)->s_flags & BILLABLE)
		get_cpulocal_var(bill_ptr) = p;

	switch_address_space_idle();

#ifdef CONFIG_SMP
	get_cpulocal_var(cpu_is_idle) = 1;
	/* we don't need to keep time on APs as it is handled on the BSP */
	if (cpuid != bsp_cpu_id)
		stop_local_timer();
	else
#endif
	{
		/*
		 * If the timer has expired while in kernel we must
		 * rearm it before we go to sleep
		 */
		restart_local_timer();
	}

	/* start accounting for the idle time */
	context_stop(proc_addr(KERNEL));
#if !SPROFILE
	halt_cpu();
#else
	if (!sprofiling)
		halt_cpu();
	else {
		volatile int * v;

		v = get_cpulocal_var_ptr(idle_interrupted);
		interrupts_enable();
		while (!*v)
			arch_pause();
		interrupts_disable();
		*v = 0;
	}
#endif
	/*
	 * end of accounting for the idle task does not happen here, the kernel
	 * is handling stuff for quite a while before it gets back here!
	 */
}
/*===========================================================================*
 *				switch_to_user				     *
 *===========================================================================*/
void switch_to_user(void)
{
	/* This function is called an instant before proc_ptr is
	 * to be scheduled again.
	 */
	struct proc * p;
#ifdef CONFIG_SMP
	int tlb_must_refresh = 0;
#endif

	p = get_cpulocal_var(proc_ptr);
	/*
	 * if the current process is still runnable check the misc flags and let
	 * it run unless it becomes not runnable in the meantime
	 */
	if (proc_is_runnable(p))
		goto check_misc_flags;
	/*
	 * if a process becomes not runnable while handling the misc flags, we
	 * need to pick a new one here and start from scratch. Also if the
	 * current process wasn't runnable, we pick a new one here
	 */
not_runnable_pick_new:
	if (proc_is_preempted(p)) {
		p->p_rts_flags &= ~RTS_PREEMPTED;
		if (proc_is_runnable(p)) {
			if (!is_zero64(p->p_cpu_time_left))
				enqueue_head(p);
			else
				enqueue(p);
		}
	}

	/*
	 * if we have no process to run, set IDLE as the current process for
	 * time accounting and put the cpu in an idle state. After the next
	 * timer interrupt the execution resumes here and we can pick another
	 * process. If there is still nothing runnable we "schedule" IDLE again
	 */
	while (!(p = pick_proc())) {
		idle();
	}

	/* update the global variable */
	get_cpulocal_var(proc_ptr) = p;

#ifdef CONFIG_SMP
	if (p->p_misc_flags & MF_FLUSH_TLB && get_cpulocal_var(ptproc) == p)
		tlb_must_refresh = 1;
#endif
	switch_address_space(p);

check_misc_flags:

	assert(p);
	assert(proc_is_runnable(p));
	while (p->p_misc_flags &
		(MF_KCALL_RESUME | MF_DELIVERMSG |
		 MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

		assert(proc_is_runnable(p));
		if (p->p_misc_flags & MF_KCALL_RESUME) {
			kernel_call_resume(p);
		}
		else if (p->p_misc_flags & MF_DELIVERMSG) {
			TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
				p->p_name, p->p_endpoint););
			delivermsg(p);
		}
		else if (p->p_misc_flags & MF_SC_DEFER) {
			/* Perform the system call that we deferred earlier. */

			assert(!(p->p_misc_flags & MF_SC_ACTIVE));

			arch_do_syscall(p);

			/* If the process is stopped for signal delivery, and
			 * not blocked sending a message after the system call,
			 * inform PM.
			 */
			if ((p->p_misc_flags & MF_SIG_DELAY) &&
					!RTS_ISSET(p, RTS_SENDING))
				sig_delay_done(p);
		}
		else if (p->p_misc_flags & MF_SC_TRACE) {
			/* Trigger a system call leave event if this was a
			 * system call. We must do this after processing the
			 * other flags above, both for tracing correctness and
			 * to be able to use 'break'.
			 */
			if (!(p->p_misc_flags & MF_SC_ACTIVE))
				break;

			p->p_misc_flags &=
				~(MF_SC_TRACE | MF_SC_ACTIVE);

			/* Signal the "leave system call" event.
			 * Block the process.
			 */
			cause_sig(proc_nr(p), SIGTRAP);
		}
		else if (p->p_misc_flags & MF_SC_ACTIVE) {
			/* If MF_SC_ACTIVE was set, remove it now:
			 * we're leaving the system call.
			 */
			p->p_misc_flags &= ~MF_SC_ACTIVE;

			break;
		}

		/*
		 * the selected process might not be runnable anymore. We have
		 * to check it and schedule another one
		 */
		if (!proc_is_runnable(p))
			goto not_runnable_pick_new;
	}
	/*
	 * check the quantum left before it runs again. We must do it only here
	 * as we are sure that a possible out-of-quantum message to the
	 * scheduler will not collide with the regular ipc
	 */
	if (is_zero64(p->p_cpu_time_left))
		proc_no_time(p);
	/*
	 * After handling the misc flags the selected process might not be
	 * runnable anymore. We have to check it and schedule another one
	 */
	if (!proc_is_runnable(p))
		goto not_runnable_pick_new;

	TRACE(VF_SCHEDULING, printf("cpu %d starting %s / %d "
				"pc 0x%08x\n",
		cpuid, p->p_name, p->p_endpoint, p->p_reg.pc););

	p = arch_finish_switch_to_user();
	assert(!is_zero64(p->p_cpu_time_left));

	context_stop(proc_addr(KERNEL));

	/* If the process isn't the owner of FPU, enable the FPU exception */
	if (get_cpulocal_var(fpu_owner) != p)
		enable_fpu_exception();
	else
		disable_fpu_exception();

	/* If MF_CONTEXT_SET is set, don't clobber process state within
	 * the kernel. The next kernel entry is OK again though.
	 */
	p->p_misc_flags &= ~MF_CONTEXT_SET;

	assert(p->p_seg.p_cr3 != 0);
#ifdef CONFIG_SMP
	if (p->p_misc_flags & MF_FLUSH_TLB) {
		if (tlb_must_refresh)
			refresh_tlb();
		p->p_misc_flags &= ~MF_FLUSH_TLB;
	}
#endif

	restart_local_timer();

	/*
	 * restore_user_context() carries out the actual mode switch from kernel
	 * to userspace. This function does not return
	 */
	restore_user_context(p);
	NOT_REACHABLE;
}
/*
 * handler for all synchronous IPC calls
 */
static int do_sync_ipc(struct proc * caller_ptr,	/* who made the call */
			int call_nr,	/* system call number and flags */
			endpoint_t src_dst_e,	/* src or dst of the call */
			message *m_ptr)	/* users pointer to a message */
{
  int result;					/* the system call's result */
  int src_dst_p;				/* Process slot number */
  char *callname;
  /* Check destination. RECEIVE is the only call that accepts ANY (in addition
   * to a real endpoint). The other calls (SEND, SENDREC, and NOTIFY) require an
   * endpoint that corresponds to a process. In addition, it is necessary to check
   * whether a process is allowed to send to a given destination.
   */
  assert(call_nr != SENDA);
  /* Only allow non-negative call_nr values less than 32 */
  if (call_nr < 0 || call_nr > IPCNO_HIGHEST || call_nr >= 32
      || !(callname = ipc_call_names[call_nr])) {
#if DEBUG_ENABLE_IPC_WARNINGS
      printf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
          call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }
  if (src_dst_e == ANY)
  {
	if (call_nr != RECEIVE)
	{
#if 0
		printf("sys_call: %s by %d with bad endpoint %d\n",
			callname,
			proc_nr(caller_ptr), src_dst_e);
#endif
		return EINVAL;
	}
	src_dst_p = (int) src_dst_e;
  }
  else
  {
	/* Require a valid source and/or destination process. */
	if(!isokendpt(src_dst_e, &src_dst_p)) {
#if 0
		printf("sys_call: %s by %d with bad endpoint %d\n",
			callname,
			proc_nr(caller_ptr), src_dst_e);
#endif
		return EDEADSRCDST;
	}

	/* If the call is to send to a process, i.e., for SEND, SENDNB,
	 * SENDREC or NOTIFY, verify that the caller is allowed to send to
	 * the given destination.
	 */
	if (call_nr != RECEIVE)
	{
		if (!may_send_to(caller_ptr, src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
			printf(
			"sys_call: ipc mask denied %s from %d to %d\n",
				callname,
				caller_ptr->p_endpoint, src_dst_e);
#endif
			return(ECALLDENIED);	/* call denied by ipc mask */
		}
	}
  }
  /* Check if the process has privileges for the requested call. Calls to the
   * kernel may only be SENDREC, because tasks always reply and may not block
   * if the caller doesn't do receive().
   */
  if (!(priv(caller_ptr)->s_trap_mask & (1 << call_nr))) {
#if DEBUG_ENABLE_IPC_WARNINGS
      printf("sys_call: %s not allowed, caller %d, src_dst %d\n",
          callname, proc_nr(caller_ptr), src_dst_p);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  if (call_nr != SENDREC && call_nr != RECEIVE && iskerneln(src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
      printf("sys_call: trap %s not allowed, caller %d, src_dst %d\n",
           callname, proc_nr(caller_ptr), src_dst_e);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }
  switch(call_nr) {
  case SENDREC:
	/* A flag is set so that notifications cannot interrupt SENDREC. */
	caller_ptr->p_misc_flags |= MF_REPLY_PEND;
	/* fall through */
  case SEND:
	result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
	if (call_nr == SEND || result != OK)
		break;				/* done, or SEND failed */
	/* fall through for SENDREC */
  case RECEIVE:
	if (call_nr == RECEIVE) {
		caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
		IPC_STATUS_CLEAR(caller_ptr);	/* clear IPC status code */
	}
	result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
	break;
  case NOTIFY:
	result = mini_notify(caller_ptr, src_dst_e);
	break;
  case SENDNB:
	result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
	break;
  default:
	result = EBADCALL;			/* illegal system call */
  }
  /* Now, return the result of the system call to the caller. */
  return(result);
}
int do_ipc(reg_t r1, reg_t r2, reg_t r3)
{
	struct proc *const caller_ptr = get_cpulocal_var(proc_ptr);	/* get pointer to caller */
	int call_nr = (int) r1;

	assert(!RTS_ISSET(caller_ptr, RTS_SLOT_FREE));
	/* bill kernel time to this process. */
	kbill_ipc = caller_ptr;
	/* If this process is subject to system call tracing, handle that first. */
	if (caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) {
		/* Are we tracing this process, and is it the first sys_call entry? */
		if ((caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) ==
							MF_SC_TRACE) {
			/* We must notify the tracer before processing the actual
			 * system call. If we don't, the tracer could not obtain the
			 * input message. Postpone the entire system call.
			 */
			caller_ptr->p_misc_flags &= ~MF_SC_TRACE;
			caller_ptr->p_misc_flags |= MF_SC_DEFER;

			/* Signal the "enter system call" event. Block the process. */
			cause_sig(proc_nr(caller_ptr), SIGTRAP);

			/* Preserve the return register's value. */
			return caller_ptr->p_reg.retreg;
		}

		/* If the MF_SC_DEFER flag is set, the syscall is now being resumed. */
		caller_ptr->p_misc_flags &= ~MF_SC_DEFER;

		assert (!(caller_ptr->p_misc_flags & MF_SC_ACTIVE));

		/* Set a flag to allow reliable tracing of leaving the system call. */
		caller_ptr->p_misc_flags |= MF_SC_ACTIVE;
	}
	if(caller_ptr->p_misc_flags & MF_DELIVERMSG) {
		panic("sys_call: MF_DELIVERMSG on for %s / %d\n",
			caller_ptr->p_name, caller_ptr->p_endpoint);
	}
	/* Now check if the call is known and try to perform the request. The only
	 * system calls that exist in MINIX are sending and receiving messages.
	 *   - SENDREC: combines SEND and RECEIVE in a single system call
	 *   - SEND:    sender blocks until its message has been delivered
	 *   - RECEIVE: receiver blocks until an acceptable message has arrived
	 *   - NOTIFY:  asynchronous call; deliver notification or mark pending
	 *   - SENDA:   list of asynchronous send requests
	 */
	switch(call_nr) {
	case SENDREC:
	case SEND:
	case RECEIVE:
	case NOTIFY:
	case SENDNB:
	{
		/* Process accounting for scheduling */
		caller_ptr->p_accounting.ipc_sync++;

		return do_sync_ipc(caller_ptr, call_nr, (endpoint_t) r2,
				(message *) r3);
	}
	case SENDA:
	{
		/*
		 * Get and check the size of the argument in bytes as it is a
		 * table
		 */
		size_t msg_size = (size_t) r2;

		/* Process accounting for scheduling */
		caller_ptr->p_accounting.ipc_async++;

		/* Limit size to something reasonable. An arbitrary choice is 16
		 * times the number of process table entries.
		 */
		if (msg_size > 16*(NR_TASKS + NR_PROCS))
			return EDOM;
		return mini_senda(caller_ptr, (asynmsg_t *) r3, msg_size);
	}
	case MINIX_KERNINFO:
	{
		/* It might not be initialized yet. */
		if(!minix_kerninfo_user) {
			return EBADCALL;
		}

		arch_set_secondary_ipc_return(caller_ptr, minix_kerninfo_user);
		return OK;
	}
	default:
		return EBADCALL;		/* illegal system call */
	}
}
/*===========================================================================*
 *				deadlock				     *
 *===========================================================================*/
static int deadlock(function, cp, src_dst_e)
int function;					/* trap number */
register struct proc *cp;			/* pointer to caller */
endpoint_t src_dst_e;				/* src or dst process */
{
/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
 * a cyclic dependency of blocking send and receive calls. The only cyclic
 * dependency that is not fatal is if the caller and target directly SEND(REC)
 * and RECEIVE to each other. If a deadlock is found, the group size is
 * returned. Otherwise zero is returned.
 */
  register struct proc *xp;			/* process pointer */
  int group_size = 1;				/* start with only caller */
#if DEBUG_ENABLE_IPC_WARNINGS
  static struct proc *processes[NR_PROCS + NR_TASKS];
  processes[0] = cp;
#endif
  while (src_dst_e != ANY) { 			/* check while process nr */
      int src_dst_slot;
      okendpt(src_dst_e, &src_dst_slot);
      xp = proc_addr(src_dst_slot);		/* follow chain of processes */
      assert(proc_ptr_ok(xp));
      assert(!RTS_ISSET(xp, RTS_SLOT_FREE));
#if DEBUG_ENABLE_IPC_WARNINGS
      processes[group_size] = xp;
#endif
      group_size ++;				/* extra process in group */

      /* Check whether the last process in the chain has a dependency. If it
       * has not, the cycle cannot be closed and we are done.
       */
      if((src_dst_e = P_BLOCKEDON(xp)) == NONE)
	  return 0;
      /* Now check if there is a cyclic dependency. For group sizes of two,
       * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
       * or other combinations indicate a deadlock.
       */
      if (src_dst_e == cp->p_endpoint) {	/* possible deadlock */
	  if (group_size == 2) {		/* caller and src_dst */
	      /* The function number is magically converted to flags. */
	      if ((xp->p_rts_flags ^ (function << 2)) & RTS_SENDING) {
	          return(0);			/* not a deadlock */
	      }
	  }
#if DEBUG_ENABLE_IPC_WARNINGS
	  {
	      int i;
	      printf("deadlock between these processes:\n");
	      for(i = 0; i < group_size; i++) {
		  printf(" %10s ", processes[i]->p_name);
	      }
	      printf("\n\n");
	      for(i = 0; i < group_size; i++) {
		  print_proc(processes[i]);
		  proc_stacktrace(processes[i]);
	      }
	  }
#endif
          return(group_size);			/* deadlock found */
      }
  }
  return(0);					/* not a deadlock */
}
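
/* Illustration (informal, not part of the kernel's control flow): if process
 * A does SENDREC to B while B is blocked in SEND to A, the chain A -> B -> A
 * closes with group_size == 2; the XOR test above then sees one side sending
 * and the other receiving, so the pair is allowed. If both ends were blocked
 * sending to each other, or if a third process closes the cycle, a nonzero
 * group size is reported as a deadlock.
 */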
/*===========================================================================*
 *				has_pending				     *
 *===========================================================================*/
static int has_pending(sys_map_t *map, int src_p, int asynm)
{
/* Check to see if there is a pending message from the desired source
 * available.
 */
  int src_id;
  sys_id_t id = NULL_PRIV_ID;
#ifdef CONFIG_SMP
  struct proc * p;
#endif
  /* Either check a specific bit in the mask map, or find the first bit set in
   * it (if any), depending on whether the receive was called on a specific
   * source endpoint.
   */
  if (src_p != ANY) {
	src_id = nr_to_id(src_p);
	if (get_sys_bit(*map, src_id)) {
#ifdef CONFIG_SMP
		p = proc_addr(id_to_nr(src_id));
		if (asynm && RTS_ISSET(p, RTS_VMINHIBIT))
			p->p_misc_flags |= MF_SENDA_VM_MISS;
		else
#endif
			id = src_id;
	}
  } else {
	/* Find a source with a pending message */
	for (src_id = 0; src_id < NR_SYS_PROCS; src_id += BITCHUNK_BITS) {
		if (get_sys_bits(*map, src_id) != 0) {
#ifdef CONFIG_SMP
			while (src_id < NR_SYS_PROCS) {
				while (!get_sys_bit(*map, src_id)) {
					if (src_id == NR_SYS_PROCS)
						break;
					src_id++;
				}
				p = proc_addr(id_to_nr(src_id));
				/*
				 * We must not let kernel fiddle with pages of a
				 * process which are currently being changed by
				 * VM. It is dangerous! So do not report such a
				 * process as having pending async messages.
				 * Skip it.
				 */
				if (asynm && RTS_ISSET(p, RTS_VMINHIBIT)) {
					p->p_misc_flags |= MF_SENDA_VM_MISS;
					src_id++;
				} else
					break;
			}
#else
			while (!get_sys_bit(*map, src_id)) src_id++;
#endif
			break;
		}
	}

	if (src_id < NR_SYS_PROCS)	/* Found one */
		id = src_id;
  }

  return(id);
}
/*===========================================================================*
 *				has_pending_notify			     *
 *===========================================================================*/
int has_pending_notify(struct proc * caller, int src_p)
{
	sys_map_t * map = &priv(caller)->s_notify_pending;
	return has_pending(map, src_p, 0);
}
/*===========================================================================*
 *				has_pending_asend			     *
 *===========================================================================*/
int has_pending_asend(struct proc * caller, int src_p)
{
	sys_map_t * map = &priv(caller)->s_asyn_pending;
	return has_pending(map, src_p, 1);
}
/*===========================================================================*
 *				unset_notify_pending			     *
 *===========================================================================*/
void unset_notify_pending(struct proc * caller, int src_p)
{
	sys_map_t * map = &priv(caller)->s_notify_pending;
	unset_sys_bit(*map, src_p);
}
/*===========================================================================*
 *				mini_send				     *
 *===========================================================================*/
int mini_send(
  register struct proc *caller_ptr,	/* who is trying to send a message? */
  endpoint_t dst_e,			/* to whom is message being sent? */
  message *m_ptr,			/* pointer to message buffer */
  const int flags
)
{
/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
 * for this message, copy the message to it and unblock 'dst'. If 'dst' is
 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
 */
  register struct proc *dst_ptr;
  register struct proc **xpp;
  int dst_p;

  dst_p = _ENDPOINT_P(dst_e);
  dst_ptr = proc_addr(dst_p);

  if (RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT))
  {
	return EDEADSRCDST;
  }
  /* Check if 'dst' is blocked waiting for this message. The destination's
   * RTS_SENDING flag may be set when its SENDREC call blocked while sending.
   */
  if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint)) {
	int call;
	/* Destination is indeed waiting for this message. */
	assert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));

	if (!(flags & FROM_KERNEL)) {
		if(copy_msg_from_user(m_ptr, &dst_ptr->p_delivermsg))
			return EFAULT;
	} else {
		dst_ptr->p_delivermsg = *m_ptr;
		IPC_STATUS_ADD_FLAGS(dst_ptr, IPC_FLG_MSG_FROM_KERNEL);
	}

	dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
	dst_ptr->p_misc_flags |= MF_DELIVERMSG;

	call = (caller_ptr->p_misc_flags & MF_REPLY_PEND ? SENDREC
		: (flags & NON_BLOCKING ? SENDNB : SEND));
	IPC_STATUS_ADD_CALL(dst_ptr, call);

	if (dst_ptr->p_misc_flags & MF_REPLY_PEND)
		dst_ptr->p_misc_flags &= ~MF_REPLY_PEND;

	RTS_UNSET(dst_ptr, RTS_RECEIVING);

#if DEBUG_IPC_HOOK
	hook_ipc_msgsend(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
	hook_ipc_msgrecv(&dst_ptr->p_delivermsg, caller_ptr, dst_ptr);
#endif
  } else {
	if(flags & NON_BLOCKING) {
		return(ENOTREADY);
	}

	/* Check for a possible deadlock before actually blocking. */
	if (deadlock(SEND, caller_ptr, dst_e)) {
		return(ELOCKED);
	}
	/* Destination is not waiting. Block and dequeue caller. */
	if (!(flags & FROM_KERNEL)) {
		if(copy_msg_from_user(m_ptr, &caller_ptr->p_sendmsg))
			return EFAULT;
	} else {
		caller_ptr->p_sendmsg = *m_ptr;
		/*
		 * we need to remember that this message is from kernel so we
		 * can set the delivery status flags when the message is
		 * delivered
		 */
		caller_ptr->p_misc_flags |= MF_SENDING_FROM_KERNEL;
	}

	RTS_SET(caller_ptr, RTS_SENDING);
	caller_ptr->p_sendto_e = dst_e;

	/* Process is now blocked. Put it on the destination's queue. */
	assert(caller_ptr->p_q_link == NULL);
	xpp = &dst_ptr->p_caller_q;		/* find end of list */
	while (*xpp) xpp = &(*xpp)->p_q_link;
	*xpp = caller_ptr;			/* add caller to end */

#if DEBUG_IPC_HOOK
	hook_ipc_msgsend(&caller_ptr->p_sendmsg, caller_ptr, dst_ptr);
#endif
  }
  return(OK);
}
/*===========================================================================*
 *				mini_receive				     *
 *===========================================================================*/
static int mini_receive(struct proc * caller_ptr,
			endpoint_t src_e, /* which message source is wanted */
			message * m_buff_usr, /* pointer to message buffer */
			const int flags)
{
/* A process or task wants to get a message. If a message is already queued,
 * acquire it and deblock the sender. If no message from the desired source
 * is available block the caller.
 */
  register struct proc **xpp;
  int r, src_id, src_proc_nr, src_p;

  assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));

  /* This is where we want our message. */
  caller_ptr->p_delivermsg_vir = (vir_bytes) m_buff_usr;
  if(src_e == ANY) src_p = ANY;
  else
  {
	okendpt(src_e, &src_p);
	if (RTS_ISSET(proc_addr(src_p), RTS_NO_ENDPOINT))
	{
		return EDEADSRCDST;
	}
  }
  /* Check to see if a message from desired source is already available. The
   * caller's RTS_SENDING flag may be set if SENDREC couldn't send. If it is
   * set, the process should be blocked.
   */
  if (!RTS_ISSET(caller_ptr, RTS_SENDING)) {

    /* Check if there are pending notifications, except for SENDREC. */
    if (! (caller_ptr->p_misc_flags & MF_REPLY_PEND)) {

	/* Check for pending notifications */
	if ((src_id = has_pending_notify(caller_ptr, src_p)) != NULL_PRIV_ID) {
	    endpoint_t hisep;

	    src_proc_nr = id_to_nr(src_id);		/* get source proc */
#if DEBUG_ENABLE_IPC_WARNINGS
	    if(src_proc_nr == NONE) {
		printf("mini_receive: sending notify from NONE\n");
	    }
#endif
	    assert(src_proc_nr != NONE);
	    unset_notify_pending(caller_ptr, src_id);	/* no longer pending */

	    /* Found a suitable source, deliver the notification message. */
	    hisep = proc_addr(src_proc_nr)->p_endpoint;
	    assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	    assert(src_e == ANY || hisep == src_e);

	    /* assemble message */
	    BuildNotifyMessage(&caller_ptr->p_delivermsg, src_proc_nr, caller_ptr);
	    caller_ptr->p_delivermsg.m_source = hisep;
	    caller_ptr->p_misc_flags |= MF_DELIVERMSG;

	    IPC_STATUS_ADD_CALL(caller_ptr, NOTIFY);

	    goto receive_done;
	}
    }
    /* Check for pending asynchronous messages */
    if (has_pending_asend(caller_ptr, src_p) != NULL_PRIV_ID) {
	if (src_p != ANY)
		r = try_one(proc_addr(src_p), caller_ptr);
	else
		r = try_async(caller_ptr);

	if (r == OK) {
		IPC_STATUS_ADD_CALL(caller_ptr, SENDA);
		goto receive_done;
	}
    }
    /* Check caller queue. Use pointer pointers to keep code simple. */
    xpp = &caller_ptr->p_caller_q;
    while (*xpp) {
	struct proc * sender = *xpp;

	if (src_e == ANY || src_p == proc_nr(sender)) {
	    int call;
	    assert(!RTS_ISSET(sender, RTS_SLOT_FREE));
	    assert(!RTS_ISSET(sender, RTS_NO_ENDPOINT));

	    /* Found acceptable message. Copy it and update status. */
	    assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	    caller_ptr->p_delivermsg = sender->p_sendmsg;
	    caller_ptr->p_delivermsg.m_source = sender->p_endpoint;
	    caller_ptr->p_misc_flags |= MF_DELIVERMSG;
	    RTS_UNSET(sender, RTS_SENDING);

	    call = (sender->p_misc_flags & MF_REPLY_PEND ? SENDREC : SEND);
	    IPC_STATUS_ADD_CALL(caller_ptr, call);

	    /*
	     * if the message is originally from the kernel on behalf of this
	     * process, we must send the status flags accordingly
	     */
	    if (sender->p_misc_flags & MF_SENDING_FROM_KERNEL) {
		IPC_STATUS_ADD_FLAGS(caller_ptr, IPC_FLG_MSG_FROM_KERNEL);
		/* we can clear the flag now, no need for it anymore */
		sender->p_misc_flags &= ~MF_SENDING_FROM_KERNEL;
	    }
	    if (sender->p_misc_flags & MF_SIG_DELAY)
		sig_delay_done(sender);

#if DEBUG_IPC_HOOK
	    hook_ipc_msgrecv(&caller_ptr->p_delivermsg, *xpp, caller_ptr);
#endif

	    *xpp = sender->p_q_link;		/* remove from queue */
	    sender->p_q_link = NULL;
	    goto receive_done;
	}
	xpp = &sender->p_q_link;		/* proceed to next */
    }
  }
  /* No suitable message is available or the caller couldn't send in SENDREC.
   * Block the process trying to receive, unless the flags tell otherwise.
   */
  if ( ! (flags & NON_BLOCKING)) {
	/* Check for a possible deadlock before actually blocking. */
	if (deadlock(RECEIVE, caller_ptr, src_e)) {
		return(ELOCKED);
	}

	caller_ptr->p_getfrom_e = src_e;
	RTS_SET(caller_ptr, RTS_RECEIVING);
	return(OK);
  } else {
	return(ENOTREADY);
  }

receive_done:
  if (caller_ptr->p_misc_flags & MF_REPLY_PEND)
	caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
  return OK;
}
/*===========================================================================*
 *				mini_notify				     *
 *===========================================================================*/
int mini_notify(
  const struct proc *caller_ptr,	/* sender of the notification */
  endpoint_t dst_e			/* which process to notify */
)
{
  register struct proc *dst_ptr;
  int src_id;				/* source id for late delivery */
  int dst_p;
  if (!isokendpt(dst_e, &dst_p)) {
	printf("mini_notify: bogus endpoint %d\n", dst_e);
	return EDEADSRCDST;
  }

  dst_ptr = proc_addr(dst_p);
  /* Check to see if target is blocked waiting for this message. A process
   * can be both sending and receiving during a SENDREC system call.
   */
  if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
      ! (dst_ptr->p_misc_flags & MF_REPLY_PEND)) {
	/* Destination is indeed waiting for a message. Assemble a notification
	 * message and deliver it. Copy from pseudo-source HARDWARE, since the
	 * message is in the kernel's address space.
	 */
	assert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));

	BuildNotifyMessage(&dst_ptr->p_delivermsg, proc_nr(caller_ptr), dst_ptr);
	dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
	dst_ptr->p_misc_flags |= MF_DELIVERMSG;

	IPC_STATUS_ADD_CALL(dst_ptr, NOTIFY);
	RTS_UNSET(dst_ptr, RTS_RECEIVING);

	return(OK);
  }
  /* Destination is not ready to receive the notification. Add it to the
   * bit map with pending notifications. Note the indirectness: the privilege id
   * instead of the process number is used in the pending bit map.
   */
  src_id = priv(caller_ptr)->s_id;
  set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
  return(OK);
}
#define ASCOMPLAIN(caller, entry, field)	\
	printf("kernel:%s:%d: asyn failed for %s in %s "	\
	"(%d/%d, tab 0x%lx)\n",__FILE__,__LINE__,	\
	field, caller->p_name, entry, priv(caller)->s_asynsize, priv(caller)->s_asyntab)

#define A_RETR_FLD(entry, field)	\
  if(data_copy(caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	KERNEL, (vir_bytes) &tabent.field,	\
	sizeof(tabent.field)) != OK) {\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		r = EFAULT;	\
		goto asyn_error; \
  }

#define A_RETR(entry) do {			\
  if (data_copy(				\
		caller_ptr->p_endpoint, table_v + (entry)*sizeof(asynmsg_t),\
		KERNEL, (vir_bytes) &tabent,	\
		sizeof(tabent)) != OK) {	\
			ASCOMPLAIN(caller_ptr, entry, "message entry");	\
			r = EFAULT;		\
			goto asyn_error;	\
  }						\
  } while(0)

#define A_INSRT_FLD(entry, field)	\
  if(data_copy(KERNEL, (vir_bytes) &tabent.field,	\
	caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	sizeof(tabent.field)) != OK) {\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		r = EFAULT;	\
		goto asyn_error; \
  }

#define A_INSRT(entry) do {			\
  if (data_copy(KERNEL, (vir_bytes) &tabent,	\
		caller_ptr->p_endpoint, table_v + (entry)*sizeof(asynmsg_t),\
		sizeof(tabent)) != OK) {	\
			ASCOMPLAIN(caller_ptr, entry, "message entry");	\
			r = EFAULT;		\
			goto asyn_error;	\
  }						\
  } while(0)
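
/* Usage sketch: inside the senda functions below, with 'tabent' and 'table_v'
 * in scope, table entry i is fetched into the kernel and written back like
 * this:
 *
 *   A_RETR(i);                    // copy table[i] into tabent
 *   tabent.flags |= AMF_DONE;     // ...inspect or modify the kernel copy...
 *   A_INSRT(i);                   // copy tabent back to table[i]
 *
 * On a failed copy the macros print an ASCOMPLAIN diagnostic, set r = EFAULT
 * and jump to the function-local asyn_error label.
 */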
/*===========================================================================*
 *				try_deliver_senda			     *
 *===========================================================================*/
int try_deliver_senda(struct proc *caller_ptr,
			asynmsg_t *table,
			size_t size)
{
  int r, dst_p, done, do_notify;
  unsigned int i;
  unsigned flags;
  endpoint_t dst;
  struct proc *dst_ptr;
  struct priv *privp;
  asynmsg_t tabent;
  const vir_bytes table_v = (vir_bytes) table;

  privp = priv(caller_ptr);
  /* Clear table */
  privp->s_asyntab = -1;
  privp->s_asynsize = 0;

  if (size == 0) return(OK);  /* Nothing to do, just return */
  /* Scan the table */
  do_notify = FALSE;
  done = TRUE;

  /* Limit size to something reasonable. An arbitrary choice is 16
   * times the number of process table entries.
   *
   * (this check has been duplicated in sys_call but is left here
   * as a sanity check)
   */
  if (size > 16*(NR_TASKS + NR_PROCS)) {
	r = EDOM;
	return r;
  }
  for (i = 0; i < size; i++) {
	/* Process each entry in the table and store the result in the table.
	 * If we're done handling a message, copy the result to the sender. */

	dst = NONE;
	/* Copy message to kernel */
	A_RETR(i);
	flags = tabent.flags;
	dst = tabent.dst;

	if (flags == 0) continue;	/* Skip empty entries */

	/* 'flags' field must contain only valid bits */
	if(flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR)) {
		r = EINVAL;
		goto asyn_error;
	}
	if (!(flags & AMF_VALID)) { /* Must contain message */
		r = EINVAL;
		goto asyn_error;
	}
	if (flags & AMF_DONE) continue;	/* Already done processing */
	r = OK;
	if (!isokendpt(tabent.dst, &dst_p))
		r = EDEADSRCDST; /* Bad destination, report the error */
	else if (iskerneln(dst_p))
		r = ECALLDENIED; /* Asyn sends to the kernel are not allowed */
	else if (!may_send_to(caller_ptr, dst_p))
		r = ECALLDENIED; /* Send denied by IPC mask */
	else	/* r == OK */
		dst_ptr = proc_addr(dst_p);

	/* XXX: RTS_NO_ENDPOINT should be removed */
	if (r == OK && RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT)) {
		r = EDEADSRCDST;
	}
	/* Check if 'dst' is blocked waiting for this message.
	 * If AMF_NOREPLY is set, do not satisfy the receiving part of
	 * a SENDREC.
	 */
	if (r == OK && WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
	    (!(flags&AMF_NOREPLY) || !(dst_ptr->p_misc_flags&MF_REPLY_PEND))) {
		/* Destination is indeed waiting for this message. */
		dst_ptr->p_delivermsg = tabent.msg;
		dst_ptr->p_delivermsg.m_source = caller_ptr->p_endpoint;
		dst_ptr->p_misc_flags |= MF_DELIVERMSG;
		IPC_STATUS_ADD_CALL(dst_ptr, SENDA);
		RTS_UNSET(dst_ptr, RTS_RECEIVING);
	} else if (r == OK) {
		/* Inform receiver that something is pending */
		set_sys_bit(priv(dst_ptr)->s_asyn_pending,
			    priv(caller_ptr)->s_id);
		done = FALSE;
		continue;
	}
	/* Store results */
	tabent.result = r;
	tabent.flags = flags | AMF_DONE;
	if (flags & AMF_NOTIFY)
		do_notify = TRUE;
	else if (r != OK && (flags & AMF_NOTIFY_ERR))
		do_notify = TRUE;
	A_INSRT(i);	/* Copy results to caller */
	continue;

asyn_error:
	if (dst != NONE)
		printf("KERNEL senda error %d to %d\n", r, dst);
	else
		printf("KERNEL senda error %d\n", r);
  }
  if (do_notify)
	mini_notify(proc_addr(ASYNCM), caller_ptr->p_endpoint);

  if (!done) {
	privp->s_asyntab = (vir_bytes) table;
	privp->s_asynsize = size;
  }

  return(OK);
}
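
/* Sender-side sketch (hypothetical system-process code, not part of this
 * file): a sender fills an asynmsg_t table and issues one SENDA for all of
 * it via the senda() library wrapper; the kernel then walks the table as
 * above and marks delivered entries AMF_DONE. dst1_e, dst2_e, msg1 and msg2
 * are placeholders:
 *
 *   static asynmsg_t table[2];
 *   table[0].dst = dst1_e;
 *   table[0].flags = AMF_VALID;                // entry holds a message
 *   table[0].msg = msg1;
 *   table[1].dst = dst2_e;
 *   table[1].flags = AMF_VALID | AMF_NOTIFY;   // notify when delivered
 *   table[1].msg = msg2;
 *   senda(table, 2);                           // traps into mini_senda()
 */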
/*===========================================================================*
 *				mini_senda				     *
 *===========================================================================*/
static int mini_senda(struct proc *caller_ptr, asynmsg_t *table, size_t size)
{
  struct priv *privp;

  privp = priv(caller_ptr);
  if (!(privp->s_flags & SYS_PROC)) {
	printf("mini_senda: warning caller has no privilege structure\n");
	return(EPERM);
  }

  return try_deliver_senda(caller_ptr, table, size);
}
/*===========================================================================*
 *				try_async				     *
 *===========================================================================*/
static int try_async(caller_ptr)
struct proc *caller_ptr;
{
  int r;
  struct priv *privp;
  struct proc *src_ptr;
  sys_map_t *map;

  map = &priv(caller_ptr)->s_asyn_pending;

  /* Try all privilege structures */
  for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; ++privp) {
	if (privp->s_proc_nr == NONE)
		continue;

	if (!get_sys_bit(*map, privp->s_id))
		continue;

	src_ptr = proc_addr(privp->s_proc_nr);

#ifdef CONFIG_SMP
	/*
	 * Do not copy from a process which does not have a stable address space
	 * due to VM fiddling with it
	 */
	if (RTS_ISSET(src_ptr, RTS_VMINHIBIT)) {
		src_ptr->p_misc_flags |= MF_SENDA_VM_MISS;
		continue;
	}
#endif

	assert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	if ((r = try_one(src_ptr, caller_ptr)) == OK)
		return(r);
  }

  return(ESRCH);
}
/*===========================================================================*
 *				try_one					     *
 *===========================================================================*/
static int try_one(struct proc *src_ptr, struct proc *dst_ptr)
{
/* Try to receive an asynchronous message from 'src_ptr' */
  int r = EAGAIN, done, do_notify;
  unsigned int flags, i;
  size_t size;
  endpoint_t dst;
  struct proc *caller_ptr;
  struct priv *privp;
  asynmsg_t tabent;
  vir_bytes table_v;

  privp = priv(src_ptr);
  if (!(privp->s_flags & SYS_PROC)) return(EPERM);
  size = privp->s_asynsize;
  table_v = privp->s_asyntab;

  /* Clear table pending message flag. We're done unless we're not. */
  unset_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);

  if (size == 0) return(EAGAIN);
  if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return(ECALLDENIED);

  caller_ptr = src_ptr;	/* Needed for A_ macros later on */
  /* Scan the table */
  do_notify = FALSE;
  done = TRUE;

  for (i = 0; i < size; i++) {
	/* Process each entry in the table and store the result in the table.
	 * If we're done handling a message, copy the result to the sender.
	 * Some checks done in mini_senda are duplicated here, as the sender
	 * could've altered the contents of the table in the meantime.
	 */

	/* Copy message to kernel */
	A_RETR(i);
	flags = tabent.flags;
	dst = tabent.dst;

	if (flags == 0) continue;	/* Skip empty entries */

	/* 'flags' field must contain only valid bits */
	if(flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR))
		r = EINVAL;
	else if (!(flags & AMF_VALID)) /* Must contain message */
		r = EINVAL;
	else if (flags & AMF_DONE) continue; /* Already done processing */

	/* Clear done flag. The sender is done sending when all messages in the
	 * table are marked done or empty. However, we will know that only
	 * the next time we enter this function or when the sender decides to
	 * send additional asynchronous messages and manages to deliver them
	 * all.
	 */
	done = FALSE;

	if (r == EINVAL)
		goto store_result;
	/* Message must be directed at receiving end */
	if (dst != dst_ptr->p_endpoint) continue;

	/* If AMF_NOREPLY is set, then this message is not a reply to a
	 * SENDREC and thus should not satisfy the receiving part of the
	 * SENDREC. This message is to be delivered later.
	 */
	if ((flags & AMF_NOREPLY) && (dst_ptr->p_misc_flags & MF_REPLY_PEND))
		continue;

	/* Destination is ready to receive the message; deliver it */
	r = OK;
	dst_ptr->p_delivermsg = tabent.msg;
	dst_ptr->p_delivermsg.m_source = src_ptr->p_endpoint;
	dst_ptr->p_misc_flags |= MF_DELIVERMSG;

store_result:
	/* Store results for sender */
	tabent.result = r;
	tabent.flags = flags | AMF_DONE;
	if (flags & AMF_NOTIFY) do_notify = TRUE;
	else if (r != OK && (flags & AMF_NOTIFY_ERR)) do_notify = TRUE;
	A_INSRT(i);	/* Copy results to sender */

	break;
  }
  if (do_notify)
	mini_notify(proc_addr(ASYNCM), src_ptr->p_endpoint);

  if (done) {
	privp->s_asyntab = -1;
	privp->s_asynsize = 0;
  } else {
	set_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);
  }

asyn_error:
  return(r);
}
/*===========================================================================*
 *				cancel_async				     *
 *===========================================================================*/
int cancel_async(struct proc *src_ptr, struct proc *dst_ptr)
{
/* Cancel asynchronous messages from src to dst, because dst is not interested
 * in them (e.g., dst has been restarted) */
  int done, do_notify;
  unsigned int flags, i;
  size_t size;
  endpoint_t dst;
  struct proc *caller_ptr;
  struct priv *privp;
  asynmsg_t tabent;
  vir_bytes table_v;

  privp = priv(src_ptr);
  if (!(privp->s_flags & SYS_PROC)) return(EPERM);
  size = privp->s_asynsize;
  table_v = privp->s_asyntab;

  /* Clear table pending message flag. We're done unless we're not. */
  privp->s_asyntab = -1;
  privp->s_asynsize = 0;
  unset_sys_bit(priv(dst_ptr)->s_asyn_pending, privp->s_id);

  if (size == 0) return(EAGAIN);
  if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return(ECALLDENIED);

  caller_ptr = src_ptr;	/* Needed for A_ macros later on */
  /* Scan the table */
  do_notify = FALSE;
  done = TRUE;

  for (i = 0; i < size; i++) {
	/* Process each entry in the table and store the result in the table.
	 * If we're done handling a message, copy the result to the sender.
	 * Some checks done in mini_senda are duplicated here, as the sender
	 * could've altered the contents of the table in the meantime.
	 */
	int r = EDEADSRCDST;	/* Cancel delivery due to dead dst */

	/* Copy message to kernel */
	A_RETR(i);
	flags = tabent.flags;
	dst = tabent.dst;

	if (flags == 0) continue;	/* Skip empty entries */

	/* 'flags' field must contain only valid bits */
	if(flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY|AMF_NOTIFY_ERR))
		r = EINVAL;
	else if (!(flags & AMF_VALID)) /* Must contain message */
		r = EINVAL;
	else if (flags & AMF_DONE) continue; /* Already done processing */

	/* Message must be directed at receiving end */
	if (dst != dst_ptr->p_endpoint) {
		done = FALSE;
		continue;
	}

	/* Store results for sender */
	tabent.result = r;
	tabent.flags = flags | AMF_DONE;
	if (flags & AMF_NOTIFY) do_notify = TRUE;
	else if (r != OK && (flags & AMF_NOTIFY_ERR)) do_notify = TRUE;
	A_INSRT(i);	/* Copy results to sender */
  }
  if (do_notify)
	mini_notify(proc_addr(ASYNCM), src_ptr->p_endpoint);

  if (!done) {
	privp->s_asyntab = table_v;
	privp->s_asynsize = size;
  }

asyn_error:
  return(OK);
}
/*===========================================================================*
 *				enqueue					     *
 *===========================================================================*/
void enqueue(
  register struct proc *rp	/* this process is now runnable */
)
{
/* Add 'rp' to one of the queues of runnable processes. This function is
 * responsible for inserting a process into one of the scheduling queues.
 * The mechanism is implemented here. The actual scheduling policy is
 * defined in sched() and pick_proc().
 *
 * This function can be used x-cpu as it always uses the queues of the cpu the
 * process is assigned to.
 */
  int q = rp->p_priority;	 		/* scheduling queue to use */
  struct proc **rdy_head, **rdy_tail;

  assert(proc_is_runnable(rp));

  assert(q >= 0);

  rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
  rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);

  /* Now add the process to the queue. */
  if (!rdy_head[q]) {				/* add to empty queue */
      rdy_head[q] = rdy_tail[q] = rp; 		/* create a new queue */
      rp->p_nextready = NULL;			/* mark new end */
  }
  else {					/* add to tail of queue */
      rdy_tail[q]->p_nextready = rp;		/* chain tail of queue */
      rdy_tail[q] = rp;				/* set new queue tail */
      rp->p_nextready = NULL;			/* mark new end */
  }

  if (cpuid == rp->p_cpu) {
	  /*
	   * when enqueueing a process with a higher priority than the current
	   * one, the current one gets preempted. The current process must be
	   * preemptible. Testing the priority also makes sure that a process
	   * does not preempt itself
	   */
	  struct proc * p;
	  p = get_cpulocal_var(proc_ptr);
	  assert(p);
	  if((p->p_priority > rp->p_priority) &&
			  (priv(p)->s_flags & PREEMPTIBLE))
		  RTS_SET(p, RTS_PREEMPTED); /* calls dequeue() */
  }
#ifdef CONFIG_SMP
  /*
   * if the process was enqueued on a different cpu and the cpu is idle, i.e.
   * the time is off, we need to wake up that cpu and let it schedule this new
   * process
   */
  else if (get_cpu_var(rp->p_cpu, cpu_is_idle)) {
	  smp_schedule(rp->p_cpu);
  }
#endif

  /* Make note of when this process was added to queue */
  read_tsc_64(&(get_cpulocal_var(proc_ptr)->p_accounting.enter_queue));

#if DEBUG_SANITYCHECKS
  assert(runqueues_ok_local());
#endif
}
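
/* Illustration (informal): with NR_SCHED_QUEUES priority levels, the per-CPU
 * run queues form an array of singly linked lists, e.g.
 *
 *   rdy_head[0]  -> task -> ... -> rdy_tail[0]  -> NULL   (highest priority)
 *   ...
 *   rdy_head[q]  -> rp   -> ... -> rdy_tail[q]  -> NULL
 *   ...
 *   rdy_head[NR_SCHED_QUEUES-1] -> ... -> NULL            (lowest priority)
 *
 * enqueue() appends at rdy_tail[q], enqueue_head() below pushes at
 * rdy_head[q], and pick_proc() scans the heads from queue 0 upwards.
 */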
/*===========================================================================*
 *				enqueue_head				     *
 *===========================================================================*/
/*
 * put a process at the front of its run queue. It comes in handy when a
 * process is preempted and removed from its run queue so as not to have a
 * currently not-runnable process on a run queue. We have to put this process
 * back at the front to be resumed first.
 */
static void enqueue_head(struct proc *rp)
{
  const int q = rp->p_priority;	 		/* scheduling queue to use */

  struct proc **rdy_head, **rdy_tail;

  assert(proc_ptr_ok(rp));
  assert(proc_is_runnable(rp));

  /*
   * the process was runnable without its quantum expired when dequeued. A
   * process with no time left should have been handled elsewhere and
   * differently
   */
  assert(!is_zero64(rp->p_cpu_time_left));

  assert(q >= 0);

  rdy_head = get_cpu_var(rp->p_cpu, run_q_head);
  rdy_tail = get_cpu_var(rp->p_cpu, run_q_tail);

  /* Now add the process to the queue. */
  if (!rdy_head[q]) {				/* add to empty queue */
	rdy_head[q] = rdy_tail[q] = rp; 	/* create a new queue */
	rp->p_nextready = NULL;			/* mark new end */
  } else {					/* add to head of queue */
	rp->p_nextready = rdy_head[q];		/* chain head of queue */
	rdy_head[q] = rp;			/* set new queue head */
  }

  /* Make note of when this process was added to queue */
  read_tsc_64(&(get_cpulocal_var(proc_ptr->p_accounting.enter_queue)));

  /* Process accounting for scheduling */
  rp->p_accounting.dequeues--;
  rp->p_accounting.preempted++;

#if DEBUG_SANITYCHECKS
  assert(runqueues_ok_local());
#endif
}
1627 *===========================================================================*/
1628 void dequeue(struct proc
*rp
)
1629 /* this process is no longer runnable */
1631 /* A process must be removed from the scheduling queues, for example, because
1632 * it has blocked. If the currently active process is removed, a new process
1633 * is picked to run by calling pick_proc().
1635 * This function can operate x-cpu as it always removes the process from the
1636 * queue of the cpu the process is currently assigned to.
1638 int q
= rp
->p_priority
; /* queue to use */
1639 struct proc
**xpp
; /* iterate over queue */
1640 struct proc
*prev_xp
;
1641 u64_t tsc
, tsc_delta
;
1643 struct proc
**rdy_tail
;
1645 assert(proc_ptr_ok(rp
));
1646 assert(!proc_is_runnable(rp
));
1648 /* Side-effect for kernel: check if the task's stack still is ok? */
1649 assert (!iskernelp(rp
) || *priv(rp
)->s_stack_guard
== STACK_GUARD
);
1651 rdy_tail
= get_cpu_var(rp
->p_cpu
, run_q_tail
);
  /* Now make sure that the process is not in its ready queue. Remove the
   * process if it is found. A process can be made unready even if it is not
   * running by being sent a signal that kills it.
   */
  prev_xp = NULL;
  for (xpp = get_cpu_var_ptr(rp->p_cpu, run_q_head[q]); *xpp;
		  xpp = &(*xpp)->p_nextready) {
      if (*xpp == rp) {				/* found process to remove */
          *xpp = (*xpp)->p_nextready;		/* replace with next chain */
          if (rp == rdy_tail[q]) {		/* queue tail removed */
              rdy_tail[q] = prev_xp;		/* set new tail */
	  }

          break;
      }
      prev_xp = *xpp;				/* save previous in chain */
  }
  /* Process accounting for scheduling */
  rp->p_accounting.dequeues++;

  /* this is not all that accurate on virtual machines, especially with
     IO bound processes that only spend a short amount of time in the queue
     at a time. */
  if (!is_zero64(rp->p_accounting.enter_queue)) {
	read_tsc_64(&tsc);
	tsc_delta = sub64(tsc, rp->p_accounting.enter_queue);
	rp->p_accounting.time_in_queue = add64(rp->p_accounting.time_in_queue,
		tsc_delta);
	make_zero64(rp->p_accounting.enter_queue);
  }

#if DEBUG_SANITYCHECKS
  assert(runqueues_ok_local());
#endif
}
1694 *===========================================================================*/
1695 static struct proc
* pick_proc(void)
1697 /* Decide who to run now. A new process is selected an returned.
1698 * When a billable process is selected, record it in 'bill_ptr', so that the
1699 * clock task can tell who to bill for system time.
1701 * This function always uses the run queues of the local cpu!
1703 register struct proc
*rp
; /* process to run */
1704 struct proc
**rdy_head
;
1705 int q
; /* iterate over queues */
1707 /* Check each of the scheduling queues for ready processes. The number of
1708 * queues is defined in proc.h, and priorities are set in the task table.
1709 * If there are no processes ready to run, return NULL.
1711 rdy_head
= get_cpulocal_var(run_q_head
);
1712 for (q
=0; q
< NR_SCHED_QUEUES
; q
++) {
1713 if(!(rp
= rdy_head
[q
])) {
1714 TRACE(VF_PICKPROC
, printf("cpu %d queue %d empty\n", cpuid
, q
););
1717 assert(proc_is_runnable(rp
));
1718 if (priv(rp
)->s_flags
& BILLABLE
)
1719 get_cpulocal_var(bill_ptr
) = rp
; /* bill for system time */
/*===========================================================================*
 *				endpoint_lookup				     *
 *===========================================================================*/
struct proc *endpoint_lookup(endpoint_t e)
{
	int n;

	if(!isokendpt(e, &n)) return NULL;

	return proc_addr(n);
}
/*===========================================================================*
 *				isokendpt_f				     *
 *===========================================================================*/
#if DEBUG_ENABLE_IPC_WARNINGS
int isokendpt_f(file, line, e, p, fatalflag)
const char *file;
int line;
#else
int isokendpt_f(e, p, fatalflag)
#endif
endpoint_t e;
int *p;
const int fatalflag;
{
	int ok = 0;
	/* Convert an endpoint number into a process number.
	 * Return nonzero if the process is alive with the corresponding
	 * generation number, zero otherwise.
	 *
	 * This function is called with file and line number by the
	 * isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
	 * otherwise without. This allows us to print where the
	 * conversion was attempted, making the errors verbose without
	 * adding code for that at every call.
	 *
	 * If fatalflag is nonzero, we must panic if the conversion doesn't
	 * succeed.
	 */
	*p = _ENDPOINT_P(e);
	if(isokprocn(*p) && !isemptyn(*p) && proc_addr(*p)->p_endpoint == e)
		ok = 1;
	if(!ok && fatalflag)
		panic("invalid endpoint: %d", e);
	return ok;
}
/*===========================================================================*
 *				notify_scheduler			     *
 *===========================================================================*/
static void notify_scheduler(struct proc *p)
{
	message m_no_quantum;
	int err;

	assert(!proc_kernel_scheduler(p));

	/* dequeue the process */
	RTS_SET(p, RTS_NO_QUANTUM);
	/*
	 * Notify the process's scheduler that it has run out of
	 * quantum. This is done by sending a message to the scheduler
	 * on the process's behalf
	 */
	m_no_quantum.m_source = p->p_endpoint;
	m_no_quantum.m_type   = SCHEDULING_NO_QUANTUM;
	m_no_quantum.SCHEDULING_ACNT_QUEUE = cpu_time_2_ms(p->p_accounting.time_in_queue);
	m_no_quantum.SCHEDULING_ACNT_DEQS      = p->p_accounting.dequeues;
	m_no_quantum.SCHEDULING_ACNT_IPC_SYNC  = p->p_accounting.ipc_sync;
	m_no_quantum.SCHEDULING_ACNT_IPC_ASYNC = p->p_accounting.ipc_async;
	m_no_quantum.SCHEDULING_ACNT_PREEMPT   = p->p_accounting.preempted;
	m_no_quantum.SCHEDULING_ACNT_CPU       = cpuid;
	m_no_quantum.SCHEDULING_ACNT_CPU_LOAD  = cpu_load();

	/* Reset accounting */
	reset_proc_accounting(p);

	if ((err = mini_send(p, p->p_scheduler->p_endpoint,
					&m_no_quantum, FROM_KERNEL))) {
		panic("WARNING: Scheduling: mini_send returned %d\n", err);
	}
}
void proc_no_time(struct proc * p)
{
	if (!proc_kernel_scheduler(p) && priv(p)->s_flags & PREEMPTIBLE) {
		/* this dequeues the process */
		notify_scheduler(p);
	}
	else {
		/*
		 * non-preemptible processes only need their quantum to
		 * be renewed. In fact, they bypass scheduling
		 */
		p->p_cpu_time_left = ms_2_cpu_time(p->p_quantum_size_ms);
#if DEBUG_RACE
		RTS_SET(p, RTS_PREEMPTED);
		RTS_UNSET(p, RTS_PREEMPTED);
#endif
	}
}
void reset_proc_accounting(struct proc *p)
{
	p->p_accounting.preempted = 0;
	p->p_accounting.ipc_sync  = 0;
	p->p_accounting.ipc_async = 0;
	p->p_accounting.dequeues  = 0;
	make_zero64(p->p_accounting.time_in_queue);
	make_zero64(p->p_accounting.enter_queue);
}
void copr_not_available_handler(void)
{
	struct proc * p;
	struct proc ** local_fpu_owner;
	/*
	 * Disable the FPU exception (both for the kernel and for the process
	 * once it's scheduled), and initialize or restore the FPU state.
	 */

	disable_fpu_exception();

	p = get_cpulocal_var(proc_ptr);

	/* if FPU is not owned by anyone, do not store anything */
	local_fpu_owner = get_cpulocal_var_ptr(fpu_owner);
	if (*local_fpu_owner != NULL) {
		assert(*local_fpu_owner != p);
		save_local_fpu(*local_fpu_owner, FALSE /*retain*/);
	}

	/*
	 * restore the current process' state and let it run again, do not
	 * schedule!
	 */
	if (restore_fpu(p) != OK) {
		/* Restoring FPU state failed. This is always the process's own
		 * fault. Send a signal, and schedule another process instead.
		 */
		*local_fpu_owner = NULL;	/* release FPU */
		cause_sig(proc_nr(p), SIGFPE);
		return;
	}

	*local_fpu_owner = p;
	context_stop(proc_addr(KERNEL));
	restore_user_context(p);
	NOT_REACHABLE;
}
void release_fpu(struct proc * p) {
	struct proc ** fpu_owner_ptr;

	fpu_owner_ptr = get_cpu_var_ptr(p->p_cpu, fpu_owner);

	if (*fpu_owner_ptr == p)
		*fpu_owner_ptr = NULL;
}