/* This file contains essentially all of the process and message handling.
 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
 * There is one entry point from the outside:
 *
 *   sys_call:   a system call, i.e., the kernel is trapped with an INT
 *
 * As well as several entry points used from the interrupt and task level:
 *
 *   lock_send:  send a message to a process
 *
 *   Aug 19, 2005   rewrote scheduling code  (Jorrit N. Herder)
 *   Jul 25, 2005   rewrote system call handling  (Jorrit N. Herder)
 *   May 26, 2005   rewrote message passing functions  (Jorrit N. Herder)
 *   May 24, 2005   new notification system call  (Jorrit N. Herder)
 *   Oct 28, 2004   nonblocking send and receive calls  (Jorrit N. Herder)
 *
 * The code here is critical to make everything work and is important for the
 * overall performance of the system. A large fraction of the code deals with
 * list manipulation. To make this both easy to understand and fast to execute,
 * pointer pointers are used throughout the code. Pointer pointers prevent
 * exceptions for the head or tail of a linked list.
 *
 *   node_t *queue, *new_node;   // assume these as global variables
 *   node_t **xpp = &queue;      // get pointer pointer to head of queue
 *   while (*xpp != NULL)        // find last pointer of the linked list
 *       xpp = &(*xpp)->next;    // get pointer to next pointer
 *   *xpp = new_node;            // now replace the end (the NULL pointer)
 *   new_node->next = NULL;      // and mark the new end of the list
 *
 * For example, when adding a new node to the end of the list, one normally
 * makes an exception for an empty list and looks up the end of the list for
 * nonempty lists. As shown above, this is not required with pointer pointers.
 */

#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/endpoint.h>
#include <minix/portio.h>
#include <minix/u64.h>
#include <minix/syslib.h>

/* Scheduling and message passing functions. The functions are available to
 * other parts of the kernel through lock_...(). The lock temporarily disables
 * interrupts to prevent race conditions.
 */
FORWARD _PROTOTYPE( void idle, (void));
FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
		message *m_ptr, int flags));
FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
		message *m_ptr, int flags));
FORWARD _PROTOTYPE( int mini_senda, (struct proc *caller_ptr,
		asynmsg_t *table, size_t size));
FORWARD _PROTOTYPE( int deadlock, (int function,
		register struct proc *caller, int src_dst));
FORWARD _PROTOTYPE( int try_async, (struct proc *caller_ptr));
FORWARD _PROTOTYPE( int try_one, (struct proc *src_ptr, struct proc *dst_ptr,
		int *postponed));
FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
FORWARD _PROTOTYPE( struct proc * pick_proc, (void));
FORWARD _PROTOTYPE( void enqueue_head, (struct proc *rp));

#define PICK_HIGHERONLY	2

#define BuildNotifyMessage(m_ptr, src, dst_ptr) \
	(m_ptr)->m_type = NOTIFY_FROM(src);				\
	(m_ptr)->NOTIFY_TIMESTAMP = get_uptime();			\
	switch (src) {							\
	case HARDWARE:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending;	\
		priv(dst_ptr)->s_int_pending = 0;			\
		break;							\
	case SYSTEM:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending;	\
		priv(dst_ptr)->s_sig_pending = 0;			\
		break;							\
	}
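
/* For illustration (a sketch only): a notification built for a destination
 * with pending hardware interrupts carries
 *
 *   m.m_type           = NOTIFY_FROM(HARDWARE);
 *   m.NOTIFY_TIMESTAMP = get_uptime();
 *   m.NOTIFY_ARG       = priv(dst_ptr)->s_int_pending;   // bitmap of IRQ hooks
 *
 * after which s_int_pending is cleared; decoding the bitmap is left entirely
 * to the receiving driver.
 */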

/*===========================================================================*
 *				QueueMess				     *
 *===========================================================================*/
PRIVATE int QueueMess(endpoint_t ep, vir_bytes msg_lin, struct proc *dst)
{
  int k;
  phys_bytes addr;
  NOREC_ENTER(queuemess);

  /* Queue a message from the src process (in memory) to the dst
   * process (using dst process table entry). Do actual copy to
   * kernel here; it's an error if the copy fails into kernel.
   */
  vmassert(!(dst->p_misc_flags & MF_DELIVERMSG));
  vmassert(dst->p_delivermsg_lin);
  vmassert(isokendpt(ep, &k));

  PHYS_COPY_CATCH(msg_lin, dst->p_delivermsg_lin,
	sizeof(message), addr);
  if(!addr) {
	PHYS_COPY_CATCH(vir2phys(&ep), dst->p_delivermsg_lin,
		sizeof(ep), addr);
	if(!addr) {
		NOREC_RETURN(queuemess, OK);
	}
  }

  PHYS_COPY_CATCH(msg_lin, vir2phys(&dst->p_delivermsg), sizeof(message), addr);
  if(addr) {
	NOREC_RETURN(queuemess, EFAULT);
  }

  dst->p_delivermsg.m_source = ep;
  dst->p_misc_flags |= MF_DELIVERMSG;

  NOREC_RETURN(queuemess, OK);
}

/*===========================================================================*
 *				idle					     *
 *===========================================================================*/
PRIVATE void idle(void)
{
  /* This function is called whenever there is no work to do.
   * Halt the CPU, and measure how many timestamp counter ticks are
   * spent not doing anything. This allows test setups to measure
   * the CPU utilization of certain workloads with high precision.
   */
#ifdef CONFIG_IDLE_TSC
  read_tsc_64(&idle_start);
#endif

#ifdef CONFIG_IDLE_TSC
  printf("Kernel: idle active after resuming CPU\n");

  idle_tsc = add64(idle_tsc, sub64(idle_stop, idle_start));
#endif
}

/*===========================================================================*
 *				schedcheck				     *
 *===========================================================================*/
PUBLIC struct proc * schedcheck(void)
{
	/* This function is called an instant before proc_ptr is
	 * to be scheduled again.
	 */
	NOREC_ENTER(schedch);
	vmassert(intr_disabled());

	/*
	 * if the current process is still runnable check the misc flags and
	 * let it run unless it becomes not runnable in the meantime
	 */
	if (proc_is_runnable(proc_ptr))
		goto check_misc_flags;

	/*
	 * if a process becomes not runnable while handling the misc flags, we
	 * need to pick a new one here and start from scratch. Also if the
	 * current process wasn't runnable, we pick a new one here
	 */
not_runnable_pick_new:
	if (proc_is_preempted(proc_ptr)) {
		proc_ptr->p_rts_flags &= ~RTS_PREEMPTED;
		if (proc_is_runnable(proc_ptr))
			enqueue_head(proc_ptr);
	}

	/* this enqueues the process again */
	if (proc_no_quantum(proc_ptr))
		RTS_UNSET(proc_ptr, RTS_NO_QUANTUM);

	/*
	 * if we have no process to run, set IDLE as the current process for
	 * time accounting and put the cpu in an idle state. After the next
	 * timer interrupt the execution resumes here and we can pick another
	 * process. If there is still nothing runnable we "schedule" IDLE again
	 */
	while (!(proc_ptr = pick_proc())) {
		proc_ptr = proc_addr(IDLE);
		if (priv(proc_ptr)->s_flags & BILLABLE)
			bill_ptr = proc_ptr;
		idle();
	}

check_misc_flags:

	vmassert(proc_is_runnable(proc_ptr));
	while (proc_ptr->p_misc_flags &
		(MF_DELIVERMSG | MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

		vmassert(proc_is_runnable(proc_ptr));
		if (proc_ptr->p_misc_flags & MF_DELIVERMSG) {
			TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
				proc_ptr->p_name, proc_ptr->p_endpoint););
			if(delivermsg(proc_ptr) == VMSUSPEND) {
				TRACE(VF_SCHEDULING,
					printf("suspending %s / %d\n",
					proc_ptr->p_name,
					proc_ptr->p_endpoint););
				vmassert(!proc_is_runnable(proc_ptr));
			}
		}
		else if (proc_ptr->p_misc_flags & MF_SC_DEFER) {
			/* Perform the system call that we deferred earlier. */

#if DEBUG_SCHED_CHECK
			if (proc_ptr->p_misc_flags & MF_SC_ACTIVE)
				minix_panic("MF_SC_ACTIVE and MF_SC_DEFER set",
					NO_NUM);
#endif

			arch_do_syscall(proc_ptr);

			/* If the process is stopped for signal delivery, and
			 * not blocked sending a message after the system call,
			 * inform PM.
			 */
			if ((proc_ptr->p_misc_flags & MF_SIG_DELAY) &&
				!RTS_ISSET(proc_ptr, RTS_SENDING))
				sig_delay_done(proc_ptr);
		}
		else if (proc_ptr->p_misc_flags & MF_SC_TRACE) {
			/* Trigger a system call leave event if this was a
			 * system call. We must do this after processing the
			 * other flags above, both for tracing correctness and
			 * to be able to use 'break'.
			 */
			if (!(proc_ptr->p_misc_flags & MF_SC_ACTIVE))
				break;

			proc_ptr->p_misc_flags &=
				~(MF_SC_TRACE | MF_SC_ACTIVE);

			/* Signal the "leave system call" event.
			 * Block the process.
			 */
			cause_sig(proc_nr(proc_ptr), SIGTRAP);
		}
		else if (proc_ptr->p_misc_flags & MF_SC_ACTIVE) {
			/* If MF_SC_ACTIVE was set, remove it now:
			 * we're leaving the system call.
			 */
			proc_ptr->p_misc_flags &= ~MF_SC_ACTIVE;
			break;
		}

		/*
		 * the selected process might not be runnable anymore. We have
		 * to check it and schedule another one
		 */
		if (!proc_is_runnable(proc_ptr))
			goto not_runnable_pick_new;
	}

	TRACE(VF_SCHEDULING, printf("starting %s / %d\n",
		proc_ptr->p_name, proc_ptr->p_endpoint););

	proc_ptr->p_schedules++;

	proc_ptr = arch_finish_schedcheck();

	NOREC_RETURN(schedch, proc_ptr);
}

/*===========================================================================*
 *				sys_call				     *
 *===========================================================================*/
PUBLIC int sys_call(call_nr, src_dst_e, m_ptr, bit_map)
int call_nr;			/* system call number and flags */
int src_dst_e;			/* src to receive from or dst to send to */
message *m_ptr;			/* pointer to message in the caller's space */
long bit_map;			/* notification event set or flags */
{
/* System calls are done by trapping to the kernel with an INT instruction.
 * The trap is caught and sys_call() is called to send or receive a message
 * (or both). The caller is always given by 'proc_ptr'.
 */
  register struct proc *caller_ptr = proc_ptr;	/* get pointer to caller */
  int mask_entry;				/* bit to check in send mask */
  int group_size;				/* used for deadlock check */
  int result;					/* the system call's result */
  int src_dst_p;				/* Process slot number */
  size_t msg_size;

  /* If this process is subject to system call tracing, handle that first. */
  if (caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) {
	/* Are we tracing this process, and is it the first sys_call entry? */
	if ((caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) ==
							MF_SC_TRACE) {
		/* We must notify the tracer before processing the actual
		 * system call. If we don't, the tracer could not obtain the
		 * input message. Postpone the entire system call.
		 */
		caller_ptr->p_misc_flags &= ~MF_SC_TRACE;
		caller_ptr->p_misc_flags |= MF_SC_DEFER;

		/* Signal the "enter system call" event. Block the process. */
		cause_sig(proc_nr(caller_ptr), SIGTRAP);

		/* Preserve the return register's value. */
		return caller_ptr->p_reg.retreg;
	}

	/* If the MF_SC_DEFER flag is set, the syscall is now being resumed. */
	caller_ptr->p_misc_flags &= ~MF_SC_DEFER;

#if DEBUG_SCHED_CHECK
	if (caller_ptr->p_misc_flags & MF_SC_ACTIVE)
		minix_panic("MF_SC_ACTIVE already set", NO_NUM);
#endif

	/* Set a flag to allow reliable tracing of leaving the system call. */
	caller_ptr->p_misc_flags |= MF_SC_ACTIVE;
  }

#if DEBUG_SCHED_CHECK
  if(caller_ptr->p_misc_flags & MF_DELIVERMSG) {
	kprintf("sys_call: MF_DELIVERMSG on for %s / %d\n",
		caller_ptr->p_name, caller_ptr->p_endpoint);
	minix_panic("MF_DELIVERMSG on", NO_NUM);
  }
#endif

#if 0
  if(src_dst_e != 4 && src_dst_e != 5 &&
	caller_ptr->p_endpoint != 4 && caller_ptr->p_endpoint != 5) {
	if(call_nr == SEND)
		kprintf("(%d SEND to %d) ", caller_ptr->p_endpoint, src_dst_e);
	else if(call_nr == RECEIVE)
		kprintf("(%d RECEIVE from %d) ", caller_ptr->p_endpoint, src_dst_e);
	else if(call_nr == SENDREC)
		kprintf("(%d SENDREC to %d) ", caller_ptr->p_endpoint, src_dst_e);
	else
		kprintf("(%d %d to/from %d) ", caller_ptr->p_endpoint, call_nr, src_dst_e);
  }
#endif

#if DEBUG_SCHED_CHECK
  if (RTS_ISSET(caller_ptr, RTS_SLOT_FREE))
  {
	kprintf("called by the dead?!?\n");
	return EINVAL;
  }
#endif

  /* Check destination. SENDA is special because its argument is a table and
   * not a single destination. RECEIVE is the only call that accepts ANY (in
   * addition to a real endpoint). The other calls (SEND, SENDREC,
   * and NOTIFY) require an endpoint to correspond to a process. In addition,
   * it is necessary to check whether a process is allowed to send to a given
   * destination.
   */
  if (call_nr == SENDA)
  {
	/* No destination argument */
  }
  else if (src_dst_e == ANY)
  {
	if (call_nr != RECEIVE)
	{
		kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
			call_nr, proc_nr(caller_ptr), src_dst_e);
		return EINVAL;
	}
	src_dst_p = src_dst_e;
  }
  else
  {
	/* Require a valid source and/or destination process. */
	if(!isokendpt(src_dst_e, &src_dst_p)) {
		kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
			call_nr, proc_nr(caller_ptr), src_dst_e);
		return EDEADSRCDST;
	}

	/* If the call is to send to a process, i.e., for SEND, SENDNB,
	 * SENDREC or NOTIFY, verify that the caller is allowed to send to
	 * the given destination.
	 */
	if (call_nr != RECEIVE)
	{
		if (!may_send_to(caller_ptr, src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
			kprintf(
			"sys_call: ipc mask denied trap %d from %d to %d\n",
				call_nr, caller_ptr->p_endpoint, src_dst_e);
#endif
			return(ECALLDENIED);	/* call denied by ipc mask */
		}
	}
  }

  /* Only allow non-negative call_nr values less than 32 */
  if (call_nr < 0 || call_nr >= 32)
  {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
		call_nr, proc_nr(caller_ptr), src_dst_p);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  /* Check if the process has privileges for the requested call. Calls to the
   * kernel may only be SENDREC, because tasks always reply and may not block
   * if the caller doesn't do receive().
   */
  if (!(priv(caller_ptr)->s_trap_mask & (1 << call_nr))) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
		call_nr, proc_nr(caller_ptr), src_dst_p);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  /* SENDA has no src_dst value here, so this check is in mini_senda() as well.
   */
  if (call_nr != SENDREC && call_nr != RECEIVE && call_nr != SENDA &&
	iskerneln(src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
		call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  /* Get and check the size of the argument in bytes.
   * Normally this is just the size of a regular message, but in the
   * case of SENDA the argument is a table.
   */
  if(call_nr == SENDA) {
	msg_size = (size_t) src_dst_e;

	/* Limit size to something reasonable. An arbitrary choice is 16
	 * times the number of process table entries.
	 */
	if (msg_size > 16*(NR_TASKS + NR_PROCS))
		return EDOM;
	msg_size *= sizeof(asynmsg_t);	/* convert to bytes */
  } else {
	msg_size = sizeof(*m_ptr);
  }

  /* Check for a possible deadlock for blocking SEND(REC) and RECEIVE. */
  if (call_nr == SEND || call_nr == SENDREC || call_nr == RECEIVE) {
	if (group_size = deadlock(call_nr, caller_ptr, src_dst_p)) {
		kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
			call_nr, proc_nr(caller_ptr), src_dst_p, group_size);
		return(ELOCKED);
	}
  }

  /* Now check if the call is known and try to perform the request. The only
   * system calls that exist in MINIX are sending and receiving messages.
   *   - SENDREC: combines SEND and RECEIVE in a single system call
   *   - SEND:    sender blocks until its message has been delivered
   *   - RECEIVE: receiver blocks until an acceptable message has arrived
   *   - NOTIFY:  asynchronous call; deliver notification or mark pending
   *   - SENDA:   list of asynchronous send requests
   */
  switch(call_nr) {
  case SENDREC:
	/* A flag is set so that notifications cannot interrupt SENDREC. */
	caller_ptr->p_misc_flags |= MF_REPLY_PEND;
	/* fall through */
  case SEND:
	result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
	if (call_nr == SEND || result != OK)
		break;				/* done, or SEND failed */
	/* fall through for SENDREC */
  case RECEIVE:
	if (call_nr == RECEIVE)
		caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
	result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
	break;
  case NOTIFY:
	result = mini_notify(caller_ptr, src_dst_e);
	break;
  case SENDNB:
	result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
	break;
  case SENDA:
	result = mini_senda(caller_ptr, (asynmsg_t *)m_ptr, (size_t)src_dst_e);
	break;
  default:
	result = EBADCALL;			/* illegal system call */
  }

  /* Now, return the result of the system call to the caller. */
  return(result);
}
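
/* As an illustration (a sketch only; handle() is hypothetical and the
 * send()/receive()/sendrec() names are the usual user-space wrappers for the
 * traps dispatched above): a simple server loops like this,
 *
 *   message m;
 *   for (;;) {
 *       receive(ANY, &m);            // RECEIVE: block until a request arrives
 *       m.m_type = handle(&m);       // serve the request
 *       send(m.m_source, &m);        // SEND: reply to the caller
 *   }
 *
 * while its client performs a single sendrec(server_ep, &m), which the switch
 * above implements as SEND followed by RECEIVE with MF_REPLY_PEND set so that
 * notifications cannot slip in between.
 */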

/*===========================================================================*
 *				deadlock				     *
 *===========================================================================*/
PRIVATE int deadlock(function, cp, src_dst)
int function;					/* trap number */
register struct proc *cp;			/* pointer to caller */
int src_dst;					/* src or dst process */
{
/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
 * a cyclic dependency of blocking send and receive calls. The only cyclic
 * dependency that is not fatal is if the caller and target directly SEND(REC)
 * and RECEIVE to each other. If a deadlock is found, the group size is
 * returned. Otherwise zero is returned.
 */
  register struct proc *xp;			/* process pointer */
  int group_size = 1;				/* start with only caller */
#if DEBUG_ENABLE_IPC_WARNINGS
  static struct proc *processes[NR_PROCS + NR_TASKS];
  processes[0] = cp;
#endif

  while (src_dst != ANY) { 			/* check while process nr */
      xp = proc_addr(src_dst);			/* follow chain of processes */
#if DEBUG_ENABLE_IPC_WARNINGS
      processes[group_size] = xp;
#endif
      group_size ++;				/* extra process in group */

      /* Check whether the last process in the chain has a dependency. If it
       * has not, the cycle cannot be closed and we are done.
       */
      if (RTS_ISSET(xp, RTS_RECEIVING)) {	/* xp has dependency */
	  if(xp->p_getfrom_e == ANY) src_dst = ANY;
	  else okendpt(xp->p_getfrom_e, &src_dst);
      } else if (RTS_ISSET(xp, RTS_SENDING)) {	/* xp has dependency */
	  okendpt(xp->p_sendto_e, &src_dst);
      } else {
	  return(0);				/* not a deadlock */
      }

      /* Now check if there is a cyclic dependency. For group sizes of two,
       * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
       * or other combinations indicate a deadlock.
       */
      if (src_dst == proc_nr(cp)) {		/* possible deadlock */
	  if (group_size == 2) {		/* caller and src_dst */
	      /* The function number is magically converted to flags. */
	      if ((xp->p_rts_flags ^ (function << 2)) & RTS_SENDING) {
	          return(0);			/* not a deadlock */
	      }
	  }
#if DEBUG_ENABLE_IPC_WARNINGS
	  {
	      int i;
	      kprintf("deadlock between these processes:\n");
	      for(i = 0; i < group_size; i++) {
		  kprintf(" %10s ", processes[i]->p_name);
		  proc_stacktrace(processes[i]);
	      }
	  }
#endif
          return(group_size);			/* deadlock found */
      }
  }
  return(0);					/* not a deadlock */
}
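
/* A concrete case of what deadlock() rejects (hypothetical processes A, B, C):
 * suppose A is blocked in SEND to B, B is blocked in SEND to C, and C now
 * traps with SEND to A. Starting from src_dst = A, the loop above follows
 * p_sendto_e from A to B and from B to C; src_dst then equals proc_nr(C), the
 * caller, with group_size == 3, so the call is refused. The only two-party
 * cycle that is tolerated is the ordinary SEND(REC) paired with a RECEIVE,
 * which the XOR test on RTS_SENDING filters out.
 */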

/*===========================================================================*
 *				mini_send				     *
 *===========================================================================*/
PRIVATE int mini_send(caller_ptr, dst_e, m_ptr, flags)
register struct proc *caller_ptr;	/* who is trying to send a message? */
int dst_e;				/* to whom is message being sent? */
message *m_ptr;				/* pointer to message buffer */
int flags;
{
/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
 * for this message, copy the message to it and unblock 'dst'. If 'dst' is
 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
 */
  register struct proc *dst_ptr;
  register struct proc **xpp;
  int dst_p;
  int r;
  phys_bytes linaddr;
  phys_bytes addr;

  if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
					sizeof(message)))) {
	return EFAULT;
  }
  dst_p = _ENDPOINT_P(dst_e);
  dst_ptr = proc_addr(dst_p);

  if (RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT))
  {
	return EDSTDIED;
  }

  /* Check if 'dst' is blocked waiting for this message. The destination's
   * RTS_SENDING flag may be set when its SENDREC call blocked while sending.
   */
  if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint)) {
	/* Destination is indeed waiting for this message. */
	vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
	if((r=QueueMess(caller_ptr->p_endpoint, linaddr, dst_ptr)) != OK)
		return r;
	RTS_UNSET(dst_ptr, RTS_RECEIVING);
  } else {
	if(flags & NON_BLOCKING) {
		return(ENOTREADY);
	}

	/* Destination is not waiting.  Block and dequeue caller. */
	PHYS_COPY_CATCH(linaddr, vir2phys(&caller_ptr->p_sendmsg),
		sizeof(message), addr);

	if(addr) { return EFAULT; }
	RTS_SET(caller_ptr, RTS_SENDING);
	caller_ptr->p_sendto_e = dst_e;

	/* Process is now blocked.  Put it on the destination's queue. */
	xpp = &dst_ptr->p_caller_q;		/* find end of list */
	while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
	*xpp = caller_ptr;			/* add caller to end */
	caller_ptr->p_q_link = NIL_PROC;	/* mark new end of list */
  }
  return(OK);
}

/*===========================================================================*
 *				mini_receive				     *
 *===========================================================================*/
PRIVATE int mini_receive(caller_ptr, src_e, m_ptr, flags)
register struct proc *caller_ptr;	/* process trying to get message */
int src_e;				/* which message source is wanted */
message *m_ptr;				/* pointer to message buffer */
int flags;
{
/* A process or task wants to get a message.  If a message is already queued,
 * acquire it and deblock the sender.  If no message from the desired source
 * is available block the caller.
 */
  register struct proc **xpp;
  register struct notification **ntf_q_pp;
  message m;
  sys_map_t *map;
  bitchunk_t *chunk;
  int i, r, src_id, src_proc_nr, src_p;
  phys_bytes linaddr;
  endpoint_t hisep;

  vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));

  if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
					sizeof(message)))) {
	return EFAULT;
  }

  /* This is where we want our message. */
  caller_ptr->p_delivermsg_lin = linaddr;
  caller_ptr->p_delivermsg_vir = (vir_bytes) m_ptr;

  if(src_e == ANY) src_p = ANY;
  else
  {
	okendpt(src_e, &src_p);
	if (RTS_ISSET(proc_addr(src_p), RTS_NO_ENDPOINT))
	{
		return ESRCDIED;
	}
  }

  /* Check to see if a message from desired source is already available.  The
   * caller's RTS_SENDING flag may be set if SENDREC couldn't send. If it is
   * set, the process should be blocked.
   */
  if (!RTS_ISSET(caller_ptr, RTS_SENDING)) {

    /* Check if there are pending notifications, except for SENDREC. */
    if (! (caller_ptr->p_misc_flags & MF_REPLY_PEND)) {

        map = &priv(caller_ptr)->s_notify_pending;
        for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {

            /* Find a pending notification from the requested source. */
            if (! *chunk) continue; 			/* no bits in chunk */
            for (i=0; ! (*chunk & (1<<i)); ++i) {} 	/* look up the bit */
            src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
            if (src_id >= NR_SYS_PROCS) break;		/* out of range */
            src_proc_nr = id_to_nr(src_id);		/* get source proc */
#if DEBUG_ENABLE_IPC_WARNINGS
	    if(src_proc_nr == NONE) {
		kprintf("mini_receive: sending notify from NONE\n");
	    }
#endif
            if (src_e!=ANY && src_p != src_proc_nr) continue;/* source not ok */
            *chunk &= ~(1 << i);			/* no longer pending */

            /* Found a suitable source, deliver the notification message. */
	    BuildNotifyMessage(&m, src_proc_nr, caller_ptr);	/* assemble message */
	    hisep = proc_addr(src_proc_nr)->p_endpoint;
	    vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	    vmassert(src_e == ANY || hisep == src_e);
	    if((r=QueueMess(hisep, vir2phys(&m), caller_ptr)) != OK) {
		minix_panic("mini_receive: local QueueMess failed", NO_NUM);
	    }
            return(OK);					/* report success */
        }
    }

    /* Check caller queue. Use pointer pointers to keep code simple. */
    xpp = &caller_ptr->p_caller_q;
    while (*xpp != NIL_PROC) {
        if (src_e == ANY || src_p == proc_nr(*xpp)) {
#if DEBUG_SCHED_CHECK
	    if (RTS_ISSET(*xpp, RTS_SLOT_FREE) || RTS_ISSET(*xpp, RTS_NO_ENDPOINT))
	    {
		kprintf("%d: receive from %d; found dead %d (%s)?\n",
			caller_ptr->p_endpoint, src_e, (*xpp)->p_endpoint,
			(*xpp)->p_name);
	    }
#endif

	    /* Found acceptable message. Copy it and update status. */
	    vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	    QueueMess((*xpp)->p_endpoint,
		vir2phys(&(*xpp)->p_sendmsg), caller_ptr);
	    if ((*xpp)->p_misc_flags & MF_SIG_DELAY)
		sig_delay_done(*xpp);
	    RTS_UNSET(*xpp, RTS_SENDING);
            *xpp = (*xpp)->p_q_link;		/* remove from queue */
            return(OK);				/* report success */
	}
	xpp = &(*xpp)->p_q_link;		/* proceed to next */
    }

    if (caller_ptr->p_misc_flags & MF_ASYNMSG)
    {
	if (src_e != ANY)
		r = try_one(proc_addr(src_p), caller_ptr, NULL);
	else
		r = try_async(caller_ptr);

	if (r == OK)
		return OK;	/* Got a message */
    }
  }

  /* No suitable message is available or the caller couldn't send in SENDREC.
   * Block the process trying to receive, unless the flags tell otherwise.
   */
  if ( ! (flags & NON_BLOCKING)) {
      caller_ptr->p_getfrom_e = src_e;
      RTS_SET(caller_ptr, RTS_RECEIVING);
      return(OK);
  } else {
      return(ENOTREADY);
  }
}

/*===========================================================================*
 *				mini_notify				     *
 *===========================================================================*/
PUBLIC int mini_notify(caller_ptr, dst_e)
register struct proc *caller_ptr;	/* sender of the notification */
endpoint_t dst_e;			/* which process to notify */
{
  register struct proc *dst_ptr;
  int src_id;				/* source id for late delivery */
  message m;				/* the notification message */
  int r;
  int dst_p;

  vmassert(intr_disabled());

  if (!isokendpt(dst_e, &dst_p)) {
	kprintf("mini_notify: bogus endpoint %d\n", dst_e);
	return EDEADSRCDST;
  }

  dst_ptr = proc_addr(dst_p);

  /* Check to see if target is blocked waiting for this message. A process
   * can be both sending and receiving during a SENDREC system call.
   */
  if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
      ! (dst_ptr->p_misc_flags & MF_REPLY_PEND)) {
      /* Destination is indeed waiting for a message. Assemble a notification
       * message and deliver it. Copy from pseudo-source HARDWARE, since the
       * message is in the kernel's address space.
       */
      BuildNotifyMessage(&m, proc_nr(caller_ptr), dst_ptr);
      vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
      if((r=QueueMess(caller_ptr->p_endpoint, vir2phys(&m), dst_ptr)) != OK) {
	  minix_panic("mini_notify: local QueueMess failed", NO_NUM);
      }
      RTS_UNSET(dst_ptr, RTS_RECEIVING);
      return(OK);
  }

  /* Destination is not ready to receive the notification. Add it to the
   * bit map with pending notifications. Note the indirectness: the system id
   * instead of the process number is used in the pending bit map.
   */
  src_id = priv(caller_ptr)->s_id;
  set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
  return(OK);
}

#define ASCOMPLAIN(caller, entry, field)	\
	kprintf("kernel:%s:%d: asyn failed for %s in %s "	\
	"(%d/%d, tab 0x%lx)\n",__FILE__,__LINE__,	\
	field, caller->p_name, entry, priv(caller)->s_asynsize, priv(caller)->s_asyntab)

#define A_RETRIEVE(entry, field)	\
  if(data_copy(caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	SYSTEM, (vir_bytes) &tabent.field,	\
	sizeof(tabent.field)) != OK) {\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		return EFAULT;	\
  }

#define A_INSERT(entry, field)	\
  if(data_copy(SYSTEM, (vir_bytes) &tabent.field,	\
	caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	sizeof(tabent.field)) != OK) {\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		return EFAULT;	\
  }

/*===========================================================================*
 *				mini_senda				     *
 *===========================================================================*/
PRIVATE int mini_senda(caller_ptr, table, size)
struct proc *caller_ptr;
asynmsg_t *table;
size_t size;
{
	int i, dst_p, done, do_notify, r;
	unsigned flags;
	struct proc *dst_ptr;
	struct priv *privp;
	message *m_ptr;
	asynmsg_t tabent;
	vir_bytes table_v = (vir_bytes) table;
	vir_bytes linaddr;

	privp = priv(caller_ptr);
	if (!(privp->s_flags & SYS_PROC))
	{
		kprintf(
		"mini_senda: warning caller has no privilege structure\n");
		return EPERM;
	}

	privp->s_asyntab = -1;
	privp->s_asynsize = 0;

	if (size == 0)
	{
		/* Nothing to do, just return */
		return OK;
	}

	if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) table,
		size * sizeof(*table)))) {
		printf("mini_senda: umap_local failed; 0x%lx len 0x%lx\n",
			table, size * sizeof(*table));
		return EFAULT;
	}

	/* Limit size to something reasonable. An arbitrary choice is 16
	 * times the number of process table entries.
	 *
	 * (this check has been duplicated in sys_call but is left here
	 * as a sanity check)
	 */
	if (size > 16*(NR_TASKS + NR_PROCS))
	{
		return EDOM;
	}

	/* Scan the table */
	do_notify = FALSE;
	done = TRUE;
	for (i = 0; i < size; i++)
	{
		/* Read status word */
		A_RETRIEVE(i, flags);
		flags = tabent.flags;

		/* Skip empty entries */
		if (flags == 0)
			continue;

		/* Check for reserved bits in the flags field */
		if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY) ||
			!(flags & AMF_VALID))
		{
			return EINVAL;
		}

		/* Skip entry if AMF_DONE is already set */
		if (flags & AMF_DONE)
			continue;

		/* Get destination */
		A_RETRIEVE(i, dst);

		if (!isokendpt(tabent.dst, &dst_p))
		{
			/* Bad destination, report the error */
			tabent.result = EDEADSRCDST;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

		if (iskerneln(dst_p))
		{
			/* Asynchronous sends to the kernel are not allowed */
			tabent.result = ECALLDENIED;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

		if (!may_send_to(caller_ptr, dst_p))
		{
			/* Send denied by IPC mask */
			tabent.result = ECALLDENIED;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

		kprintf("mini_senda: entry[%d]: flags 0x%x dst %d/%d\n",
			i, tabent.flags, tabent.dst, dst_p);

		dst_ptr = proc_addr(dst_p);

		/* RTS_NO_ENDPOINT should be removed */
		if (dst_ptr->p_rts_flags & RTS_NO_ENDPOINT)
		{
			tabent.result = EDSTDIED;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

		/* Check if 'dst' is blocked waiting for this message.
		 * If AMF_NOREPLY is set, do not satisfy the receiving part of
		 * a SENDREC.
		 */
		if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
			(!(flags & AMF_NOREPLY) ||
			!(dst_ptr->p_misc_flags & MF_REPLY_PEND)))
		{
			/* Destination is indeed waiting for this message. */
			m_ptr = &table[i].msg;	/* Note: pointer in the
						 * caller's address space.
						 */
			/* Copy message from sender. */
			tabent.result = QueueMess(caller_ptr->p_endpoint,
				linaddr + (vir_bytes) &table[i].msg -
					(vir_bytes) table, dst_ptr);
			if(tabent.result == OK)
				RTS_UNSET(dst_ptr, RTS_RECEIVING);

			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}
		else
		{
			/* Should inform receiver that something is pending */
			dst_ptr->p_misc_flags |= MF_ASYNMSG;
			done = FALSE;
			continue;
		}
	}
	if (do_notify)
		kprintf("mini_senda: should notify caller\n");
	if (!done)
	{
		privp->s_asyntab = (vir_bytes) table;
		privp->s_asynsize = size;
	}
	return OK;
}

/*===========================================================================*
 *				try_async				     *
 *===========================================================================*/
PRIVATE int try_async(caller_ptr)
struct proc *caller_ptr;
{
	int r;
	struct priv *privp;
	struct proc *src_ptr;
	int postponed = FALSE;

	/* Try all privilege structures */
	for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; ++privp)
	{
		if (privp->s_proc_nr == NONE)
			continue;

		src_ptr = proc_addr(privp->s_proc_nr);

		vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
		r = try_one(src_ptr, caller_ptr, &postponed);
		if (r == OK)
			return r;
	}

	/* Nothing found, clear MF_ASYNMSG unless messages were postponed */
	if (postponed == FALSE)
		caller_ptr->p_misc_flags &= ~MF_ASYNMSG;

	return ESRCDIED;
}

/*===========================================================================*
 *				try_one					     *
 *===========================================================================*/
PRIVATE int try_one(src_ptr, dst_ptr, postponed)
struct proc *src_ptr;
struct proc *dst_ptr;
int *postponed;
{
	int i, do_notify, done;
	unsigned flags;
	size_t size;
	endpoint_t dst_e;
	asynmsg_t *table_ptr;
	vir_bytes table_v;
	struct priv *privp;
	asynmsg_t tabent;
	message *m_ptr;
	struct proc *caller_ptr;
	int r;

	privp = priv(src_ptr);

	/* Basic validity checks */
	if (privp->s_id == USER_PRIV_ID) return EAGAIN;
	if (privp->s_asynsize == 0) return EAGAIN;
	if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return EAGAIN;

	size = privp->s_asynsize;
	table_v = privp->s_asyntab;
	caller_ptr = src_ptr;

	dst_e = dst_ptr->p_endpoint;

	/* Scan the table */
	do_notify = FALSE;
	done = TRUE;
	for (i = 0; i < size; i++)
	{
		/* Read status word */
		A_RETRIEVE(i, flags);
		flags = tabent.flags;

		/* Skip empty entries */
		if (flags == 0)
			continue;

		/* Check for reserved bits in the flags field */
		if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY) ||
			!(flags & AMF_VALID))
		{
			kprintf("try_one: bad bits in table\n");
			privp->s_asynsize = 0;
			return EINVAL;
		}

		/* Skip entry if AMF_DONE is already set */
		if (flags & AMF_DONE)
			continue;

		/* Clear done. We are done when all entries are either empty
		 * or done at the start of the call.
		 */
		done = FALSE;

		/* Get destination */
		A_RETRIEVE(i, dst);

		if (tabent.dst != dst_e)
			continue;

		/* If AMF_NOREPLY is set, do not satisfy the receiving part of
		 * a SENDREC. Do not unset MF_ASYNMSG later because of this,
		 * though: this message is still to be delivered later.
		 */
		if ((flags & AMF_NOREPLY) &&
			(dst_ptr->p_misc_flags & MF_REPLY_PEND))
		{
			if (postponed != NULL)
				*postponed = TRUE;
			continue;
		}

		/* Deliver message */
		table_ptr = (asynmsg_t *) privp->s_asyntab;
		m_ptr = &table_ptr[i].msg;	/* Note: pointer in the
						 * caller's address space.
						 */
		A_RETRIEVE(i, msg);
		r = QueueMess(src_ptr->p_endpoint, vir2phys(&tabent.msg),
			dst_ptr);

		tabent.result = r;
		A_INSERT(i, result);
		tabent.flags = flags | AMF_DONE;
		A_INSERT(i, flags);

		if (flags & AMF_NOTIFY)
			kprintf("try_one: should notify caller\n");
		return OK;
	}
	if (done)
		privp->s_asynsize = 0;
	return EAGAIN;
}

/*===========================================================================*
 *				lock_notify				     *
 *===========================================================================*/
PUBLIC int lock_notify(src_e, dst_e)
int src_e;			/* (endpoint) sender of the notification */
int dst_e;			/* (endpoint) who is to be notified */
{
/* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
 * is explicitly given to prevent confusion about where the call comes from.
 * The MINIX kernel is not reentrant, which means that interrupts are disabled
 * after the first kernel entry (hardware interrupt, trap, or exception).
 * Locking is done by temporarily disabling interrupts.
 */
  int result, src_p;

  vmassert(!intr_disabled());

  if (!isokendpt(src_e, &src_p)) {
	kprintf("lock_notify: bogus src: %d\n", src_e);
	return EDEADSRCDST;
  }

  lock;
  vmassert(intr_disabled());
  result = mini_notify(proc_addr(src_p), dst_e);
  vmassert(intr_disabled());
  unlock;
  vmassert(!intr_disabled());

  return(result);
}

/*===========================================================================*
 *				enqueue					     *
 *===========================================================================*/
PUBLIC void enqueue(rp)
register struct proc *rp;	/* this process is now runnable */
{
/* Add 'rp' to one of the queues of runnable processes.  This function is
 * responsible for inserting a process into one of the scheduling queues.
 * The mechanism is implemented here.  The actual scheduling policy is
 * defined in sched() and pick_proc().
 */
  int q;				/* scheduling queue to use */
  int front;				/* add to front or back */

  NOREC_ENTER(enqueuefunc);

#if DEBUG_SCHED_CHECK
  if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
  if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
#endif

  /* Determine where to insert the process. */
  sched(rp, &q, &front);

  /* Now add the process to the queue. */
  if (rdy_head[q] == NIL_PROC) {		/* add to empty queue */
      rdy_head[q] = rdy_tail[q] = rp; 		/* create a new queue */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  }
  else if (front) {				/* add to head of queue */
      rp->p_nextready = rdy_head[q];		/* chain head of queue */
      rdy_head[q] = rp;				/* set new queue head */
  }
  else {					/* add to tail of queue */
      rdy_tail[q]->p_nextready = rp;		/* chain tail of queue */
      rdy_tail[q] = rp;				/* set new queue tail */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  }

#if DEBUG_SCHED_CHECK
  rp->p_ready = 1;
  CHECK_RUNQUEUES;
#endif

  /*
   * When enqueueing a process with a higher priority than the current one,
   * the current one gets preempted. The current process must be preemptible.
   * Testing the priority also makes sure that a process does not preempt
   * itself.
   */
  if ((proc_ptr->p_priority > rp->p_priority) &&
		  (priv(proc_ptr)->s_flags & PREEMPTIBLE))
     RTS_SET(proc_ptr, RTS_PREEMPTED); /* calls dequeue() */

#if DEBUG_SCHED_CHECK
  CHECK_RUNQUEUES;
#endif

  NOREC_RETURN(enqueuefunc, );
}

/*===========================================================================*
 *				enqueue_head				     *
 *===========================================================================*/
/*
 * Put a process at the front of its run queue. This comes in handy when a
 * process is preempted and taken off its run queue (so that a process that is
 * currently not runnable never sits on a run queue); when it becomes runnable
 * again it must go back at the front to be fair.
 */
PRIVATE void enqueue_head(struct proc *rp)
{
  int q;				/* scheduling queue to use */

#if DEBUG_SCHED_CHECK
  if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
  if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
#endif

  /*
   * The process was runnable, with its quantum not yet expired, when it was
   * dequeued. A process with no time left should have been handled elsewhere
   * and differently.
   */
  vmassert(rp->p_ticks_left);

  q = rp->p_priority;

  /* Now add the process to the queue. */
  if (rdy_head[q] == NIL_PROC) {		/* add to empty queue */
      rdy_head[q] = rdy_tail[q] = rp; 		/* create a new queue */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  }
  else {					/* add to head of queue */
      rp->p_nextready = rdy_head[q];		/* chain head of queue */
      rdy_head[q] = rp;				/* set new queue head */
  }

#if DEBUG_SCHED_CHECK
  rp->p_ready = 1;
  CHECK_RUNQUEUES;
#endif
}

/*===========================================================================*
 *				dequeue					     *
 *===========================================================================*/
PUBLIC void dequeue(rp)
register struct proc *rp;	/* this process is no longer runnable */
{
/* A process must be removed from the scheduling queues, for example, because
 * it has blocked.  If the currently active process is removed, a new process
 * is picked to run by calling pick_proc().
 */
  register int q = rp->p_priority;		/* queue to use */
  register struct proc **xpp;			/* iterate over queue */
  register struct proc *prev_xp;

  NOREC_ENTER(dequeuefunc);

#if DEBUG_STACK_CHECK
  /* Side-effect for kernel: check if the task's stack still is ok? */
  if (iskernelp(rp)) {
	if (*priv(rp)->s_stack_guard != STACK_GUARD)
		minix_panic("stack overrun by task", proc_nr(rp));
  }
#endif

#if DEBUG_SCHED_CHECK
  if(!intr_disabled()) { minix_panic("dequeue with interrupts enabled", NO_NUM); }
  if (! rp->p_ready) minix_panic("dequeue() already unready process", NO_NUM);
#endif

  /* Now make sure that the process is not in its ready queue. Remove the
   * process if it is found. A process can be made unready even if it is not
   * running by being sent a signal that kills it.
   */
  prev_xp = NIL_PROC;
  for (xpp = &rdy_head[q]; *xpp != NIL_PROC; xpp = &(*xpp)->p_nextready) {

      if (*xpp == rp) {				/* found process to remove */
          *xpp = (*xpp)->p_nextready;		/* replace with next chain */
          if (rp == rdy_tail[q])		/* queue tail removed */
              rdy_tail[q] = prev_xp;		/* set new tail */
#if DEBUG_SCHED_CHECK
	  rp->p_ready = 0;
	  CHECK_RUNQUEUES;
#endif
          break;
      }
      prev_xp = *xpp;				/* save previous in chain */
  }

#if DEBUG_SCHED_CHECK
  CHECK_RUNQUEUES;
#endif

  NOREC_RETURN(dequeuefunc, );
}

/*===========================================================================*
 *				sched					     *
 *===========================================================================*/
PRIVATE void sched(rp, queue, front)
register struct proc *rp;			/* process to be scheduled */
int *queue;					/* return: queue to use */
int *front;					/* return: front or back */
{
/* This function determines the scheduling policy.  It is called whenever a
 * process must be added to one of the scheduling queues to decide where to
 * insert it.  As a side-effect the process' priority may be updated.
 */
  int time_left = (rp->p_ticks_left > 0);	/* nonzero if quantum not yet consumed */

  /* Check whether the process has time left. Otherwise give a new quantum
   * and lower the process' priority, unless the process already is in the
   * lowest queue.
   */
  if (! time_left) {				/* quantum consumed ? */
      rp->p_ticks_left = rp->p_quantum_size; 	/* give new quantum */
      if (rp->p_priority < (NR_SCHED_QUEUES-1)) {
          rp->p_priority += 1;			/* lower priority */
      }
  }

  /* If there is time left, the process is added to the front of its queue,
   * so that it can immediately run. The queue to use simply is always the
   * process' current priority.
   */
  *queue = rp->p_priority;
  *front = time_left;
}
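
/* The policy above, by example (sketch): a process at priority 7 that has
 * used up its quantum gets p_ticks_left reset to p_quantum_size, is demoted
 * to priority 8 (one queue lower, unless it already sits in the lowest
 * queue), and is put at the back of that queue. The same process preempted
 * with ticks to spare keeps priority 7 and goes to the front of queue 7, so
 * it resumes as soon as the higher-priority work is gone.
 */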

/*===========================================================================*
 *				pick_proc				     *
 *===========================================================================*/
PRIVATE struct proc * pick_proc(void)
{
/* Decide who to run now.  A new process is selected and returned.
 * When a billable process is selected, record it in 'bill_ptr', so that the
 * clock task can tell who to bill for system time.
 */
  register struct proc *rp;			/* process to run */
  int q;					/* iterate over queues */

  /* Check each of the scheduling queues for ready processes. The number of
   * queues is defined in proc.h, and priorities are set in the task table.
   * The lowest queue contains IDLE, which is always ready.
   */
  for (q=0; q < NR_SCHED_QUEUES; q++) {
	if(!(rp = rdy_head[q])) {
		TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
		continue;
	}
	TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
		rp->p_name, rp->p_endpoint, q););
	vmassert(proc_is_runnable(rp));
	if (priv(rp)->s_flags & BILLABLE)
		bill_ptr = rp;			/* bill for system time */
	return rp;
  }
  return NULL;
}

/*===========================================================================*
 *				balance_queues				     *
 *===========================================================================*/
#define Q_BALANCE_TICKS	 100
PUBLIC void balance_queues(tp)
timer_t *tp;					/* watchdog timer pointer */
{
/* Check the entire process table and give all processes a higher priority.
 * This effectively means giving a new quantum. If a process already is at
 * its maximum priority, its quantum will be renewed.
 */
  static timer_t queue_timer;			/* timer structure to use */
  register struct proc* rp;			/* process table pointer  */
  clock_t next_period;				/* time of next period  */
  int ticks_added = 0;				/* total time added */

  vmassert(!intr_disabled());

  lock;
  for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
      if (! isemptyp(rp)) {				/* check slot use */
	  if (rp->p_priority > rp->p_max_priority) {	/* update priority? */
	      if (proc_is_runnable(rp)) dequeue(rp);	/* take off queue */
	      ticks_added += rp->p_quantum_size;	/* do accounting */
	      rp->p_priority -= 1;			/* raise priority */
	      if (proc_is_runnable(rp)) enqueue(rp);	/* put on queue */
	  }
	  else {
	      ticks_added += rp->p_quantum_size - rp->p_ticks_left;
	      rp->p_ticks_left = rp->p_quantum_size; 	/* give new quantum */
	  }
      }
  }
  unlock;

  /* Now schedule a new watchdog timer to balance the queues again. The
   * period depends on the total amount of quantum ticks added.
   */
  next_period = MAX(Q_BALANCE_TICKS, ticks_added);	/* calculate next */
  set_timer(&queue_timer, get_uptime() + next_period, balance_queues);
}

/*===========================================================================*
 *				lock_send				     *
 *===========================================================================*/
PUBLIC int lock_send(dst_e, m_ptr)
int dst_e;			/* to whom is message being sent? */
message *m_ptr;			/* pointer to message buffer */
{
/* Safe gateway to mini_send() for tasks. */
  int result;
  lock;
  result = mini_send(proc_ptr, dst_e, m_ptr, 0);
  unlock;
  return(result);
}

/*===========================================================================*
 *				endpoint_lookup				     *
 *===========================================================================*/
PUBLIC struct proc *endpoint_lookup(endpoint_t e)
{
	int n;

	if(!isokendpt(e, &n)) return NULL;

	return proc_addr(n);
}

/*===========================================================================*
 *				isokendpt_f				     *
 *===========================================================================*/
#if DEBUG_ENABLE_IPC_WARNINGS
PUBLIC int isokendpt_f(file, line, e, p, fatalflag)
char *file;
int line;
#else
PUBLIC int isokendpt_f(e, p, fatalflag)
#endif
endpoint_t e;
int *p, fatalflag;
{
	int ok = 0;
	/* Convert an endpoint number into a process number.
	 * Return nonzero if the process is alive with the corresponding
	 * generation number, zero otherwise.
	 *
	 * This function is called with file and line number by the
	 * isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
	 * otherwise without. This allows us to print where the
	 * conversion was attempted, making the errors verbose without
	 * adding code for that at every call.
	 *
	 * If fatalflag is nonzero, we must panic if the conversion doesn't
	 * succeed.
	 */
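	/* Example of what the generation check below catches (sketch; the
	 * numbers are illustrative): an endpoint packs a process slot and a
	 * generation, recovered with _ENDPOINT_P(e) and _ENDPOINT_G(e). If a
	 * driver dies and is restarted in the same slot, the new instance
	 * gets a new generation, so a stale endpoint kept by an old client
	 * still maps to a valid slot but proc_addr(*p)->p_endpoint no longer
	 * equals e, and the request is rejected instead of being delivered
	 * to the wrong incarnation.
	 */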
	*p = _ENDPOINT_P(e);
	if(!isokprocn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("kernel:%s:%d: bad endpoint %d: proc %d out of range\n",
			file, line, e, *p);
#endif
	} else if(isemptyn(*p)) {
		kprintf("kernel:%s:%d: bad endpoint %d: proc %d empty\n",
			file, line, e, *p);
	} else if(proc_addr(*p)->p_endpoint != e) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("kernel:%s:%d: bad endpoint %d: proc %d has ept %d (generation %d vs. %d)\n",
			file, line, e, *p, proc_addr(*p)->p_endpoint,
			_ENDPOINT_G(e), _ENDPOINT_G(proc_addr(*p)->p_endpoint));
#endif
	} else ok = 1;

	if(!ok && fatalflag) {
		minix_panic("invalid endpoint ", e);