/* This file contains essentially all of the process and message handling.
 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
 * There is one entry point from the outside:
 *
 *   sys_call:       a system call, i.e., the kernel is trapped with an INT
 *
 * As well as several entry points used from the interrupt and task level:
 *
 *   lock_send:      send a message to a process
 *
 *   Aug 19, 2005    rewrote scheduling code  (Jorrit N. Herder)
 *   Jul 25, 2005    rewrote system call handling  (Jorrit N. Herder)
 *   May 26, 2005    rewrote message passing functions  (Jorrit N. Herder)
 *   May 24, 2005    new notification system call  (Jorrit N. Herder)
 *   Oct 28, 2004    nonblocking send and receive calls  (Jorrit N. Herder)
 *
 * The code here is critical to make everything work and is important for the
 * overall performance of the system. A large fraction of the code deals with
 * list manipulation. To make this both easy to understand and fast to execute
 * pointer pointers are used throughout the code. Pointer pointers prevent
 * exceptions for the head or tail of a linked list.
 *
 *  node_t *queue, *new_node;   // assume these as global variables
 *  node_t **xpp = &queue;      // get pointer pointer to head of queue
 *  while (*xpp != NULL)        // find last pointer of the linked list
 *      xpp = &(*xpp)->next;    // get pointer to next pointer
 *  *xpp = new_node;            // now replace the end (the NULL pointer)
 *  new_node->next = NULL;      // and mark the new end of the list
 *
 * For example, when adding a new node to the end of the list, one normally
 * makes an exception for an empty list and looks up the end of the list for
 * nonempty lists. As shown above, this is not required with pointer pointers.
 */
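/* For illustration only (not part of the original kernel code): the same
 * pointer-pointer idiom also removes a node without special-casing the head
 * of the list, which is essentially how dequeue() below walks the run queues.
 * A minimal sketch:
 *
 *  node_t **xpp;
 *  for (xpp = &queue; *xpp != NULL; xpp = &(*xpp)->next)
 *      if (*xpp == target) {
 *          *xpp = target->next;  // unlink; head and middle look the same
 *          break;
 *      }
 */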
#include <minix/com.h>
#include <minix/endpoint.h>
#include <minix/portio.h>
#include <minix/syslib.h>
/* Scheduling and message passing functions. The functions are available to
 * other parts of the kernel through lock_...(). The lock temporarily disables
 * interrupts to prevent race conditions.
 */
FORWARD _PROTOTYPE( void idle, (void));
FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
		message *m_ptr, int flags));
FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
		message *m_ptr, int flags));
FORWARD _PROTOTYPE( int mini_senda, (struct proc *caller_ptr,
		asynmsg_t *table, size_t size));
FORWARD _PROTOTYPE( int deadlock, (int function,
		register struct proc *caller, int src_dst));
FORWARD _PROTOTYPE( int try_async, (struct proc *caller_ptr));
FORWARD _PROTOTYPE( int try_one, (struct proc *src_ptr, struct proc *dst_ptr,
		int *postponed));
FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
FORWARD _PROTOTYPE( struct proc * pick_proc, (void));
FORWARD _PROTOTYPE( void enqueue_head, (struct proc *rp));

#define PICK_HIGHERONLY	2
#define BuildNotifyMessage(m_ptr, src, dst_ptr) \
	(m_ptr)->m_type = NOTIFY_FROM(src);				\
	(m_ptr)->NOTIFY_TIMESTAMP = get_uptime();			\
	switch (src) {							\
	case HARDWARE:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending;	\
		priv(dst_ptr)->s_int_pending = 0;			\
		break;							\
	case SYSTEM:							\
		(m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending;	\
		priv(dst_ptr)->s_sig_pending = 0;			\
		break;							\
	}
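/* Usage sketch (illustration only, mirroring mini_notify() and mini_receive()
 * further down): the kernel assembles the notification in a local message and
 * then queues it to the destination, with the sender's endpoint as source.
 *
 *  message m;
 *  BuildNotifyMessage(&m, proc_nr(caller_ptr), dst_ptr);
 *  if (QueueMess(caller_ptr->p_endpoint, vir2phys(&m), dst_ptr) != OK)
 *      minix_panic("mini_notify: local QueueMess failed", NO_NUM);
 */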
/*===========================================================================*
 *				QueueMess				     *
 *===========================================================================*/
PRIVATE int QueueMess(endpoint_t ep, vir_bytes msg_lin, struct proc *dst)
{
  int k;
  phys_bytes addr;
  NOREC_ENTER(queuemess);

  /* Queue a message from the src process (in memory) to the dst
   * process (using dst process table entry). Do actual copy to
   * kernel here; it is an error if the copy into the kernel fails.
   */
  vmassert(!(dst->p_misc_flags & MF_DELIVERMSG));
  vmassert(dst->p_delivermsg_lin);
  vmassert(isokendpt(ep, &k));

  /* Try to deliver straight into the destination's own message buffer; the
   * source endpoint is patched in separately (it is the first message field).
   */
  PHYS_COPY_CATCH(msg_lin, dst->p_delivermsg_lin,
	sizeof(message), addr);
  if(!addr) {
	PHYS_COPY_CATCH(vir2phys(&ep), dst->p_delivermsg_lin,
		sizeof(ep), addr);
	if(!addr) {
		NOREC_RETURN(queuemess, OK);
	}
  }

  /* Otherwise copy into the kernel-side buffer; delivermsg() finishes the
   * delivery when the destination is scheduled.
   */
  PHYS_COPY_CATCH(msg_lin, vir2phys(&dst->p_delivermsg), sizeof(message), addr);
  if(addr) {
	NOREC_RETURN(queuemess, EFAULT);
  }

  dst->p_delivermsg.m_source = ep;
  dst->p_misc_flags |= MF_DELIVERMSG;

  NOREC_RETURN(queuemess, OK);
}
/*===========================================================================*
 *				idle					     *
 *===========================================================================*/
PRIVATE void idle(void)
{
  /* This function is called whenever there is no work to do.
   * Halt the CPU, and measure how many timestamp counter ticks are
   * spent not doing anything. This allows test setups to measure
   * the CPU utilization of certain workloads with high precision.
   */
#ifdef CONFIG_IDLE_TSC
  u64_t idle_start;

  read_tsc_64(&idle_start);
#endif

  /* Halt the CPU until the next interrupt resumes execution. */
  halt_cpu();

#ifdef CONFIG_IDLE_TSC
  printf("Kernel: idle active after resuming CPU\n");

  idle_tsc = add64(idle_tsc, sub64(idle_stop, idle_start));
#endif
}
/*===========================================================================*
 *				schedcheck				     *
 *===========================================================================*/
PUBLIC struct proc * schedcheck(void)
{
	/* This function is called an instant before proc_ptr is
	 * to be scheduled again.
	 */
	NOREC_ENTER(schedch);
	vmassert(intr_disabled());

	/*
	 * if the current process is still runnable check the misc flags and let
	 * it run unless it becomes not runnable in the meantime
	 */
	if (proc_is_runnable(proc_ptr))
		goto check_misc_flags;

	/*
	 * if a process becomes not runnable while handling the misc flags, we
	 * need to pick a new one here and start from scratch. Also if the
	 * current process wasn't runnable, we pick a new one here
	 */
not_runnable_pick_new:
	if (proc_is_preempted(proc_ptr)) {
		proc_ptr->p_rts_flags &= ~RTS_PREEMPTED;
		if (proc_is_runnable(proc_ptr))
			enqueue_head(proc_ptr);
	}
	/* this enqueues the process again */
	if (proc_no_quantum(proc_ptr))
		RTS_UNSET(proc_ptr, RTS_NO_QUANTUM);

	/*
	 * if we have no process to run, set IDLE as the current process for
	 * time accounting and put the cpu in an idle state. After the next
	 * timer interrupt the execution resumes here and we can pick another
	 * process. If there is still nothing runnable we "schedule" IDLE again
	 */
	while (!(proc_ptr = pick_proc())) {
		proc_ptr = proc_addr(IDLE);
		if (priv(proc_ptr)->s_flags & BILLABLE)
			bill_ptr = proc_ptr;
		idle();
	}

check_misc_flags:

	vmassert(proc_is_runnable(proc_ptr));
	while (proc_ptr->p_misc_flags &
		(MF_DELIVERMSG | MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {

		vmassert(proc_is_runnable(proc_ptr));
		if (proc_ptr->p_misc_flags & MF_DELIVERMSG) {
			TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
				proc_ptr->p_name, proc_ptr->p_endpoint););
			if(delivermsg(proc_ptr) == VMSUSPEND) {
				TRACE(VF_SCHEDULING,
					printf("suspending %s / %d\n",
					proc_ptr->p_name,
					proc_ptr->p_endpoint););
				vmassert(!proc_is_runnable(proc_ptr));
			}
		}
		else if (proc_ptr->p_misc_flags & MF_SC_DEFER) {
			/* Perform the system call that we deferred earlier. */

#if DEBUG_SCHED_CHECK
			if (proc_ptr->p_misc_flags & MF_SC_ACTIVE)
				minix_panic("MF_SC_ACTIVE and MF_SC_DEFER set",
					NO_NUM);
#endif

			arch_do_syscall(proc_ptr);

			/* If the process is stopped for signal delivery, and
			 * not blocked sending a message after the system call,
			 * inform PM.
			 */
			if ((proc_ptr->p_misc_flags & MF_SIG_DELAY) &&
					!RTS_ISSET(proc_ptr, RTS_SENDING))
				sig_delay_done(proc_ptr);
		}
		else if (proc_ptr->p_misc_flags & MF_SC_TRACE) {
			/* Trigger a system call leave event if this was a
			 * system call. We must do this after processing the
			 * other flags above, both for tracing correctness and
			 * to be able to use 'break'.
			 */
			if (!(proc_ptr->p_misc_flags & MF_SC_ACTIVE))
				break;

			proc_ptr->p_misc_flags &=
				~(MF_SC_TRACE | MF_SC_ACTIVE);

			/* Signal the "leave system call" event.
			 * Block the process.
			 */
			cause_sig(proc_nr(proc_ptr), SIGTRAP);
		}
		else if (proc_ptr->p_misc_flags & MF_SC_ACTIVE) {
			/* If MF_SC_ACTIVE was set, remove it now:
			 * we're leaving the system call.
			 */
			proc_ptr->p_misc_flags &= ~MF_SC_ACTIVE;

			break;
		}

		/*
		 * the selected process might not be runnable anymore. We have
		 * to check it and schedule another one
		 */
		if (!proc_is_runnable(proc_ptr))
			goto not_runnable_pick_new;
	}
	TRACE(VF_SCHEDULING, printf("starting %s / %d\n",
		proc_ptr->p_name, proc_ptr->p_endpoint););

	proc_ptr->p_schedules++;

	proc_ptr = arch_finish_schedcheck();

	NOREC_RETURN(schedch, proc_ptr);
}
/*===========================================================================*
 *				sys_call				     *
 *===========================================================================*/
PUBLIC int sys_call(call_nr, src_dst_e, m_ptr, bit_map)
int call_nr;			/* system call number and flags */
int src_dst_e;			/* src to receive from or dst to send to */
message *m_ptr;			/* pointer to message in the caller's space */
long bit_map;			/* notification event set or flags */
{
/* System calls are done by trapping to the kernel with an INT instruction.
 * The trap is caught and sys_call() is called to send or receive a message
 * (or both). The caller is always given by 'proc_ptr'.
 */
  register struct proc *caller_ptr = proc_ptr;	/* get pointer to caller */
  int result;					/* the system call's result */
  int src_dst_p;				/* Process slot number */
  size_t msg_size;

  /* If this process is subject to system call tracing, handle that first. */
  if (caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) {
	/* Are we tracing this process, and is it the first sys_call entry? */
	if ((caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) ==
							MF_SC_TRACE) {
		/* We must notify the tracer before processing the actual
		 * system call. If we don't, the tracer could not obtain the
		 * input message. Postpone the entire system call.
		 */
		caller_ptr->p_misc_flags &= ~MF_SC_TRACE;
		caller_ptr->p_misc_flags |= MF_SC_DEFER;

		/* Signal the "enter system call" event. Block the process. */
		cause_sig(proc_nr(caller_ptr), SIGTRAP);

		/* Preserve the return register's value. */
		return caller_ptr->p_reg.retreg;
	}

	/* If the MF_SC_DEFER flag is set, the syscall is now being resumed. */
	caller_ptr->p_misc_flags &= ~MF_SC_DEFER;

#if DEBUG_SCHED_CHECK
	if (caller_ptr->p_misc_flags & MF_SC_ACTIVE)
		minix_panic("MF_SC_ACTIVE already set", NO_NUM);
#endif

	/* Set a flag to allow reliable tracing of leaving the system call. */
	caller_ptr->p_misc_flags |= MF_SC_ACTIVE;
  }

#if DEBUG_SCHED_CHECK
  if(caller_ptr->p_misc_flags & MF_DELIVERMSG) {
	kprintf("sys_call: MF_DELIVERMSG on for %s / %d\n",
		caller_ptr->p_name, caller_ptr->p_endpoint);
	minix_panic("MF_DELIVERMSG on", NO_NUM);
  }
#endif

#if 0
  if(src_dst_e != 4 && src_dst_e != 5 &&
	caller_ptr->p_endpoint != 4 && caller_ptr->p_endpoint != 5) {
	if(call_nr == SEND)
		kprintf("(%d SEND to %d) ", caller_ptr->p_endpoint, src_dst_e);
	else if(call_nr == RECEIVE)
		kprintf("(%d RECEIVE from %d) ", caller_ptr->p_endpoint, src_dst_e);
	else if(call_nr == SENDREC)
		kprintf("(%d SENDREC to %d) ", caller_ptr->p_endpoint, src_dst_e);
	else
		kprintf("(%d %d to/from %d) ", caller_ptr->p_endpoint, call_nr, src_dst_e);
  }
#endif

#if DEBUG_SCHED_CHECK
  if (RTS_ISSET(caller_ptr, RTS_SLOT_FREE)) {
	kprintf("called by the dead?!?\n");
	return EINVAL;
  }
#endif

  /* Check destination. SENDA is special because its argument is a table and
   * not a single destination. RECEIVE is the only call that accepts ANY (in
   * addition to a real endpoint). The other calls (SEND, SENDREC,
   * and NOTIFY) require an endpoint to correspond to a process. In addition,
   * it is necessary to check whether a process is allowed to send to a given
   * destination.
   */
  if (call_nr == SENDA) {
	/* No destination argument */
  }
  else if (src_dst_e == ANY) {
	if (call_nr != RECEIVE) {
		kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
			call_nr, proc_nr(caller_ptr), src_dst_e);
		return EINVAL;
	}
	src_dst_p = src_dst_e;
  }
  else {
	/* Require a valid source and/or destination process. */
	if(!isokendpt(src_dst_e, &src_dst_p)) {
		kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
			call_nr, proc_nr(caller_ptr), src_dst_e);
		return EDEADSRCDST;
	}

	/* If the call is to send to a process, i.e., for SEND, SENDNB,
	 * SENDREC or NOTIFY, verify that the caller is allowed to send to
	 * the given destination.
	 */
	if (call_nr != RECEIVE) {
		if (!may_send_to(caller_ptr, src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
			kprintf(
			"sys_call: ipc mask denied trap %d from %d to %d\n",
				call_nr, caller_ptr->p_endpoint, src_dst_e);
#endif
			return(ECALLDENIED);	/* call denied by ipc mask */
		}
	}
  }

  /* Only allow non-negative call_nr values less than 32 */
  if (call_nr < 0 || call_nr >= 32) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
		call_nr, proc_nr(caller_ptr), src_dst_p);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  /* Check if the process has privileges for the requested call. Calls to the
   * kernel may only be SENDREC, because tasks always reply and may not block
   * if the caller doesn't do receive().
   */
  if (!(priv(caller_ptr)->s_trap_mask & (1 << call_nr))) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
		call_nr, proc_nr(caller_ptr), src_dst_p);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  /* SENDA has no src_dst value here, so this check is in mini_senda() as well.
   */
  if (call_nr != SENDREC && call_nr != RECEIVE && call_nr != SENDA &&
	iskerneln(src_dst_p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
	kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
		call_nr, proc_nr(caller_ptr), src_dst_e);
#endif
	return(ETRAPDENIED);		/* trap denied by mask or kernel */
  }

  /* Get and check the size of the argument in bytes.
   * Normally this is just the size of a regular message, but in the
   * case of SENDA the argument is a table.
   */
  if(call_nr == SENDA) {
	msg_size = (size_t) src_dst_e;

	/* Limit size to something reasonable. An arbitrary choice is 16
	 * times the number of process table entries.
	 */
	if (msg_size > 16*(NR_TASKS + NR_PROCS))
		return EDOM;
	msg_size *= sizeof(asynmsg_t);	/* convert to bytes */
  } else {
	msg_size = sizeof(*m_ptr);
  }

  /* Now check if the call is known and try to perform the request. The only
   * system calls that exist in MINIX are sending and receiving messages.
   *   - SENDREC: combines SEND and RECEIVE in a single system call
   *   - SEND:    sender blocks until its message has been delivered
   *   - RECEIVE: receiver blocks until an acceptable message has arrived
   *   - NOTIFY:  asynchronous call; deliver notification or mark pending
   *   - SENDA:   list of asynchronous send requests
   */
  switch(call_nr) {
  case SENDREC:
	/* A flag is set so that notifications cannot interrupt SENDREC. */
	caller_ptr->p_misc_flags |= MF_REPLY_PEND;
	/* fall through */
  case SEND:
	result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
	if (call_nr == SEND || result != OK)
		break;				/* done, or SEND failed */
	/* fall through for SENDREC */
  case RECEIVE:
	if (call_nr == RECEIVE)
		caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
	result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
	break;
  case NOTIFY:
	result = mini_notify(caller_ptr, src_dst_e);
	break;
  case SENDNB:
	result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
	break;
  case SENDA:
	result = mini_senda(caller_ptr, (asynmsg_t *)m_ptr, (size_t)src_dst_e);
	break;
  default:
	result = EBADCALL;			/* illegal system call */
  }

  /* Now, return the result of the system call to the caller. */
  return(result);
}
/*===========================================================================*
 *				deadlock				     *
 *===========================================================================*/
PRIVATE int deadlock(function, cp, src_dst)
int function;					/* trap number */
register struct proc *cp;			/* pointer to caller */
proc_nr_t src_dst;				/* src or dst process */
{
/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
 * a cyclic dependency of blocking send and receive calls. The only cyclic
 * dependency that is not fatal is if the caller and target directly SEND(REC)
 * and RECEIVE to each other. If a deadlock is found, the group size is
 * returned. Otherwise zero is returned.
 */
  register struct proc *xp;			/* process pointer */
  int group_size = 1;				/* start with only caller */
#if DEBUG_ENABLE_IPC_WARNINGS
  static struct proc *processes[NR_PROCS + NR_TASKS];
  processes[0] = cp;
#endif

  while (src_dst != ANY) { 			/* check while process nr */
      xp = proc_addr(src_dst);			/* follow chain of processes */
#if DEBUG_ENABLE_IPC_WARNINGS
      processes[group_size] = xp;
#endif
      group_size ++;				/* extra process in group */

      /* Check whether the last process in the chain has a dependency. If it
       * has not, the cycle cannot be closed and we are done.
       */
      if (RTS_ISSET(xp, RTS_RECEIVING)) {	/* xp has dependency */
	  if(xp->p_getfrom_e == ANY) src_dst = ANY;
	  else okendpt(xp->p_getfrom_e, &src_dst);
      } else if (RTS_ISSET(xp, RTS_SENDING)) {	/* xp has dependency */
	  okendpt(xp->p_sendto_e, &src_dst);
      } else {
	  return(0);				/* not a deadlock */
      }

      /* Now check if there is a cyclic dependency. For group sizes of two,
       * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
       * or other combinations indicate a deadlock.
       */
      if (src_dst == proc_nr(cp)) {		/* possible deadlock */
	  if (group_size == 2) {		/* caller and src_dst */
	      /* The function number is magically converted to flags. */
	      if ((xp->p_rts_flags ^ (function << 2)) & RTS_SENDING) {
	          return(0);			/* not a deadlock */
	      }
	  }
#if DEBUG_ENABLE_IPC_WARNINGS
	  {
		int i;
		kprintf("deadlock between these processes:\n");
		for(i = 0; i < group_size; i++) {
			kprintf(" %10s ", processes[i]->p_name);
			proc_stacktrace(processes[i]);
		}
	  }
#endif
          return(group_size);			/* deadlock found */
      }
  }
  return(0);					/* not a deadlock */
}
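/* Worked example (illustration only): if process A blocks on a send to B,
 * B blocks on a send to C, and C blocks on a send back to A, then following
 * the chain from A visits B and C and arrives back at A with group_size == 3,
 * so a deadlock of size 3 is reported to the caller. Only the two-party case
 * where one side is in SEND(REC) and the other in RECEIVE is tolerated,
 * because that pair resolves itself.
 */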
/*===========================================================================*
 *				mini_send				     *
 *===========================================================================*/
PRIVATE int mini_send(caller_ptr, dst_e, m_ptr, flags)
register struct proc *caller_ptr;	/* who is trying to send a message? */
int dst_e;				/* to whom is message being sent? */
message *m_ptr;				/* pointer to message buffer */
int flags;
{
/* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
 * for this message, copy the message to it and unblock 'dst'. If 'dst' is
 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
 */
  register struct proc *dst_ptr;
  register struct proc **xpp;
  int dst_p, r;
  phys_bytes linaddr;
  phys_bytes addr;

  if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
	sizeof(message)))) {
	return EFAULT;
  }
  dst_p = _ENDPOINT_P(dst_e);
  dst_ptr = proc_addr(dst_p);

  if (RTS_ISSET(dst_ptr, RTS_NO_ENDPOINT))
	return EDSTDIED;

  /* Check if 'dst' is blocked waiting for this message. The destination's
   * RTS_SENDING flag may be set when its SENDREC call blocked while sending.
   */
  if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint)) {
	/* Destination is indeed waiting for this message. */
	vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
	if((r=QueueMess(caller_ptr->p_endpoint, linaddr, dst_ptr)) != OK)
		return r;
	RTS_UNSET(dst_ptr, RTS_RECEIVING);
  } else {
	if(flags & NON_BLOCKING) {
		return(ENOTREADY);
	}

	/* Check for a possible deadlock before actually blocking. */
	if (deadlock(SEND, caller_ptr, dst_p)) {
		return(ELOCKED);
	}

	/* Destination is not waiting. Block and dequeue caller. */
	PHYS_COPY_CATCH(linaddr, vir2phys(&caller_ptr->p_sendmsg),
		sizeof(message), addr);

	if(addr) { return EFAULT; }
	RTS_SET(caller_ptr, RTS_SENDING);
	caller_ptr->p_sendto_e = dst_e;

	/* Process is now blocked. Put it on the destination's queue. */
	xpp = &dst_ptr->p_caller_q;		/* find end of list */
	while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
	*xpp = caller_ptr;			/* add caller to end */
	caller_ptr->p_q_link = NIL_PROC;	/* mark new end of list */
  }
  return(OK);
}
/*===========================================================================*
 *				mini_receive				     *
 *===========================================================================*/
PRIVATE int mini_receive(caller_ptr, src_e, m_ptr, flags)
register struct proc *caller_ptr;	/* process trying to get message */
int src_e;				/* which message source is wanted */
message *m_ptr;				/* pointer to message buffer */
int flags;
{
/* A process or task wants to get a message. If a message is already queued,
 * acquire it and deblock the sender. If no message from the desired source
 * is available block the caller.
 */
  register struct proc **xpp;
  message m;
  sys_map_t *map;
  bitchunk_t *chunk;
  int i, r, src_id, src_proc_nr, src_p;
  endpoint_t hisep;
  phys_bytes linaddr;

  vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));

  if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
	sizeof(message)))) {
	return EFAULT;
  }

  /* This is where we want our message. */
  caller_ptr->p_delivermsg_lin = linaddr;
  caller_ptr->p_delivermsg_vir = (vir_bytes) m_ptr;

  if(src_e == ANY) src_p = ANY;
  else {
	okendpt(src_e, &src_p);
	if (RTS_ISSET(proc_addr(src_p), RTS_NO_ENDPOINT))
		return EDEADSRCDST;
  }

  /* Check to see if a message from desired source is already available. The
   * caller's RTS_SENDING flag may be set if SENDREC couldn't send. If it is
   * set, the process should be blocked.
   */
  if (!RTS_ISSET(caller_ptr, RTS_SENDING)) {

    /* Check if there are pending notifications, except for SENDREC. */
    if (! (caller_ptr->p_misc_flags & MF_REPLY_PEND)) {

        map = &priv(caller_ptr)->s_notify_pending;
        for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {

            /* Find a pending notification from the requested source. */
            if (! *chunk) continue; 			/* no bits in chunk */
            for (i=0; ! (*chunk & (1<<i)); ++i) {} 	/* look up the bit */
            src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
            if (src_id >= NR_SYS_PROCS) break;		/* out of range */
            src_proc_nr = id_to_nr(src_id);		/* get source proc */
#if DEBUG_ENABLE_IPC_WARNINGS
	    if(src_proc_nr == NONE) {
		kprintf("mini_receive: sending notify from NONE\n");
	    }
#endif
            if (src_e!=ANY && src_p != src_proc_nr) continue;/* source not ok */
            *chunk &= ~(1 << i);			/* no longer pending */

            /* Found a suitable source, deliver the notification message. */
	    BuildNotifyMessage(&m, src_proc_nr, caller_ptr); /* assemble message */
	    hisep = proc_addr(src_proc_nr)->p_endpoint;
	    vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	    vmassert(src_e == ANY || hisep == src_e);
	    if((r=QueueMess(hisep, vir2phys(&m), caller_ptr)) != OK) {
		minix_panic("mini_receive: local QueueMess failed", NO_NUM);
	    }
            return(OK);					/* report success */
        }
    }

    /* Check caller queue. Use pointer pointers to keep code simple. */
    xpp = &caller_ptr->p_caller_q;
    while (*xpp != NIL_PROC) {
        if (src_e == ANY || src_p == proc_nr(*xpp)) {
#if DEBUG_SCHED_CHECK
	    if (RTS_ISSET(*xpp, RTS_SLOT_FREE) || RTS_ISSET(*xpp, RTS_NO_ENDPOINT)) {
		kprintf("%d: receive from %d; found dead %d (%s)?\n",
			caller_ptr->p_endpoint, src_e, (*xpp)->p_endpoint,
			(*xpp)->p_name);
		return EINVAL;
	    }
#endif

	    /* Found acceptable message. Copy it and update status. */
	    vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
	    QueueMess((*xpp)->p_endpoint,
		vir2phys(&(*xpp)->p_sendmsg), caller_ptr);
	    if ((*xpp)->p_misc_flags & MF_SIG_DELAY)
		sig_delay_done(*xpp);
	    RTS_UNSET(*xpp, RTS_SENDING);
            *xpp = (*xpp)->p_q_link;		/* remove from queue */
            return(OK);				/* report success */
	}
	xpp = &(*xpp)->p_q_link;		/* proceed to next */
    }

    if (caller_ptr->p_misc_flags & MF_ASYNMSG) {
	if (src_e != ANY)
		r = try_one(proc_addr(src_p), caller_ptr, NULL);
	else
		r = try_async(caller_ptr);

	if (r == OK)
		return OK;	/* Got a message */
    }
  }

  /* No suitable message is available or the caller couldn't send in SENDREC.
   * Block the process trying to receive, unless the flags tell otherwise.
   */
  if ( ! (flags & NON_BLOCKING)) {
	/* Check for a possible deadlock before actually blocking. */
	if (deadlock(RECEIVE, caller_ptr, src_p)) {
		return(ELOCKED);
	}

	caller_ptr->p_getfrom_e = src_e;
	RTS_SET(caller_ptr, RTS_RECEIVING);
	return(OK);
  } else {
	return(ENOTREADY);
  }
}
/*===========================================================================*
 *				mini_notify				     *
 *===========================================================================*/
PUBLIC int mini_notify(caller_ptr, dst_e)
register struct proc *caller_ptr;	/* sender of the notification */
endpoint_t dst_e;			/* which process to notify */
{
  register struct proc *dst_ptr;
  int src_id;				/* source id for late delivery */
  message m;				/* the notification message */
  int r, dst_p;

  vmassert(intr_disabled());

  if (!isokendpt(dst_e, &dst_p)) {
	kprintf("mini_notify: bogus endpoint %d\n", dst_e);
	return EDEADSRCDST;
  }

  dst_ptr = proc_addr(dst_p);

  /* Check to see if target is blocked waiting for this message. A process
   * can be both sending and receiving during a SENDREC system call.
   */
  if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
      ! (dst_ptr->p_misc_flags & MF_REPLY_PEND)) {
	/* Destination is indeed waiting for a message. Assemble a notification
	 * message and deliver it. Copy from pseudo-source HARDWARE, since the
	 * message is in the kernel's address space.
	 */
	BuildNotifyMessage(&m, proc_nr(caller_ptr), dst_ptr);
	vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
	if((r=QueueMess(caller_ptr->p_endpoint, vir2phys(&m), dst_ptr)) != OK) {
		minix_panic("mini_notify: local QueueMess failed", NO_NUM);
	}
	RTS_UNSET(dst_ptr, RTS_RECEIVING);
	return(OK);
  }

  /* Destination is not ready to receive the notification. Add it to the
   * bit map with pending notifications. Note the indirectness: the privilege id
   * instead of the process number is used in the pending bit map.
   */
  src_id = priv(caller_ptr)->s_id;
  set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
  return(OK);
}
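/* Illustration only (mirroring the scan in mini_receive() above): a pending
 * notification is recorded as a single bit, indexed by the sender's privilege
 * id, and is found again by walking the bitmap in BITCHUNK_BITS-sized chunks:
 *
 *  map = &priv(caller_ptr)->s_notify_pending;
 *  for (chunk = &map->chunk[0]; chunk < &map->chunk[NR_SYS_CHUNKS]; chunk++) {
 *      if (!*chunk) continue;                     // no bits set in this chunk
 *      for (i = 0; !(*chunk & (1 << i)); ++i) {}  // find the lowest set bit
 *      src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
 *      ...                                        // deliver and clear the bit
 *  }
 */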
#define ASCOMPLAIN(caller, entry, field)	\
	kprintf("kernel:%s:%d: asyn failed for %s in %s "	\
	"(%d/%d, tab 0x%lx)\n",__FILE__,__LINE__,	\
	field, caller->p_name, entry, priv(caller)->s_asynsize, priv(caller)->s_asyntab)

#define A_RETRIEVE(entry, field)	\
  if(data_copy(caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	SYSTEM, (vir_bytes) &tabent.field,	\
	sizeof(tabent.field)) != OK) {\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		return EFAULT;	\
	}

#define A_INSERT(entry, field)	\
  if(data_copy(SYSTEM, (vir_bytes) &tabent.field, \
	caller_ptr->p_endpoint,	\
	table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
	sizeof(tabent.field)) != OK) {\
		ASCOMPLAIN(caller_ptr, entry, #field);	\
		return EFAULT;	\
	}
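/* Usage sketch (illustration only, as in mini_senda() and try_one() below):
 * each table entry is read and written field by field through these macros,
 * so a fault in the caller's table aborts the call with EFAULT.
 *
 *  A_RETRIEVE(i, flags);       // read table[i].flags into tabent.flags
 *  A_RETRIEVE(i, dst);         // read table[i].dst
 *  ...
 *  tabent.result = r;
 *  A_INSERT(i, result);        // write the per-entry result back
 *  A_INSERT(i, flags);         // mark the entry AMF_DONE
 */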
/*===========================================================================*
 *				mini_senda				     *
 *===========================================================================*/
PRIVATE int mini_senda(struct proc *caller_ptr, asynmsg_t *table, size_t size)
{
	int i, dst_p, done, do_notify;
	unsigned flags;
	struct proc *dst_ptr;
	struct priv *privp;
	asynmsg_t tabent;
	vir_bytes table_v = (vir_bytes) table;
	vir_bytes linaddr;

	privp = priv(caller_ptr);
	if (!(privp->s_flags & SYS_PROC))
	{
		kprintf(
		"mini_senda: warning caller has no privilege structure\n");
		return EPERM;
	}

	/* Clear table */
	privp->s_asyntab = -1;
	privp->s_asynsize = 0;

	if (size == 0)
	{
		/* Nothing to do, just return */
		return OK;
	}

	if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) table,
		size * sizeof(*table)))) {
		printf("mini_senda: umap_local failed; 0x%lx len 0x%lx\n",
			table, size * sizeof(*table));
		return EFAULT;
	}

	/* Limit size to something reasonable. An arbitrary choice is 16
	 * times the number of process table entries.
	 *
	 * (this check has been duplicated in sys_call but is left here
	 * as a sanity check)
	 */
	if (size > 16*(NR_TASKS + NR_PROCS))
	{
		return EDOM;
	}

	/* Scan the table */
	do_notify = FALSE;
	done = TRUE;
	for (i = 0; i<size; i++)
	{
		/* Read status word */
		A_RETRIEVE(i, flags);
		flags = tabent.flags;

		/* Skip empty entries */
		if (flags == 0)
			continue;

		/* Check for reserved bits in the flags field */
		if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY) ||
			!(flags & AMF_VALID))
		{
			return EINVAL;
		}

		/* Skip entry if AMF_DONE is already set */
		if (flags & AMF_DONE)
			continue;

		/* Get destination */
		A_RETRIEVE(i, dst);

		if (!isokendpt(tabent.dst, &dst_p))
		{
			/* Bad destination, report the error */
			tabent.result = EDEADSRCDST;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

		if (iskerneln(dst_p))
		{
			/* Asynchronous sends to the kernel are not allowed */
			tabent.result = ECALLDENIED;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

		if (!may_send_to(caller_ptr, dst_p))
		{
			/* Send denied by IPC mask */
			tabent.result = ECALLDENIED;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

#if 0
		kprintf("mini_senda: entry[%d]: flags 0x%x dst %d/%d\n",
			i, tabent.flags, tabent.dst, dst_p);
#endif

		dst_ptr = proc_addr(dst_p);

		/* RTS_NO_ENDPOINT should be removed */
		if (dst_ptr->p_rts_flags & RTS_NO_ENDPOINT)
		{
			tabent.result = EDSTDIED;
			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
			continue;
		}

		/* Check if 'dst' is blocked waiting for this message.
		 * If AMF_NOREPLY is set, do not satisfy the receiving part of
		 * a SENDREC.
		 */
		if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
			(!(flags & AMF_NOREPLY) ||
			!(dst_ptr->p_misc_flags & MF_REPLY_PEND)))
		{
			/* Destination is indeed waiting for this message. */
			/* Copy message from sender. */
			tabent.result = QueueMess(caller_ptr->p_endpoint,
				linaddr + (vir_bytes) &table[i].msg -
					(vir_bytes) table, dst_ptr);
			if(tabent.result == OK)
				RTS_UNSET(dst_ptr, RTS_RECEIVING);

			A_INSERT(i, result);
			tabent.flags = flags | AMF_DONE;
			A_INSERT(i, flags);

			if (flags & AMF_NOTIFY)
				do_notify = TRUE;
		}
		else
		{
			/* Should inform receiver that something is pending */
			dst_ptr->p_misc_flags |= MF_ASYNMSG;
			done = FALSE;
		}
	}
	if (do_notify)
		kprintf("mini_senda: should notify caller\n");
	if (!done)
	{
		privp->s_asyntab = (vir_bytes) table;
		privp->s_asynsize = size;
	}
	return OK;
}
/*===========================================================================*
 *				try_async				     *
 *===========================================================================*/
PRIVATE int try_async(caller_ptr)
struct proc *caller_ptr;
{
	int r;
	struct priv *privp;
	struct proc *src_ptr;
	int postponed = FALSE;

	/* Try all privilege structures */
	for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; ++privp)
	{
		if (privp->s_proc_nr == NONE)
			continue;

		src_ptr = proc_addr(privp->s_proc_nr);

		vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
		r = try_one(src_ptr, caller_ptr, &postponed);
		if (r == OK)
			return r;
	}

	/* Nothing found, clear MF_ASYNMSG unless messages were postponed */
	if (postponed == FALSE)
		caller_ptr->p_misc_flags &= ~MF_ASYNMSG;

	return ESRCH;
}
/*===========================================================================*
 *				try_one					     *
 *===========================================================================*/
PRIVATE int try_one(struct proc *src_ptr, struct proc *dst_ptr, int *postponed)
{
	int i, do_notify, done;
	unsigned flags;
	size_t size;
	endpoint_t dst_e;
	asynmsg_t tabent;
	vir_bytes table_v;
	struct priv *privp;
	struct proc *caller_ptr;
	int r;

	privp = priv(src_ptr);

	/* Basic validity checks */
	if (privp->s_id == USER_PRIV_ID) return EAGAIN;
	if (privp->s_asynsize == 0) return EAGAIN;
	if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return EAGAIN;

	size = privp->s_asynsize;
	table_v = privp->s_asyntab;
	caller_ptr = src_ptr;

	dst_e = dst_ptr->p_endpoint;

	/* Scan the table */
	do_notify = FALSE;
	done = TRUE;
	for (i = 0; i<size; i++)
	{
		/* Read status word */
		A_RETRIEVE(i, flags);
		flags = tabent.flags;

		/* Skip empty entries */
		if (flags == 0)
			continue;

		/* Check for reserved bits in the flags field */
		if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY) ||
			!(flags & AMF_VALID))
		{
			kprintf("try_one: bad bits in table\n");
			privp->s_asynsize = 0;
			return EINVAL;
		}

		/* Skip entry if AMF_DONE is already set */
		if (flags & AMF_DONE)
			continue;

		/* Clear done. We are done when all entries are either empty
		 * or done at the start of the call.
		 */
		done = FALSE;

		/* Get destination */
		A_RETRIEVE(i, dst);

		if (tabent.dst != dst_e)
			continue;

		/* If AMF_NOREPLY is set, do not satisfy the receiving part of
		 * a SENDREC. Do not unset MF_ASYNMSG later because of this,
		 * though: this message is still to be delivered later.
		 */
		if ((flags & AMF_NOREPLY) &&
			(dst_ptr->p_misc_flags & MF_REPLY_PEND))
		{
			if (postponed != NULL)
				*postponed = TRUE;
			continue;
		}

		/* Deliver message */
		A_RETRIEVE(i, msg);
		r = QueueMess(src_ptr->p_endpoint, vir2phys(&tabent.msg),
			dst_ptr);

		tabent.result = r;
		A_INSERT(i, result);
		tabent.flags = flags | AMF_DONE;
		A_INSERT(i, flags);

		if (flags & AMF_NOTIFY)
			kprintf("try_one: should notify caller\n");

		return r;
	}
	if (done)
	{
		privp->s_asyntab = -1;
		privp->s_asynsize = 0;
	}
	return EAGAIN;
}
/*===========================================================================*
 *				lock_notify				     *
 *===========================================================================*/
PUBLIC int lock_notify(src_e, dst_e)
int src_e;			/* (endpoint) sender of the notification */
int dst_e;			/* (endpoint) who is to be notified */
{
/* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
 * is explicitly given to prevent confusion about where the call comes from.
 * The MINIX kernel is not reentrant, which means that interrupts are disabled
 * after the first kernel entry (hardware interrupt, trap, or exception).
 * Locking is done by temporarily disabling interrupts.
 */
  int result, src_p;

  vmassert(!intr_disabled());

  if (!isokendpt(src_e, &src_p)) {
	kprintf("lock_notify: bogus src: %d\n", src_e);
	return EDEADSRCDST;
  }

  lock;
  vmassert(intr_disabled());
  result = mini_notify(proc_addr(src_p), dst_e);
  vmassert(intr_disabled());
  unlock;
  vmassert(!intr_disabled());

  return(result);
}
/*===========================================================================*
 *				enqueue					     *
 *===========================================================================*/
PUBLIC void enqueue(rp)
register struct proc *rp;	/* this process is now runnable */
{
/* Add 'rp' to one of the queues of runnable processes. This function is
 * responsible for inserting a process into one of the scheduling queues.
 * The mechanism is implemented here. The actual scheduling policy is
 * defined in sched() and pick_proc().
 */
  int q;	 				/* scheduling queue to use */
  int front;					/* add to front or back */

  NOREC_ENTER(enqueuefunc);

#if DEBUG_SCHED_CHECK
  if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
  if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
#endif

  /* Determine where to insert the process. */
  sched(rp, &q, &front);

  /* Now add the process to the queue. */
  if (rdy_head[q] == NIL_PROC) {		/* add to empty queue */
      rdy_head[q] = rdy_tail[q] = rp; 		/* create a new queue */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  }
  else if (front) {				/* add to head of queue */
      rp->p_nextready = rdy_head[q];		/* chain head of queue */
      rdy_head[q] = rp;				/* set new queue head */
  }
  else {					/* add to tail of queue */
      rdy_tail[q]->p_nextready = rp;		/* chain tail of queue */
      rdy_tail[q] = rp;				/* set new queue tail */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  }

#if DEBUG_SCHED_CHECK
  rp->p_ready = 1;
  CHECK_RUNQUEUES;
#endif

  /* When enqueueing a process with a higher priority than the current one,
   * the current process gets preempted. The current process must be
   * preemptible. Testing the priority also makes sure that a process does not
   * preempt itself.
   */
  if ((proc_ptr->p_priority > rp->p_priority) &&
		  (priv(proc_ptr)->s_flags & PREEMPTIBLE))
      RTS_SET(proc_ptr, RTS_PREEMPTED);		/* calls dequeue() */

#if DEBUG_SCHED_CHECK
  CHECK_RUNQUEUES;
#endif

  NOREC_RETURN(enqueuefunc, );
}
/*===========================================================================*
 *				enqueue_head				     *
 *===========================================================================*/
/*
 * Put a process at the front of its run queue. It comes in handy when a
 * process is preempted and removed from the run queue so as not to have a
 * currently not-runnable process on a run queue. We have to put this process
 * back at the front to be fair.
 */
PRIVATE void enqueue_head(struct proc *rp)
{
  int q;	 				/* scheduling queue to use */

#if DEBUG_SCHED_CHECK
  if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
  if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
#endif

  /*
   * the process was runnable without its quantum expired when dequeued. A
   * process with no time left should have been handled elsewhere, and
   * differently.
   */
  vmassert(rp->p_ticks_left);

  q = rp->p_priority;

  /* Now add the process to the queue. */
  if (rdy_head[q] == NIL_PROC) {		/* add to empty queue */
      rdy_head[q] = rdy_tail[q] = rp; 		/* create a new queue */
      rp->p_nextready = NIL_PROC;		/* mark new end */
  }
  else {					/* add to head of queue */
      rp->p_nextready = rdy_head[q];		/* chain head of queue */
      rdy_head[q] = rp;				/* set new queue head */
  }

#if DEBUG_SCHED_CHECK
  rp->p_ready = 1;
  CHECK_RUNQUEUES;
#endif
}
/*===========================================================================*
 *				dequeue					     *
 *===========================================================================*/
PUBLIC void dequeue(rp)
register struct proc *rp;	/* this process is no longer runnable */
{
/* A process must be removed from the scheduling queues, for example, because
 * it has blocked. If the currently active process is removed, a new process
 * is picked to run by calling pick_proc().
 */
  register int q = rp->p_priority;		/* queue to use */
  register struct proc **xpp;			/* iterate over queue */
  register struct proc *prev_xp = NIL_PROC;

  NOREC_ENTER(dequeuefunc);

#if DEBUG_STACK_CHECK
  /* Side-effect for kernel: check if the task's stack is still ok. */
  if (iskernelp(rp)) {
	if (*priv(rp)->s_stack_guard != STACK_GUARD)
		minix_panic("stack overrun by task", proc_nr(rp));
  }
#endif

#if DEBUG_SCHED_CHECK
  if(!intr_disabled()) { minix_panic("dequeue with interrupts enabled", NO_NUM); }
  if (! rp->p_ready) minix_panic("dequeue() already unready process", NO_NUM);
#endif

  /* Now make sure that the process is not in its ready queue. Remove the
   * process if it is found. A process can be made unready even if it is not
   * running by being sent a signal that kills it.
   */
  for (xpp = &rdy_head[q]; *xpp != NIL_PROC; xpp = &(*xpp)->p_nextready) {

      if (*xpp == rp) {				/* found process to remove */
          *xpp = (*xpp)->p_nextready;		/* replace with next chain */
          if (rp == rdy_tail[q])		/* queue tail removed */
              rdy_tail[q] = prev_xp;		/* set new tail */

#if DEBUG_SCHED_CHECK
          rp->p_ready = 0;
          CHECK_RUNQUEUES;
#endif
          break;
      }
      prev_xp = *xpp;				/* save previous in chain */
  }

#if DEBUG_SCHED_CHECK
  CHECK_RUNQUEUES;
#endif

  NOREC_RETURN(dequeuefunc, );
}
/*===========================================================================*
 *				sched					     *
 *===========================================================================*/
PRIVATE void sched(rp, queue, front)
register struct proc *rp;			/* process to be scheduled */
int *queue;					/* return: queue to use */
int *front;					/* return: front or back */
{
/* This function determines the scheduling policy. It is called whenever a
 * process must be added to one of the scheduling queues to decide where to
 * insert it. As a side-effect the process' priority may be updated.
 */
  int time_left = (rp->p_ticks_left > 0);	/* nonzero if quantum is left */

  /* Check whether the process has time left. Otherwise give a new quantum
   * and lower the process' priority, unless the process already is in the
   * lowest queue.
   */
  if (! time_left) {				/* quantum consumed ? */
      rp->p_ticks_left = rp->p_quantum_size; 	/* give new quantum */
      if (rp->p_priority < (NR_SCHED_QUEUES-1)) {
          rp->p_priority += 1;			/* lower priority */
      }
  }

  /* If there is time left, the process is added to the front of its queue,
   * so that it can immediately run. The queue to use simply is always the
   * process' current priority.
   */
  *queue = rp->p_priority;
  *front = time_left;
}
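/* Worked example (illustration only): a process at priority 7 that still has
 * ticks left is put at the front of queue 7 so it can continue immediately.
 * One that has used up its quantum gets a fresh quantum and is appended to the
 * back of queue 8 (one priority level lower), unless it already sits in the
 * lowest queue, in which case it stays there at the back.
 */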
/*===========================================================================*
 *				pick_proc				     *
 *===========================================================================*/
PRIVATE struct proc * pick_proc(void)
{
/* Decide who to run now. A new process is selected and returned.
 * When a billable process is selected, record it in 'bill_ptr', so that the
 * clock task can tell who to bill for system time.
 */
  register struct proc *rp;			/* process to run */
  int q;					/* iterate over queues */

  /* Check each of the scheduling queues for ready processes. The number of
   * queues is defined in proc.h, and priorities are set in the task table.
   * The lowest queue contains IDLE, which is always ready.
   */
  for (q=0; q < NR_SCHED_QUEUES; q++) {
	if(!(rp = rdy_head[q])) {
		TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
		continue;
	}
	TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
		rp->p_name, rp->p_endpoint, q););
	vmassert(proc_is_runnable(rp));
	if (priv(rp)->s_flags & BILLABLE)
		bill_ptr = rp;			/* bill for system time */
	return rp;
  }
  return NULL;
}
/*===========================================================================*
 *				balance_queues				     *
 *===========================================================================*/
#define Q_BALANCE_TICKS	 100
PUBLIC void balance_queues(tp)
timer_t *tp;					/* watchdog timer pointer */
{
/* Check the entire process table and give all processes a higher priority.
 * This effectively means giving a new quantum. If a process already is at its
 * maximum priority, its quantum will be renewed.
 */
  static timer_t queue_timer;			/* timer structure to use */
  register struct proc* rp;			/* process table pointer  */
  clock_t next_period;				/* time of next period */
  int ticks_added = 0;				/* total time added */

  vmassert(!intr_disabled());

  lock;
  for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
      if (! isemptyp(rp)) {				/* check slot use */
	  if (rp->p_priority > rp->p_max_priority) {	/* update priority? */
	      if (proc_is_runnable(rp)) dequeue(rp);	/* take off queue */
	      ticks_added += rp->p_quantum_size;	/* do accounting */
	      rp->p_priority -= 1;			/* raise priority */
	      if (proc_is_runnable(rp)) enqueue(rp);	/* put on queue */
	  }
	  else {
	      ticks_added += rp->p_quantum_size - rp->p_ticks_left;
              rp->p_ticks_left = rp->p_quantum_size; 	/* give new quantum */
	  }
      }
  }
  unlock;

  /* Now schedule a new watchdog timer to balance the queues again. The
   * period depends on the total amount of quantum ticks added.
   */
  next_period = MAX(Q_BALANCE_TICKS, ticks_added);	/* calculate next */
  set_timer(&queue_timer, get_uptime() + next_period, balance_queues);
}
/*===========================================================================*
 *				lock_send				     *
 *===========================================================================*/
PUBLIC int lock_send(dst_e, m_ptr)
int dst_e;			/* to whom is message being sent? */
message *m_ptr;			/* pointer to message buffer */
{
/* Safe gateway to mini_send() for tasks. */
  int result;

  lock;
  result = mini_send(proc_ptr, dst_e, m_ptr, 0);
  unlock;

  return(result);
}
/*===========================================================================*
 *				endpoint_lookup				     *
 *===========================================================================*/
PUBLIC struct proc *endpoint_lookup(endpoint_t e)
{
	int n;

	if(!isokendpt(e, &n)) return NULL;

	return proc_addr(n);
}
/*===========================================================================*
 *				isokendpt_f				     *
 *===========================================================================*/
#if DEBUG_ENABLE_IPC_WARNINGS
PUBLIC int isokendpt_f(file, line, e, p, fatalflag)
char *file;
int line;
#else
PUBLIC int isokendpt_f(e, p, fatalflag)
#endif
endpoint_t e;
int *p, fatalflag;
{
	int ok = 0;
	/* Convert an endpoint number into a process number.
	 * Return nonzero if the process is alive with the corresponding
	 * generation number, zero otherwise.
	 *
	 * This function is called with file and line number by the
	 * isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
	 * otherwise without. This allows us to print where the
	 * conversion was attempted, making the errors verbose without
	 * adding code for that at every call.
	 *
	 * If fatalflag is nonzero, we must panic if the conversion doesn't
	 * succeed.
	 */
	*p = _ENDPOINT_P(e);
	if(!isokprocn(*p)) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("kernel:%s:%d: bad endpoint %d: proc %d out of range\n",
			file, line, e, *p);
#endif
	} else if(isemptyn(*p)) {
#if 0
		kprintf("kernel:%s:%d: bad endpoint %d: proc %d empty\n", file, line, e, *p);
#endif
	} else if(proc_addr(*p)->p_endpoint != e) {
#if DEBUG_ENABLE_IPC_WARNINGS
		kprintf("kernel:%s:%d: bad endpoint %d: proc %d has ept %d (generation %d vs. %d)\n", file, line,
			e, *p, proc_addr(*p)->p_endpoint,
			_ENDPOINT_G(e), _ENDPOINT_G(proc_addr(*p)->p_endpoint));
#endif
	} else ok = 1;
	if(!ok && fatalflag) {
		minix_panic("invalid endpoint ", e);
	}
	return ok;
}