[minix.git] / kernel / proc.c
blob 5ef4555e9fde898eee72f092b58f9bf542ae85ee
1 /* This file contains essentially all of the process and message handling.
2 * Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
3 * There is one entry point from the outside:
5 * sys_call: a system call, i.e., the kernel is trapped with an INT
7 * As well as several entry points used from the interrupt and task level:
9 * lock_send: send a message to a process
11 * Changes:
12 * Aug 19, 2005 rewrote scheduling code (Jorrit N. Herder)
13 * Jul 25, 2005 rewrote system call handling (Jorrit N. Herder)
14 * May 26, 2005 rewrote message passing functions (Jorrit N. Herder)
15 * May 24, 2005 new notification system call (Jorrit N. Herder)
16 * Oct 28, 2004 nonblocking send and receive calls (Jorrit N. Herder)
18 * The code here is critical to make everything work and is important for the
19 * overall performance of the system. A large fraction of the code deals with
20 * list manipulation. To make this both easy to understand and fast to execute,
21 * pointer pointers are used throughout the code. Pointer pointers prevent
22 * exceptions for the head or tail of a linked list.
24 * node_t *queue, *new_node; // assume these as global variables
25 * node_t **xpp = &queue; // get pointer pointer to head of queue
26 * while (*xpp != NULL) // find last pointer of the linked list
27 * xpp = &(*xpp)->next; // get pointer to next pointer
28 * *xpp = new_node; // now replace the end (the NULL pointer)
29 * new_node->next = NULL; // and mark the new end of the list
31 * For example, when adding a new node to the end of the list, one normally
32 * makes an exception for an empty list and looks up the end of the list for
33 * nonempty lists. As shown above, this is not required with pointer pointers.
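The idiom above can be seen in isolation in the following sketch, where node_t is a local stand-in for whatever queue link structure is being manipulated: the naive append must special-case an empty list, while the pointer-pointer version simply rewrites whichever link happens to be NULL.

/* Illustrative sketch only, not part of proc.c. */
typedef struct node { struct node *next; } node_t;

/* Naive append: the empty list is a special case. */
void append_naive(node_t **head, node_t *new_node)
{
  node_t *p;
  new_node->next = NULL;
  if (*head == NULL) { *head = new_node; return; }
  for (p = *head; p->next != NULL; p = p->next)
      ;                             /* walk to the last node */
  p->next = new_node;
}

/* Pointer-pointer append: one loop, no special case; the same shape is
 * used for the caller queue in mini_send() further down.
 */
void append_pp(node_t **head, node_t *new_node)
{
  node_t **xpp = head;              /* address of the link to rewrite */
  while (*xpp != NULL)
      xpp = &(*xpp)->next;          /* advance to the next link field */
  *xpp = new_node;                  /* overwrite the NULL link */
  new_node->next = NULL;            /* mark the new end */
}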
36 #include <minix/com.h>
37 #include <minix/callnr.h>
38 #include <minix/endpoint.h>
39 #include <stddef.h>
40 #include <signal.h>
41 #include <minix/portio.h>
42 #include <minix/u64.h>
44 #include "debug.h"
45 #include "kernel.h"
46 #include "proc.h"
47 #include "vm.h"
49 /* Scheduling and message passing functions. The functions are available to
50 * other parts of the kernel through lock_...(). The lock temporarily disables
51 * interrupts to prevent race conditions.
53 FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
54 message *m_ptr, int flags));
55 FORWARD _PROTOTYPE( int mini_receive, (struct proc *caller_ptr, int src,
56 message *m_ptr, int flags));
57 FORWARD _PROTOTYPE( int mini_senda, (struct proc *caller_ptr,
58 asynmsg_t *table, size_t size));
59 FORWARD _PROTOTYPE( int deadlock, (int function,
60 register struct proc *caller, int src_dst));
61 FORWARD _PROTOTYPE( int try_async, (struct proc *caller_ptr));
62 FORWARD _PROTOTYPE( int try_one, (struct proc *src_ptr, struct proc *dst_ptr,
63 int *postponed));
64 FORWARD _PROTOTYPE( void sched, (struct proc *rp, int *queue, int *front));
65 FORWARD _PROTOTYPE( void pick_proc, (void));
67 #define PICK_ANY 1
68 #define PICK_HIGHERONLY 2
70 #define BuildNotifyMessage(m_ptr, src, dst_ptr) \
71 (m_ptr)->m_type = NOTIFY_FROM(src); \
72 (m_ptr)->NOTIFY_TIMESTAMP = get_uptime(); \
73 switch (src) { \
74 case HARDWARE: \
75 (m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_int_pending; \
76 priv(dst_ptr)->s_int_pending = 0; \
77 break; \
78 case SYSTEM: \
79 (m_ptr)->NOTIFY_ARG = priv(dst_ptr)->s_sig_pending; \
80 priv(dst_ptr)->s_sig_pending = 0; \
81 break; \
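The macro above assembles a notification in place: the message type encodes the source, the timestamp is the current uptime, and for the HARDWARE and SYSTEM pseudo-sources the argument carries, and then clears, the destination's pending interrupt or signal bits. A rough sketch of the same steps as a plain function follows; every *_stub name and constant here is invented for the illustration and is not the kernel's real definition.

/* Illustrative sketch only, not part of proc.c. */
struct msg_stub  { int m_type; unsigned long timestamp; unsigned long arg; };
struct priv_stub { unsigned long s_int_pending, s_sig_pending; };

#define HARDWARE_SRC  -1            /* stand-ins for the pseudo-sources */
#define SYSTEM_SRC    -2

void build_notify_stub(struct msg_stub *m, int src, struct priv_stub *dst_priv,
        unsigned long uptime)
{
  m->m_type = src;                  /* real code: NOTIFY_FROM(src)   */
  m->timestamp = uptime;            /* real code: get_uptime()       */
  switch (src) {
  case HARDWARE_SRC:                /* pass and clear pending IRQ bits */
      m->arg = dst_priv->s_int_pending;
      dst_priv->s_int_pending = 0;
      break;
  case SYSTEM_SRC:                  /* pass and clear pending signal bits */
      m->arg = dst_priv->s_sig_pending;
      dst_priv->s_sig_pending = 0;
      break;
  }
}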
84 /*===========================================================================*
85 * QueueMess *
86 *===========================================================================*/
87 PRIVATE int QueueMess(endpoint_t ep, vir_bytes msg_lin, struct proc *dst)
89 int k;
90 phys_bytes addr;
91 NOREC_ENTER(queuemess);
92 /* Queue a message from the src process (in memory) to the dst
93 * process (using dst process table entry). Do the actual copy into the
94 * kernel here; it is an error if that copy fails.
96 vmassert(!(dst->p_misc_flags & MF_DELIVERMSG));
97 vmassert(dst->p_delivermsg_lin);
98 vmassert(isokendpt(ep, &k));
100 #if 0
101 if(INMEMORY(dst)) {
102 PHYS_COPY_CATCH(msg_lin, dst->p_delivermsg_lin,
103 sizeof(message), addr);
104 if(!addr) {
105 PHYS_COPY_CATCH(vir2phys(&ep), dst->p_delivermsg_lin,
106 sizeof(ep), addr);
107 if(!addr) {
108 NOREC_RETURN(queuemess, OK);
112 #endif
114 PHYS_COPY_CATCH(msg_lin, vir2phys(&dst->p_delivermsg), sizeof(message), addr);
115 if(addr) {
116 NOREC_RETURN(queuemess, EFAULT);
119 dst->p_delivermsg.m_source = ep;
120 dst->p_misc_flags |= MF_DELIVERMSG;
122 NOREC_RETURN(queuemess, OK);
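QueueMess() stashes the message in the destination's process-table slot, stamps the source endpoint, and raises MF_DELIVERMSG; the copy into the destination's own buffer only happens later, in schedcheck()/delivermsg(), when that process is next about to run. A minimal sketch of this stash-now, deliver-on-next-schedule pattern, using simplified stand-in types rather than the kernel's structures:

/* Illustrative sketch only, not part of proc.c. */
struct message_stub { int m_source; int m_type; };
struct proc_stub {
  struct message_stub pending;      /* like p_delivermsg           */
  int has_pending;                  /* like the MF_DELIVERMSG flag */
};

/* Called in sender context: just stash the message and mark the flag. */
void queue_mess_stub(struct proc_stub *dst, const struct message_stub *m, int src_ep)
{
  dst->pending = *m;
  dst->pending.m_source = src_ep;
  dst->has_pending = 1;
}

/* Called just before 'dst' runs again: copy to its own buffer. */
void deliver_stub(struct proc_stub *dst, struct message_stub *user_buf)
{
  if (dst->has_pending) {
      *user_buf = dst->pending;     /* real code: delivermsg() copies to
                                     * p_delivermsg_lin and may suspend
                                     * on a page fault (VMSUSPEND).     */
      dst->has_pending = 0;
  }
}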
125 /*===========================================================================*
126 * schedcheck *
127 *===========================================================================*/
128 PUBLIC void schedcheck(void)
130 /* This function is called an instant before proc_ptr is
131 * to be scheduled again.
133 NOREC_ENTER(schedch);
134 vmassert(intr_disabled());
135 if(next_ptr) {
136 proc_ptr = next_ptr;
137 next_ptr = NULL;
139 vmassert(proc_ptr);
140 vmassert(!proc_ptr->p_rts_flags);
141 while (proc_ptr->p_misc_flags &
142 (MF_DELIVERMSG | MF_SC_DEFER | MF_SC_TRACE | MF_SC_ACTIVE)) {
144 vmassert(!next_ptr);
145 vmassert(!proc_ptr->p_rts_flags);
146 if (proc_ptr->p_misc_flags & MF_DELIVERMSG) {
147 TRACE(VF_SCHEDULING, printf("delivering to %s / %d\n",
148 proc_ptr->p_name, proc_ptr->p_endpoint););
149 if(delivermsg(proc_ptr) == VMSUSPEND) {
150 vmassert(next_ptr);
151 TRACE(VF_SCHEDULING,
152 printf("suspending %s / %d\n",
153 proc_ptr->p_name,
154 proc_ptr->p_endpoint););
155 vmassert(proc_ptr->p_rts_flags);
156 vmassert(next_ptr != proc_ptr);
159 else if (proc_ptr->p_misc_flags & MF_SC_DEFER) {
160 /* Perform the system call that we deferred earlier. */
162 #if DEBUG_SCHED_CHECK
163 if (proc_ptr->p_misc_flags & MF_SC_ACTIVE)
164 minix_panic("MF_SC_ACTIVE and MF_SC_DEFER set",
165 NO_NUM);
166 #endif
168 arch_do_syscall(proc_ptr);
170 /* If the process is stopped for signal delivery, and
171 * not blocked sending a message after the system call,
172 * inform PM.
174 if ((proc_ptr->p_misc_flags & MF_SIG_DELAY) &&
175 !RTS_ISSET(proc_ptr, SENDING))
176 sig_delay_done(proc_ptr);
178 else if (proc_ptr->p_misc_flags & MF_SC_TRACE) {
179 /* Trigger a system call leave event if this was a
180 * system call. We must do this after processing the
181 * other flags above, both for tracing correctness and
182 * to be able to use 'break'.
184 if (!(proc_ptr->p_misc_flags & MF_SC_ACTIVE))
185 break;
187 proc_ptr->p_misc_flags &=
188 ~(MF_SC_TRACE | MF_SC_ACTIVE);
190 /* Signal the "leave system call" event.
191 * Block the process.
193 cause_sig(proc_nr(proc_ptr), SIGTRAP);
195 else if (proc_ptr->p_misc_flags & MF_SC_ACTIVE) {
196 /* If MF_SC_ACTIVE was set, remove it now:
197 * we're leaving the system call.
199 proc_ptr->p_misc_flags &= ~MF_SC_ACTIVE;
201 break;
204 /* If proc_ptr is now descheduled,
205 * continue with another process.
207 if (next_ptr) {
208 proc_ptr = next_ptr;
209 next_ptr = NULL;
212 TRACE(VF_SCHEDULING, printf("starting %s / %d\n",
213 proc_ptr->p_name, proc_ptr->p_endpoint););
214 #if DEBUG_TRACE
215 proc_ptr->p_schedules++;
216 #endif
217 NOREC_RETURN(schedch, );
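Before letting proc_ptr run, schedcheck() keeps draining its pending-work flags: a stashed message is delivered, a deferred system call is resumed, a traced system call raises its leave event, and a plain MF_SC_ACTIVE is simply cleared. The sketch below mimics only the order of those cases; the flag values are illustrative, and the real handlers (delivermsg, arch_do_syscall, cause_sig) are reduced to printouts.

/* Illustrative sketch only, not part of proc.c. */
#include <stdio.h>

enum { F_DELIVERMSG = 1, F_SC_DEFER = 2, F_SC_TRACE = 4, F_SC_ACTIVE = 8 };

void drain_flags_stub(unsigned *flags)
{
  while (*flags & (F_DELIVERMSG | F_SC_DEFER | F_SC_TRACE | F_SC_ACTIVE)) {
      if (*flags & F_DELIVERMSG) {
          printf("deliver stashed message\n");      /* delivermsg()       */
          *flags &= ~F_DELIVERMSG;
      } else if (*flags & F_SC_DEFER) {
          printf("resume deferred system call\n");  /* arch_do_syscall()  */
          *flags &= ~F_SC_DEFER;
      } else if (*flags & F_SC_TRACE) {
          printf("signal 'leave system call'\n");   /* cause_sig(SIGTRAP) */
          *flags &= ~(F_SC_TRACE | F_SC_ACTIVE);
          break;                                    /* process now stops  */
      } else {                                      /* F_SC_ACTIVE only   */
          *flags &= ~F_SC_ACTIVE;
          break;
      }
  }
}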
220 /*===========================================================================*
221 * sys_call *
222 *===========================================================================*/
223 PUBLIC int sys_call(call_nr, src_dst_e, m_ptr, bit_map)
224 int call_nr; /* system call number and flags */
225 int src_dst_e; /* src to receive from or dst to send to */
226 message *m_ptr; /* pointer to message in the caller's space */
227 long bit_map; /* notification event set or flags */
229 /* System calls are done by trapping to the kernel with an INT instruction.
230 * The trap is caught and sys_call() is called to send or receive a message
231 * (or both). The caller is always given by 'proc_ptr'.
233 register struct proc *caller_ptr = proc_ptr; /* get pointer to caller */
234 int mask_entry; /* bit to check in send mask */
235 int group_size; /* used for deadlock check */
236 int result; /* the system call's result */
237 int src_dst_p; /* Process slot number */
238 size_t msg_size;
240 /* If this process is subject to system call tracing, handle that first. */
241 if (caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) {
242 /* Are we tracing this process, and is it the first sys_call entry? */
243 if ((caller_ptr->p_misc_flags & (MF_SC_TRACE | MF_SC_DEFER)) ==
244 MF_SC_TRACE) {
245 /* We must notify the tracer before processing the actual
246 * system call. If we don't, the tracer would not be able to obtain
247 * the input message. Postpone the entire system call.
249 caller_ptr->p_misc_flags &= ~MF_SC_TRACE;
250 caller_ptr->p_misc_flags |= MF_SC_DEFER;
252 /* Signal the "enter system call" event. Block the process. */
253 cause_sig(proc_nr(caller_ptr), SIGTRAP);
255 /* Preserve the return register's value. */
256 return caller_ptr->p_reg.retreg;
259 /* If the MF_SC_DEFER flag is set, the syscall is now being resumed. */
260 caller_ptr->p_misc_flags &= ~MF_SC_DEFER;
262 #if DEBUG_SCHED_CHECK
263 if (caller_ptr->p_misc_flags & MF_SC_ACTIVE)
264 minix_panic("MF_SC_ACTIVE already set", NO_NUM);
265 #endif
267 /* Set a flag to allow reliable tracing of leaving the system call. */
268 caller_ptr->p_misc_flags |= MF_SC_ACTIVE;
271 #if DEBUG_SCHED_CHECK
272 if(caller_ptr->p_misc_flags & MF_DELIVERMSG) {
273 kprintf("sys_call: MF_DELIVERMSG on for %s / %d\n",
274 caller_ptr->p_name, caller_ptr->p_endpoint);
275 minix_panic("MF_DELIVERMSG on", NO_NUM);
277 #endif
279 #if 0
280 if(src_dst_e != 4 && src_dst_e != 5 &&
281 caller_ptr->p_endpoint != 4 && caller_ptr->p_endpoint != 5) {
282 if(call_nr == SEND)
283 kprintf("(%d SEND to %d) ", caller_ptr->p_endpoint, src_dst_e);
284 else if(call_nr == RECEIVE)
285 kprintf("(%d RECEIVE from %d) ", caller_ptr->p_endpoint, src_dst_e);
286 else if(call_nr == SENDREC)
287 kprintf("(%d SENDREC to %d) ", caller_ptr->p_endpoint, src_dst_e);
288 else
289 kprintf("(%d %d to/from %d) ", caller_ptr->p_endpoint, call_nr, src_dst_e);
291 #endif
293 #if DEBUG_SCHED_CHECK
294 if (RTS_ISSET(caller_ptr, SLOT_FREE))
296 kprintf("called by the dead?!?\n");
297 return EINVAL;
299 #endif
301 /* Check destination. SENDA is special because its argument is a table and
302 * not a single destination. RECEIVE is the only call that accepts ANY (in
303 * addition to a real endpoint). The other calls (SEND, SENDREC,
304 * and NOTIFY) require the endpoint to correspond to a process. In addition,
305 * it is necessary to check whether a process is allowed to send to a given
306 * destination.
308 if (call_nr == SENDA)
310 /* No destination argument */
312 else if (src_dst_e == ANY)
314 if (call_nr != RECEIVE)
316 #if 0
317 kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
318 call_nr, proc_nr(caller_ptr), src_dst_e);
319 #endif
320 return EINVAL;
322 src_dst_p = src_dst_e;
324 else
326 /* Require a valid source and/or destination process. */
327 if(!isokendpt(src_dst_e, &src_dst_p)) {
328 #if 0
329 kprintf("sys_call: trap %d by %d with bad endpoint %d\n",
330 call_nr, proc_nr(caller_ptr), src_dst_e);
331 #endif
332 return EDEADSRCDST;
335 /* If the call is to send to a process, i.e., for SEND, SENDNB,
336 * SENDREC or NOTIFY, verify that the caller is allowed to send to
337 * the given destination.
339 if (call_nr != RECEIVE)
341 if (!may_send_to(caller_ptr, src_dst_p)) {
342 #if DEBUG_ENABLE_IPC_WARNINGS
343 kprintf(
344 "sys_call: ipc mask denied trap %d from %d to %d\n",
345 call_nr, caller_ptr->p_endpoint, src_dst_e);
346 #endif
347 return(ECALLDENIED); /* call denied by ipc mask */
352 /* Only allow non-negative call_nr values less than 32 */
353 if (call_nr < 0 || call_nr >= 32)
355 #if DEBUG_ENABLE_IPC_WARNINGS
356 kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
357 call_nr, proc_nr(caller_ptr), src_dst_p);
358 #endif
359 return(ETRAPDENIED); /* trap denied by mask or kernel */
362 /* Check if the process has privileges for the requested call. Calls to the
363 * kernel may only be SENDREC, because tasks always reply and may not block
364 * if the caller doesn't do receive().
366 if (!(priv(caller_ptr)->s_trap_mask & (1 << call_nr))) {
367 #if DEBUG_ENABLE_IPC_WARNINGS
368 kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
369 call_nr, proc_nr(caller_ptr), src_dst_p);
370 #endif
371 return(ETRAPDENIED); /* trap denied by mask or kernel */
374 if ((iskerneln(src_dst_p) && call_nr != SENDREC && call_nr != RECEIVE)) {
375 #if DEBUG_ENABLE_IPC_WARNINGS
376 kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
377 call_nr, proc_nr(caller_ptr), src_dst_e);
378 #endif
379 return(ETRAPDENIED); /* trap denied by mask or kernel */
382 /* Get and check the size of the argument in bytes.
383 * Normally this is just the size of a regular message, but in the
384 * case of SENDA the argument is a table.
386 if(call_nr == SENDA) {
387 msg_size = (size_t) src_dst_e;
389 /* Limit size to something reasonable. An arbitrary choice is 16
390 * times the number of process table entries.
392 if (msg_size > 16*(NR_TASKS + NR_PROCS))
393 return EDOM;
394 msg_size *= sizeof(asynmsg_t); /* convert to bytes */
395 } else {
396 msg_size = sizeof(*m_ptr);
399 /* Check for a possible deadlock for blocking SEND(REC) and RECEIVE. */
400 if (call_nr == SEND || call_nr == SENDREC || call_nr == RECEIVE) {
401 if (group_size = deadlock(call_nr, caller_ptr, src_dst_p)) {
402 #if 0
403 kprintf("sys_call: trap %d from %d to %d deadlocked, group size %d\n",
404 call_nr, proc_nr(caller_ptr), src_dst_p, group_size);
405 #endif
406 return(ELOCKED);
410 /* Now check if the call is known and try to perform the request. The only
411 * system calls that exist in MINIX are sending and receiving messages.
412 * - SENDREC: combines SEND and RECEIVE in a single system call
413 * - SEND: sender blocks until its message has been delivered
414 * - RECEIVE: receiver blocks until an acceptable message has arrived
415 * - NOTIFY: asynchronous call; deliver notification or mark pending
416 * - SENDA: list of asynchronous send requests
418 switch(call_nr) {
419 case SENDREC:
420 /* A flag is set so that notifications cannot interrupt SENDREC. */
421 caller_ptr->p_misc_flags |= MF_REPLY_PEND;
422 /* fall through */
423 case SEND:
424 result = mini_send(caller_ptr, src_dst_e, m_ptr, 0);
425 if (call_nr == SEND || result != OK)
426 break; /* done, or SEND failed */
427 /* fall through for SENDREC */
428 case RECEIVE:
429 if (call_nr == RECEIVE)
430 caller_ptr->p_misc_flags &= ~MF_REPLY_PEND;
431 result = mini_receive(caller_ptr, src_dst_e, m_ptr, 0);
432 break;
433 case NOTIFY:
434 result = mini_notify(caller_ptr, src_dst_e);
435 break;
436 case SENDNB:
437 result = mini_send(caller_ptr, src_dst_e, m_ptr, NON_BLOCKING);
438 break;
439 case SENDA:
440 result = mini_senda(caller_ptr, (asynmsg_t *)m_ptr, (size_t)src_dst_e);
441 break;
442 default:
443 result = EBADCALL; /* illegal system call */
446 /* Now, return the result of the system call to the caller. */
447 return(result);
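Two of the checks above are plain bit tests: the call number must be one of at most 32 traps enabled in the caller's s_trap_mask, and, for sends, the destination must be permitted by the caller's send mask, which is what may_send_to() is taken to verify. The sketch below restates those tests over stand-in fields; the field names and the representation of the send mask are assumptions made only for the illustration.

/* Illustrative sketch only, not part of proc.c. */
struct caller_stub {
  unsigned allowed_traps;           /* bit n set => trap number n allowed */
  unsigned char send_mask[32];      /* send_mask[dst] != 0 => may send    */
};

int check_trap_stub(const struct caller_stub *c, int call_nr, int dst_slot)
{
  if (call_nr < 0 || call_nr >= 32)
      return -1;                    /* ETRAPDENIED in the real code       */
  if (!(c->allowed_traps & (1u << call_nr)))
      return -1;                    /* trap not enabled for this process  */
  if (dst_slot >= 0 && dst_slot < 32 && !c->send_mask[dst_slot])
      return -2;                    /* ECALLDENIED: IPC mask forbids dst  */
  return 0;
}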
450 /*===========================================================================*
451 * deadlock *
452 *===========================================================================*/
453 PRIVATE int deadlock(function, cp, src_dst)
454 int function; /* trap number */
455 register struct proc *cp; /* pointer to caller */
456 int src_dst; /* src or dst process */
458 /* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
459 * a cyclic dependency of blocking send and receive calls. The only cyclic
460 * dependency that is not fatal is if the caller and target directly SEND(REC)
461 * and RECEIVE to each other. If a deadlock is found, the group size is
462 * returned. Otherwise zero is returned.
464 register struct proc *xp; /* process pointer */
465 int group_size = 1; /* start with only caller */
466 int trap_flags;
467 #if DEBUG_ENABLE_IPC_WARNINGS
468 static struct proc *processes[NR_PROCS + NR_TASKS];
469 processes[0] = cp;
470 #endif
472 while (src_dst != ANY) { /* check while process nr */
473 int src_dst_e;
474 xp = proc_addr(src_dst); /* follow chain of processes */
475 #if DEBUG_ENABLE_IPC_WARNINGS
476 processes[group_size] = xp;
477 #endif
478 group_size ++; /* extra process in group */
480 /* Check whether the last process in the chain has a dependency. If it
481 * has not, the cycle cannot be closed and we are done.
483 if (RTS_ISSET(xp, RECEIVING)) { /* xp has dependency */
484 if(xp->p_getfrom_e == ANY) src_dst = ANY;
485 else okendpt(xp->p_getfrom_e, &src_dst);
486 } else if (RTS_ISSET(xp, SENDING)) { /* xp has dependency */
487 okendpt(xp->p_sendto_e, &src_dst);
488 } else {
489 return(0); /* not a deadlock */
492 /* Now check if there is a cyclic dependency. For group sizes of two,
493 * a combination of SEND(REC) and RECEIVE is not fatal. Larger groups
494 * or other combinations indicate a deadlock.
496 if (src_dst == proc_nr(cp)) { /* possible deadlock */
497 if (group_size == 2) { /* caller and src_dst */
498 /* The function number is magically converted to flags. */
499 if ((xp->p_rts_flags ^ (function << 2)) & SENDING) {
500 return(0); /* not a deadlock */
503 #if DEBUG_ENABLE_IPC_WARNINGS
505 int i;
506 kprintf("deadlock between these processes:\n");
507 for(i = 0; i < group_size; i++) {
508 kprintf(" %10s ", processes[i]->p_name);
509 proc_stacktrace(processes[i]);
512 #endif
513 return(group_size); /* deadlock found */
516 return(0); /* not a deadlock */
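The walk above can be restated compactly: starting from the caller's target, follow each process' blocked-on dependency; if the chain comes back to the caller there is a cycle, and the number of processes visited is the group size. The sketch below does exactly that over a simple blocked_on array (slot indices stand in for endpoints) and omits the two-party SEND(REC)/RECEIVE exception that the real code tolerates.

/* Illustrative sketch only, not part of proc.c. */
#define NOT_BLOCKED (-1)

int deadlock_stub(const int *blocked_on, int nprocs, int caller, int target)
{
  int group_size = 1;               /* start with the caller itself        */
  int p = target;

  while (p != NOT_BLOCKED) {
      group_size++;
      if (p == caller)
          return group_size;        /* chain closed: deadlock of this size */
      if (group_size > nprocs)
          return -1;                /* defensive: malformed input          */
      p = blocked_on[p];            /* follow the chain                    */
  }
  return 0;                         /* chain ended: no deadlock            */
}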
519 /*===========================================================================*
520 * mini_send *
521 *===========================================================================*/
522 PRIVATE int mini_send(caller_ptr, dst_e, m_ptr, flags)
523 register struct proc *caller_ptr; /* who is trying to send a message? */
524 int dst_e; /* to whom is message being sent? */
525 message *m_ptr; /* pointer to message buffer */
526 int flags;
528 /* Send a message from 'caller_ptr' to 'dst'. If 'dst' is blocked waiting
529 * for this message, copy the message to it and unblock 'dst'. If 'dst' is
530 * not waiting at all, or is waiting for another source, queue 'caller_ptr'.
532 register struct proc *dst_ptr;
533 register struct proc **xpp;
534 int dst_p;
535 phys_bytes linaddr;
536 vir_bytes addr;
537 int r;
539 if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
540 sizeof(message)))) {
541 return EFAULT;
543 dst_p = _ENDPOINT_P(dst_e);
544 dst_ptr = proc_addr(dst_p);
546 if (RTS_ISSET(dst_ptr, NO_ENDPOINT))
548 return EDSTDIED;
551 /* Check if 'dst' is blocked waiting for this message. The destination's
552 * SENDING flag may be set when its SENDREC call blocked while sending.
554 if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint)) {
555 /* Destination is indeed waiting for this message. */
556 vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
557 if((r=QueueMess(caller_ptr->p_endpoint, linaddr, dst_ptr)) != OK)
558 return r;
559 RTS_UNSET(dst_ptr, RECEIVING);
560 } else {
561 if(flags & NON_BLOCKING) {
562 return(ENOTREADY);
565 /* Destination is not waiting. Block and dequeue caller. */
566 PHYS_COPY_CATCH(linaddr, vir2phys(&caller_ptr->p_sendmsg),
567 sizeof(message), addr);
569 if(addr) { return EFAULT; }
570 RTS_SET(caller_ptr, SENDING);
571 caller_ptr->p_sendto_e = dst_e;
573 /* Process is now blocked. Put it on the destination's queue. */
574 xpp = &dst_ptr->p_caller_q; /* find end of list */
575 while (*xpp != NIL_PROC) xpp = &(*xpp)->p_q_link;
576 *xpp = caller_ptr; /* add caller to end */
577 caller_ptr->p_q_link = NIL_PROC; /* mark new end of list */
579 return(OK);
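mini_send() is the sending half of the rendezvous: hand the message over immediately if the destination is already blocked in a matching receive, refuse if the caller asked for a non-blocking send, and otherwise block the caller on the destination's queue. The sketch below captures that three-way decision; will_receive_stub() only expresses the intent assumed for the kernel's WILLRECEIVE test (receiving from ANY or from this particular sender), not its actual definition.

/* Illustrative sketch only, not part of proc.c. */
#define ANY_EP (-999)

struct recv_stub { int receiving; int getfrom; int has_pending; };

static int will_receive_stub(const struct recv_stub *d, int src_ep)
{
  return d->receiving && (d->getfrom == ANY_EP || d->getfrom == src_ep);
}

enum send_outcome { DELIVER_NOW, REFUSE, BLOCK_SENDER };

enum send_outcome send_decision_stub(struct recv_stub *d, int src_ep, int non_blocking)
{
  if (will_receive_stub(d, src_ep)) {
      d->has_pending = 1;           /* real code: QueueMess(), then unblock dst */
      d->receiving = 0;
      return DELIVER_NOW;
  }
  if (non_blocking)
      return REFUSE;                /* real code: ENOTREADY                     */
  return BLOCK_SENDER;              /* real code: SENDING + caller-queue append */
}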
582 /*===========================================================================*
583 * mini_receive *
584 *===========================================================================*/
585 PRIVATE int mini_receive(caller_ptr, src_e, m_ptr, flags)
586 register struct proc *caller_ptr; /* process trying to get message */
587 int src_e; /* which message source is wanted */
588 message *m_ptr; /* pointer to message buffer */
589 int flags;
591 /* A process or task wants to get a message. If a message is already queued,
592 * acquire it and deblock the sender. If no message from the desired source
593 * is available, block the caller.
595 register struct proc **xpp;
596 register struct notification **ntf_q_pp;
597 message m;
598 int bit_nr;
599 sys_map_t *map;
600 bitchunk_t *chunk;
601 int i, r, src_id, src_proc_nr, src_p;
602 phys_bytes linaddr;
604 vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
606 if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) m_ptr,
607 sizeof(message)))) {
608 return EFAULT;
611 /* This is where we want our message. */
612 caller_ptr->p_delivermsg_lin = linaddr;
613 caller_ptr->p_delivermsg_vir = (vir_bytes) m_ptr;
615 if(src_e == ANY) src_p = ANY;
616 else
618 okendpt(src_e, &src_p);
619 if (RTS_ISSET(proc_addr(src_p), NO_ENDPOINT))
621 return ESRCDIED;
626 /* Check to see if a message from desired source is already available.
627 * The caller's SENDING flag may be set if SENDREC couldn't send. If it is
628 * set, the process should be blocked.
630 if (!RTS_ISSET(caller_ptr, SENDING)) {
632 /* Check if there are pending notifications, except for SENDREC. */
633 if (! (caller_ptr->p_misc_flags & MF_REPLY_PEND)) {
635 map = &priv(caller_ptr)->s_notify_pending;
636 for (chunk=&map->chunk[0]; chunk<&map->chunk[NR_SYS_CHUNKS]; chunk++) {
637 endpoint_t hisep;
639 /* Find a pending notification from the requested source. */
640 if (! *chunk) continue; /* no bits in chunk */
641 for (i=0; ! (*chunk & (1<<i)); ++i) {} /* look up the bit */
642 src_id = (chunk - &map->chunk[0]) * BITCHUNK_BITS + i;
643 if (src_id >= NR_SYS_PROCS) break; /* out of range */
644 src_proc_nr = id_to_nr(src_id); /* get source proc */
645 #if DEBUG_ENABLE_IPC_WARNINGS
646 if(src_proc_nr == NONE) {
647 kprintf("mini_receive: sending notify from NONE\n");
649 #endif
650 if (src_e!=ANY && src_p != src_proc_nr) continue;/* source not ok */
651 *chunk &= ~(1 << i); /* no longer pending */
653 /* Found a suitable source, deliver the notification message. */
654 BuildNotifyMessage(&m, src_proc_nr, caller_ptr); /* assemble message */
655 hisep = proc_addr(src_proc_nr)->p_endpoint;
656 vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
657 vmassert(src_e == ANY || hisep == src_e);
658 if((r=QueueMess(hisep, vir2phys(&m), caller_ptr)) != OK) {
659 minix_panic("mini_receive: local QueueMess failed", NO_NUM);
661 return(OK); /* report success */
665 /* Check caller queue. Use pointer pointers to keep code simple. */
666 xpp = &caller_ptr->p_caller_q;
667 while (*xpp != NIL_PROC) {
668 if (src_e == ANY || src_p == proc_nr(*xpp)) {
669 #if DEBUG_SCHED_CHECK
670 if (RTS_ISSET(*xpp, SLOT_FREE) || RTS_ISSET(*xpp, NO_ENDPOINT))
672 kprintf("%d: receive from %d; found dead %d (%s)?\n",
673 caller_ptr->p_endpoint, src_e, (*xpp)->p_endpoint,
674 (*xpp)->p_name);
675 return EINVAL;
677 #endif
679 /* Found acceptable message. Copy it and update status. */
680 vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
681 QueueMess((*xpp)->p_endpoint,
682 vir2phys(&(*xpp)->p_sendmsg), caller_ptr);
683 if ((*xpp)->p_misc_flags & MF_SIG_DELAY)
684 sig_delay_done(*xpp);
685 RTS_UNSET(*xpp, SENDING);
686 *xpp = (*xpp)->p_q_link; /* remove from queue */
687 return(OK); /* report success */
689 xpp = &(*xpp)->p_q_link; /* proceed to next */
692 if (caller_ptr->p_misc_flags & MF_ASYNMSG)
694 if (src_e != ANY)
695 r= try_one(proc_addr(src_p), caller_ptr, NULL);
696 else
697 r= try_async(caller_ptr);
699 if (r == OK)
700 return OK; /* Got a message */
704 /* No suitable message is available or the caller couldn't send in SENDREC.
705 * Block the process trying to receive, unless the flags tell otherwise.
707 if ( ! (flags & NON_BLOCKING)) {
708 caller_ptr->p_getfrom_e = src_e;
709 RTS_SET(caller_ptr, RECEIVING);
710 return(OK);
711 } else {
712 return(ENOTREADY);
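The notification scan in mini_receive() treats the pending map as an array of bit chunks: skip empty chunks, find the lowest set bit, turn (chunk, bit) back into a source id, and clear the bit once the notification is taken. The same scan in isolation, with illustrative sizes rather than the kernel's:

/* Illustrative sketch only, not part of proc.c. */
#define CHUNK_BITS  32
#define NCHUNKS      4

typedef unsigned int chunk_t;

/* Returns the id of the first pending source and clears its bit,
 * or -1 if nothing is pending.
 */
int take_pending_stub(chunk_t map[NCHUNKS])
{
  int c, i;
  for (c = 0; c < NCHUNKS; c++) {
      if (!map[c])
          continue;                     /* no bits set in this chunk */
      for (i = 0; !(map[c] & (1u << i)); i++)
          ;                             /* find the lowest set bit   */
      map[c] &= ~(1u << i);             /* no longer pending         */
      return c * CHUNK_BITS + i;        /* chunk/bit -> source id    */
  }
  return -1;
}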
716 /*===========================================================================*
717 * mini_notify *
718 *===========================================================================*/
719 PUBLIC int mini_notify(caller_ptr, dst_e)
720 register struct proc *caller_ptr; /* sender of the notification */
721 endpoint_t dst_e; /* which process to notify */
723 register struct proc *dst_ptr;
724 int src_id; /* source id for late delivery */
725 message m; /* the notification message */
726 int r;
727 int proc_nr;
728 int dst_p;
730 vmassert(intr_disabled());
732 if (!isokendpt(dst_e, &dst_p)) {
733 util_stacktrace();
734 kprintf("mini_notify: bogus endpoint %d\n", dst_e);
735 return EDEADSRCDST;
738 dst_ptr = proc_addr(dst_p);
740 /* Check to see if target is blocked waiting for this message. A process
741 * can be both sending and receiving during a SENDREC system call.
743 if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
744 ! (dst_ptr->p_misc_flags & MF_REPLY_PEND)) {
745 /* Destination is indeed waiting for a message. Assemble a notification
746 * message and deliver it. Copy from pseudo-source HARDWARE, since the
747 * message is in the kernel's address space.
749 BuildNotifyMessage(&m, proc_nr(caller_ptr), dst_ptr);
750 vmassert(!(dst_ptr->p_misc_flags & MF_DELIVERMSG));
751 if((r=QueueMess(caller_ptr->p_endpoint, vir2phys(&m), dst_ptr)) != OK) {
752 minix_panic("mini_notify: local QueueMess failed", NO_NUM);
754 RTS_UNSET(dst_ptr, RECEIVING);
755 return(OK);
758 /* Destination is not ready to receive the notification. Add it to the
759 * bit map with pending notifications. Note the indirectness: the system id
760 * instead of the process number is used in the pending bit map.
762 src_id = priv(caller_ptr)->s_id;
763 set_sys_bit(priv(dst_ptr)->s_notify_pending, src_id);
764 return(OK);
767 #define ASCOMPLAIN(caller, entry, field) \
768 kprintf("kernel:%s:%d: asyn failed for %s in %s " \
769 "(%d/%d, tab 0x%lx)\n",__FILE__,__LINE__, \
770 field, caller->p_name, entry, priv(caller)->s_asynsize, priv(caller)->s_asyntab)
772 #define A_RETRIEVE(entry, field) \
773 if(data_copy(caller_ptr->p_endpoint, \
774 table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
775 SYSTEM, (vir_bytes) &tabent.field, \
776 sizeof(tabent.field)) != OK) {\
777 ASCOMPLAIN(caller_ptr, entry, #field); \
778 return EFAULT; \
781 #define A_INSERT(entry, field) \
782 if(data_copy(SYSTEM, (vir_bytes) &tabent.field, \
783 caller_ptr->p_endpoint, \
784 table_v + (entry)*sizeof(asynmsg_t) + offsetof(struct asynmsg,field),\
785 sizeof(tabent.field)) != OK) {\
786 ASCOMPLAIN(caller_ptr, entry, #field); \
787 return EFAULT; \
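A_RETRIEVE and A_INSERT copy one field of one table entry between the caller's address space and the kernel, addressing it as table base + entry * sizeof(asynmsg_t) + offsetof(field). The sketch below shows the same offsetof arithmetic in ordinary user-space C, with memcpy standing in for data_copy() and asynmsg_stub standing in for asynmsg_t.

/* Illustrative sketch only, not part of proc.c. */
#include <stddef.h>
#include <string.h>

struct asynmsg_stub { unsigned flags; int dst; int result; };

/* Copy the 'result' field of entry 'i' out of a table of entries. */
void retrieve_result_stub(const struct asynmsg_stub *table, size_t i, int *out)
{
  const char *base = (const char *) table;
  memcpy(out,
         base + i * sizeof(struct asynmsg_stub)
              + offsetof(struct asynmsg_stub, result),
         sizeof(*out));
}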
790 /*===========================================================================*
791 * mini_senda *
792 *===========================================================================*/
793 PRIVATE int mini_senda(caller_ptr, table, size)
794 struct proc *caller_ptr;
795 asynmsg_t *table;
796 size_t size;
798 int i, dst_p, done, do_notify, r;
799 unsigned flags;
800 struct proc *dst_ptr;
801 struct priv *privp;
802 message *m_ptr;
803 asynmsg_t tabent;
804 vir_bytes table_v = (vir_bytes) table;
805 vir_bytes linaddr;
807 privp= priv(caller_ptr);
808 if (!(privp->s_flags & SYS_PROC))
810 kprintf(
811 "mini_senda: warning caller has no privilege structure\n");
812 return EPERM;
815 /* Clear table */
816 privp->s_asyntab= -1;
817 privp->s_asynsize= 0;
819 if (size == 0)
821 /* Nothing to do, just return */
822 return OK;
825 if(!(linaddr = umap_local(caller_ptr, D, (vir_bytes) table,
826 size * sizeof(*table)))) {
827 printf("mini_senda: umap_local failed; 0x%lx len 0x%lx\n",
828 table, size * sizeof(*table));
829 return EFAULT;
832 /* Limit size to something reasonable. An arbitrary choice is 16
833 * times the number of process table entries.
835 * (this check has been duplicated in sys_call but is left here
836 * as a sanity check)
838 if (size > 16*(NR_TASKS + NR_PROCS))
840 return EDOM;
843 /* Scan the table */
844 do_notify= FALSE;
845 done= TRUE;
846 for (i= 0; i<size; i++)
849 /* Read status word */
850 A_RETRIEVE(i, flags);
851 flags= tabent.flags;
853 /* Skip empty entries */
854 if (flags == 0)
855 continue;
857 /* Check for reserved bits in the flags field */
858 if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY) ||
859 !(flags & AMF_VALID))
861 return EINVAL;
864 /* Skip entry if AMF_DONE is already set */
865 if (flags & AMF_DONE)
866 continue;
868 /* Get destination */
869 A_RETRIEVE(i, dst);
871 if (!isokendpt(tabent.dst, &dst_p))
873 /* Bad destination, report the error */
874 tabent.result= EDEADSRCDST;
875 A_INSERT(i, result);
876 tabent.flags= flags | AMF_DONE;
877 A_INSERT(i, flags);
879 if (flags & AMF_NOTIFY)
880 do_notify= 1;
881 continue;
884 if (!may_send_to(caller_ptr, dst_p))
886 /* Send denied by IPC mask */
887 tabent.result= ECALLDENIED;
888 A_INSERT(i, result);
889 tabent.flags= flags | AMF_DONE;
890 A_INSERT(i, flags);
892 if (flags & AMF_NOTIFY)
893 do_notify= 1;
894 continue;
897 #if 0
898 kprintf("mini_senda: entry[%d]: flags 0x%x dst %d/%d\n",
899 i, tabent.flags, tabent.dst, dst_p);
900 #endif
902 dst_ptr = proc_addr(dst_p);
904 /* NO_ENDPOINT should be removed */
905 if (dst_ptr->p_rts_flags & NO_ENDPOINT)
907 tabent.result= EDSTDIED;
908 A_INSERT(i, result);
909 tabent.flags= flags | AMF_DONE;
910 A_INSERT(i, flags);
912 if (flags & AMF_NOTIFY)
913 do_notify= TRUE;
914 continue;
917 /* Check if 'dst' is blocked waiting for this message.
918 * If AMF_NOREPLY is set, do not satisfy the receiving part of
919 * a SENDREC.
921 if (WILLRECEIVE(dst_ptr, caller_ptr->p_endpoint) &&
922 (!(flags & AMF_NOREPLY) ||
923 !(dst_ptr->p_misc_flags & MF_REPLY_PEND)))
925 /* Destination is indeed waiting for this message. */
926 m_ptr= &table[i].msg; /* Note: pointer in the
927 * caller's address space.
929 /* Copy message from sender. */
930 tabent.result= QueueMess(caller_ptr->p_endpoint,
931 linaddr + (vir_bytes) &table[i].msg -
932 (vir_bytes) table, dst_ptr);
933 if(tabent.result == OK)
934 RTS_UNSET(dst_ptr, RECEIVING);
936 A_INSERT(i, result);
937 tabent.flags= flags | AMF_DONE;
938 A_INSERT(i, flags);
940 if (flags & AMF_NOTIFY)
941 do_notify= 1;
942 continue;
944 else
946 /* Should inform receiver that something is pending */
947 dst_ptr->p_misc_flags |= MF_ASYNMSG;
948 done= FALSE;
949 continue;
952 if (do_notify)
953 kprintf("mini_senda: should notify caller\n");
954 if (!done)
956 privp->s_asyntab= (vir_bytes)table;
957 privp->s_asynsize= size;
959 return OK;
963 /*===========================================================================*
964 * try_async *
965 *===========================================================================*/
966 PRIVATE int try_async(caller_ptr)
967 struct proc *caller_ptr;
969 int r;
970 struct priv *privp;
971 struct proc *src_ptr;
972 int postponed = FALSE;
974 /* Try all privilege structures */
975 for (privp = BEG_PRIV_ADDR; privp < END_PRIV_ADDR; ++privp)
977 if (privp->s_proc_nr == NONE)
978 continue;
980 src_ptr= proc_addr(privp->s_proc_nr);
982 vmassert(!(caller_ptr->p_misc_flags & MF_DELIVERMSG));
983 r= try_one(src_ptr, caller_ptr, &postponed);
984 if (r == OK)
985 return r;
988 /* Nothing found, clear MF_ASYNMSG unless messages were postponed */
989 if (postponed == FALSE)
990 caller_ptr->p_misc_flags &= ~MF_ASYNMSG;
992 return ESRCH;
996 /*===========================================================================*
997 * try_one *
998 *===========================================================================*/
999 PRIVATE int try_one(src_ptr, dst_ptr, postponed)
1000 struct proc *src_ptr;
1001 struct proc *dst_ptr;
1002 int *postponed;
1004 int i, do_notify, done;
1005 unsigned flags;
1006 size_t size;
1007 endpoint_t dst_e;
1008 asynmsg_t *table_ptr;
1009 message *m_ptr;
1010 struct priv *privp;
1011 asynmsg_t tabent;
1012 vir_bytes table_v;
1013 struct proc *caller_ptr;
1014 int r;
1016 privp= priv(src_ptr);
1018 /* Basic validity checks */
1019 if (privp->s_id == USER_PRIV_ID) return EAGAIN;
1020 if (privp->s_asynsize == 0) return EAGAIN;
1021 if (!may_send_to(src_ptr, proc_nr(dst_ptr))) return EAGAIN;
1023 size= privp->s_asynsize;
1024 table_v = privp->s_asyntab;
1025 caller_ptr = src_ptr;
1027 dst_e= dst_ptr->p_endpoint;
1029 /* Scan the table */
1030 do_notify= FALSE;
1031 done= TRUE;
1032 for (i= 0; i<size; i++)
1034 /* Read status word */
1035 A_RETRIEVE(i, flags);
1036 flags= tabent.flags;
1038 /* Skip empty entries */
1039 if (flags == 0)
1041 continue;
1044 /* Check for reserved bits in the flags field */
1045 if (flags & ~(AMF_VALID|AMF_DONE|AMF_NOTIFY|AMF_NOREPLY) ||
1046 !(flags & AMF_VALID))
1048 kprintf("try_one: bad bits in table\n");
1049 privp->s_asynsize= 0;
1050 return EINVAL;
1053 /* Skip the entry if AMF_DONE is already set */
1054 if (flags & AMF_DONE)
1056 continue;
1059 /* Clear done. We are done when all entries are either empty
1060 * or done at the start of the call.
1062 done= FALSE;
1064 /* Get destination */
1065 A_RETRIEVE(i, dst);
1067 if (tabent.dst != dst_e)
1069 continue;
1072 /* If AMF_NOREPLY is set, do not satisfy the receiving part of
1073 * a SENDREC. Do not unset MF_ASYNMSG later because of this,
1074 * though: this message is still to be delivered later.
1076 if ((flags & AMF_NOREPLY) &&
1077 (dst_ptr->p_misc_flags & MF_REPLY_PEND))
1079 if (postponed != NULL)
1080 *postponed = TRUE;
1082 continue;
1085 /* Deliver message */
1086 table_ptr= (asynmsg_t *)privp->s_asyntab;
1087 m_ptr= &table_ptr[i].msg; /* Note: pointer in the
1088 * caller's address space.
1090 A_RETRIEVE(i, msg);
1091 r = QueueMess(src_ptr->p_endpoint, vir2phys(&tabent.msg),
1092 dst_ptr);
1094 tabent.result= r;
1095 A_INSERT(i, result);
1096 tabent.flags= flags | AMF_DONE;
1097 A_INSERT(i, flags);
1099 if (flags & AMF_NOTIFY)
1101 kprintf("try_one: should notify caller\n");
1103 return OK;
1105 if (done)
1106 privp->s_asynsize= 0;
1107 return EAGAIN;
1110 /*===========================================================================*
1111 * lock_notify *
1112 *===========================================================================*/
1113 PUBLIC int lock_notify(src_e, dst_e)
1114 int src_e; /* (endpoint) sender of the notification */
1115 int dst_e; /* (endpoint) who is to be notified */
1117 /* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
1118 * is explicitly given to prevent confusion about where the call comes from.
1119 * The MINIX kernel is not reentrant, which means that interrupts are disabled
1120 * after the first kernel entry (hardware interrupt, trap, or exception). Locking
1121 * is done by temporarily disabling interrupts.
1123 int result, src_p;
1125 vmassert(!intr_disabled());
1127 if (!isokendpt(src_e, &src_p)) {
1128 kprintf("lock_notify: bogus src: %d\n", src_e);
1129 return EDEADSRCDST;
1132 lock;
1133 vmassert(intr_disabled());
1134 result = mini_notify(proc_addr(src_p), dst_e);
1135 vmassert(intr_disabled());
1136 unlock;
1137 vmassert(!intr_disabled());
1139 return(result);
1142 /*===========================================================================*
1143 * enqueue *
1144 *===========================================================================*/
1145 PUBLIC void enqueue(rp)
1146 register struct proc *rp; /* this process is now runnable */
1148 /* Add 'rp' to one of the queues of runnable processes. This function is
1149 * responsible for inserting a process into one of the scheduling queues.
1150 * The mechanism is implemented here. The actual scheduling policy is
1151 * defined in sched() and pick_proc().
1153 int q; /* scheduling queue to use */
1154 int front; /* add to front or back */
1156 NOREC_ENTER(enqueuefunc);
1158 #if DEBUG_SCHED_CHECK
1159 if(!intr_disabled()) { minix_panic("enqueue with interrupts enabled", NO_NUM); }
1160 if (rp->p_ready) minix_panic("enqueue already ready process", NO_NUM);
1161 #endif
1163 /* Determine where to insert the process. */
1164 sched(rp, &q, &front);
1166 vmassert(q >= 0);
1167 vmassert(q < IDLE_Q || rp->p_endpoint == IDLE);
1169 /* Now add the process to the queue. */
1170 if (rdy_head[q] == NIL_PROC) { /* add to empty queue */
1171 rdy_head[q] = rdy_tail[q] = rp; /* create a new queue */
1172 rp->p_nextready = NIL_PROC; /* mark new end */
1174 else if (front) { /* add to head of queue */
1175 rp->p_nextready = rdy_head[q]; /* chain head of queue */
1176 rdy_head[q] = rp; /* set new queue head */
1178 else { /* add to tail of queue */
1179 rdy_tail[q]->p_nextready = rp; /* chain tail of queue */
1180 rdy_tail[q] = rp; /* set new queue tail */
1181 rp->p_nextready = NIL_PROC; /* mark new end */
1184 #if DEBUG_SCHED_CHECK
1185 rp->p_ready = 1;
1186 CHECK_RUNQUEUES;
1187 #endif
1189 /* Now select the next process to run, if there isn't a current
1190 * process yet, the current process isn't ready any more, or
1191 * it is PREEMPTIBLE.
1193 vmassert(proc_ptr);
1194 if((proc_ptr->p_priority > rp->p_priority) &&
1195 (priv(proc_ptr)->s_flags & PREEMPTIBLE))
1196 pick_proc();
1198 #if DEBUG_SCHED_CHECK
1199 CHECK_RUNQUEUES;
1200 #endif
1202 NOREC_RETURN(enqueuefunc, );
1205 /*===========================================================================*
1206 * dequeue *
1207 *===========================================================================*/
1208 PUBLIC void dequeue(rp)
1209 register struct proc *rp; /* this process is no longer runnable */
1211 /* A process must be removed from the scheduling queues, for example, because
1212 * it has blocked. If the currently active process is removed, a new process
1213 * is picked to run by calling pick_proc().
1215 register int q = rp->p_priority; /* queue to use */
1216 register struct proc **xpp; /* iterate over queue */
1217 register struct proc *prev_xp;
1219 NOREC_ENTER(dequeuefunc);
1221 #if DEBUG_STACK_CHECK
1222 /* Side-effect for kernel: check if the task's stack still is ok? */
1223 if (iskernelp(rp)) {
1224 if (*priv(rp)->s_stack_guard != STACK_GUARD)
1225 minix_panic("stack overrun by task", proc_nr(rp));
1227 #endif
1229 #if DEBUG_SCHED_CHECK
1230 if(!intr_disabled()) { minix_panic("dequeue with interrupts enabled", NO_NUM); }
1231 if (! rp->p_ready) minix_panic("dequeue() already unready process", NO_NUM);
1232 #endif
1234 /* Now make sure that the process is not in its ready queue. Remove the
1235 * process if it is found. A process can be made unready even if it is not
1236 * running by being sent a signal that kills it.
1238 prev_xp = NIL_PROC;
1239 for (xpp = &rdy_head[q]; *xpp != NIL_PROC; xpp = &(*xpp)->p_nextready) {
1241 if (*xpp == rp) { /* found process to remove */
1242 *xpp = (*xpp)->p_nextready; /* replace with next chain */
1243 if (rp == rdy_tail[q]) /* queue tail removed */
1244 rdy_tail[q] = prev_xp; /* set new tail */
1246 #if DEBUG_SCHED_CHECK
1247 rp->p_ready = 0;
1248 CHECK_RUNQUEUES;
1249 #endif
1250 if (rp == proc_ptr || rp == next_ptr) /* active process removed */
1251 pick_proc(); /* pick new process to run */
1252 break;
1254 prev_xp = *xpp; /* save previous in chain */
1257 #if DEBUG_SCHED_CHECK
1258 CHECK_RUNQUEUES;
1259 #endif
1261 NOREC_RETURN(dequeuefunc, );
1264 /*===========================================================================*
1265 * sched *
1266 *===========================================================================*/
1267 PRIVATE void sched(rp, queue, front)
1268 register struct proc *rp; /* process to be scheduled */
1269 int *queue; /* return: queue to use */
1270 int *front; /* return: front or back */
1272 /* This function determines the scheduling policy. It is called whenever a
1273 * process must be added to one of the scheduling queues to decide where to
1274 * insert it. As a side-effect the process' priority may be updated.
1276 int time_left = (rp->p_ticks_left > 0); /* nonzero if quantum is left */
1278 /* Check whether the process has time left. Otherwise give a new quantum
1279 * and lower the process' priority, unless the process already is in the
1280 * lowest queue.
1282 if (! time_left) { /* quantum consumed ? */
1283 rp->p_ticks_left = rp->p_quantum_size; /* give new quantum */
1284 if (rp->p_priority < (IDLE_Q-1)) {
1285 rp->p_priority += 1; /* lower priority */
1289 /* If there is time left, the process is added to the front of its queue,
1290 * so that it can immediately run. The queue to use is simply the
1291 * process' current priority.
1293 *queue = rp->p_priority;
1294 *front = time_left;
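The policy is small enough to restate: an expired quantum earns a fresh quantum plus a demotion of one priority level (never below the level just above IDLE_Q), and only a process that still has time left is put at the front of its queue. The sketch below mirrors that logic over stand-in fields; MAX_Q is an illustrative bound, not the kernel's IDLE_Q.

/* Illustrative sketch only, not part of proc.c. */
#define MAX_Q 14

struct sched_stub { int priority; int ticks_left; int quantum; };

/* Fills *queue and *front like sched(): demote on an expired quantum,
 * and only a process with time left goes to the queue front.
 */
void sched_policy_stub(struct sched_stub *p, int *queue, int *front)
{
  int time_left = (p->ticks_left > 0);
  if (!time_left) {                     /* quantum consumed         */
      p->ticks_left = p->quantum;       /* hand out a fresh quantum */
      if (p->priority < MAX_Q)
          p->priority += 1;             /* demote one level         */
  }
  *queue = p->priority;
  *front = time_left;
}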
1297 /*===========================================================================*
1298 * pick_proc *
1299 *===========================================================================*/
1300 PRIVATE void pick_proc()
1302 /* Decide who to run now. A new process is selected by setting 'next_ptr'.
1303 * When a billable process is selected, record it in 'bill_ptr', so that the
1304 * clock task can tell who to bill for system time.
1306 register struct proc *rp; /* process to run */
1307 int q; /* iterate over queues */
1309 NOREC_ENTER(pick);
1311 /* Check each of the scheduling queues for ready processes. The number of
1312 * queues is defined in proc.h, and priorities are set in the task table.
1313 * The lowest queue contains IDLE, which is always ready.
1315 for (q=0; q < NR_SCHED_QUEUES; q++) {
1316 int found = 0;
1317 if(!(rp = rdy_head[q])) {
1318 TRACE(VF_PICKPROC, printf("queue %d empty\n", q););
1319 continue;
1321 TRACE(VF_PICKPROC, printf("found %s / %d on queue %d\n",
1322 rp->p_name, rp->p_endpoint, q););
1323 next_ptr = rp; /* run process 'rp' next */
1324 vmassert(proc_ptr != next_ptr);
1325 vmassert(!next_ptr->p_rts_flags);
1326 if (priv(rp)->s_flags & BILLABLE)
1327 bill_ptr = rp; /* bill for system time */
1328 NOREC_RETURN(pick, );
1332 /*===========================================================================*
1333 * balance_queues *
1334 *===========================================================================*/
1335 #define Q_BALANCE_TICKS 100
1336 PUBLIC void balance_queues(tp)
1337 timer_t *tp; /* watchdog timer pointer */
1339 /* Check the entire process table and give all processes a higher priority. This
1340 * effectively means giving a new quantum. If a process already is at its
1341 * maximum priority, its quantum will be renewed.
1343 static timer_t queue_timer; /* timer structure to use */
1344 register struct proc* rp; /* process table pointer */
1345 clock_t next_period; /* time of next period */
1346 int ticks_added = 0; /* total time added */
1348 vmassert(!intr_disabled());
1350 lock;
1351 for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
1352 if (! isemptyp(rp)) { /* check slot use */
1353 if (rp->p_priority > rp->p_max_priority) { /* update priority? */
1354 if (rp->p_rts_flags == 0) dequeue(rp); /* take off queue */
1355 ticks_added += rp->p_quantum_size; /* do accounting */
1356 rp->p_priority -= 1; /* raise priority */
1357 if (rp->p_rts_flags == 0) enqueue(rp); /* put on queue */
1359 else {
1360 ticks_added += rp->p_quantum_size - rp->p_ticks_left;
1361 rp->p_ticks_left = rp->p_quantum_size; /* give new quantum */
1365 unlock;
1367 /* Now schedule a new watchdog timer to balance the queues again. The
1368 * period depends on the total amount of quantum ticks added.
1370 next_period = MAX(Q_BALANCE_TICKS, ticks_added); /* calculate next */
1371 set_timer(&queue_timer, get_uptime() + next_period, balance_queues);
1374 /*===========================================================================*
1375 * lock_send *
1376 *===========================================================================*/
1377 PUBLIC int lock_send(dst_e, m_ptr)
1378 int dst_e; /* to whom is message being sent? */
1379 message *m_ptr; /* pointer to message buffer */
1381 /* Safe gateway to mini_send() for tasks. */
1382 int result;
1383 lock;
1384 result = mini_send(proc_ptr, dst_e, m_ptr, 0);
1385 unlock;
1386 return(result);
1389 /*===========================================================================*
1390 * endpoint_lookup *
1391 *===========================================================================*/
1392 PUBLIC struct proc *endpoint_lookup(endpoint_t e)
1394 int n;
1396 if(!isokendpt(e, &n)) return NULL;
1398 return proc_addr(n);
1401 /*===========================================================================*
1402 * isokendpt_f *
1403 *===========================================================================*/
1404 #if DEBUG_ENABLE_IPC_WARNINGS
1405 PUBLIC int isokendpt_f(file, line, e, p, fatalflag)
1406 char *file;
1407 int line;
1408 #else
1409 PUBLIC int isokendpt_f(e, p, fatalflag)
1410 #endif
1411 endpoint_t e;
1412 int *p, fatalflag;
1414 int ok = 0;
1415 /* Convert an endpoint number into a process number.
1416 * Return nonzero if the process is alive with the corresponding
1417 * generation number, zero otherwise.
1419 * This function is called with file and line number by the
1420 * isokendpt_d macro if DEBUG_ENABLE_IPC_WARNINGS is defined,
1421 * otherwise without. This allows us to print where the
1422 * conversion was attempted, making the errors verbose without
1423 * adding code for that at every call.
1425 * If fatalflag is nonzero, we must panic if the conversion doesn't
1426 * succeed.
1428 *p = _ENDPOINT_P(e);
1429 if(!isokprocn(*p)) {
1430 #if DEBUG_ENABLE_IPC_WARNINGS
1431 kprintf("kernel:%s:%d: bad endpoint %d: proc %d out of range\n",
1432 file, line, e, *p);
1433 #endif
1434 } else if(isemptyn(*p)) {
1435 #if 0
1436 kprintf("kernel:%s:%d: bad endpoint %d: proc %d empty\n", file, line, e, *p);
1437 #endif
1438 } else if(proc_addr(*p)->p_endpoint != e) {
1439 #if DEBUG_ENABLE_IPC_WARNINGS
1440 kprintf("kernel:%s:%d: bad endpoint %d: proc %d has ept %d (generation %d vs. %d)\n", file, line,
1441 e, *p, proc_addr(*p)->p_endpoint,
1442 _ENDPOINT_G(e), _ENDPOINT_G(proc_addr(*p)->p_endpoint));
1443 #endif
1444 } else ok = 1;
1445 if(!ok && fatalflag) {
1446 minix_panic("invalid endpoint ", e);
1448 return ok;
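An endpoint combines a process slot number with a generation count, so a stale endpoint referring to a reused slot fails the p_endpoint comparison above. The encoding below is invented purely to illustrate that idea; the kernel's real _ENDPOINT_P and _ENDPOINT_G layout may differ.

/* Illustrative sketch only, not part of proc.c. */
#define SLOT_BITS 10
#define SLOT_MASK ((1 << SLOT_BITS) - 1)

static int make_ep_stub(int generation, int slot)
{
  return (generation << SLOT_BITS) | (slot & SLOT_MASK);
}

static int ep_slot_stub(int ep) { return ep & SLOT_MASK; }
static int ep_gen_stub(int ep)  { return ep >> SLOT_BITS; }

/* A lookup succeeds only if the slot currently holds the same endpoint,
 * i.e. the same generation; otherwise the endpoint is stale.
 */
int lookup_stub(const int *slot_endpoints, int nslots, int ep, int *slot_out)
{
  int slot = ep_slot_stub(ep);
  if (slot < 0 || slot >= nslots)
      return 0;                         /* slot number out of range */
  if (slot_endpoints[slot] != ep)
      return 0;                         /* stale or empty slot      */
  *slot_out = slot;
  return 1;
}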