/* this file contains the interface of the network software with the file
 * system.
 *
 * Copyright 1995 Philip Homburg
 *
 * The valid messages and their parameters are:
 *
 * Requests:
 *
 *    m_type        DEVICE      IO_ENDPT    COUNT
 * ---------------------------------------------------
 * | DEV_OPEN     | minor dev | proc nr  | mode      |
 * |--------------+-----------+----------+-----------|
 * | DEV_CLOSE    | minor dev | proc nr  |           |
 * |--------------+-----------+----------+-----------|
 *
 *    m_type        DEVICE      IO_ENDPT    COUNT       IO_GRANT
 * ---------------------------------------------------------------
 * | DEV_READ_S   | minor dev | proc nr  | count     | grant ID  |
 * |--------------+-----------+----------+-----------+-----------|
 * | DEV_WRITE_S  | minor dev | proc nr  | count     | grant ID  |
 * |--------------+-----------+----------+-----------+-----------|
 * | DEV_IOCTL_S  | minor dev | proc nr  | command   | grant ID  |
 * |--------------+-----------+----------+-----------+-----------|
 * | DEV_SELECT   | minor dev | ops      |           |           |
 * |--------------+-----------+----------+-----------+-----------|
 *
 *    m_type
 * ---------------|
 * | DEV_STATUS   |
 * |--------------|
 *
 *    m_type        DEVICE      IO_ENDPT    COUNT
 * ---------------------------------------------------
 * | CANCEL       | minor dev | proc nr  | mode      |
 * |--------------+-----------+----------+-----------|
 *
 * Replies:
 *
 *    m_type          REP_ENDPT   REP_STATUS   REP_IO_GRANT
 * ---------------------------------------------------------|
 * | TASK_REPLY     | proc nr   | status     | grant ID     |
 * |----------------+-----------+------------+--------------|
 *
 *    m_type          REP_ENDPT   REP_STATUS   REP_IO_GRANT
 * ---------------------------------------------------------|
 * | DEV_REVIVE     | proc nr   |            | grant ID     |
 * |----------------+-----------+------------+--------------|
 * | DEV_IO_READY   | minor dev | sel ops    |              |
 * |----------------+-----------+------------+--------------|
 * | DEV_NO_STATUS  |           |            |              |
 * |----------------+-----------+------------+--------------|
 */

#include "inet.h"

#include <sys/svrctl.h>
#include <minix/callnr.h>

#include "mq.h"
#include "qp.h"
#include "proto.h"
#include "generic/type.h"

#include "generic/assert.h"
#include "generic/buf.h"
#include "generic/event.h"
#include "generic/sr.h"
#include "sr_int.h"

THIS_FILE

PUBLIC sr_fd_t sr_fd_table[FD_NR];

PRIVATE mq_t *repl_queue, *repl_queue_tail;
PRIVATE struct vscp_vec s_cp_req[SCPVEC_NR];

FORWARD _PROTOTYPE ( int sr_open, (message *m) );
FORWARD _PROTOTYPE ( void sr_close, (message *m) );
FORWARD _PROTOTYPE ( int sr_rwio, (mq_t *m) );
FORWARD _PROTOTYPE ( int sr_restart_read, (sr_fd_t *fdp) );
FORWARD _PROTOTYPE ( int sr_restart_write, (sr_fd_t *fdp) );
FORWARD _PROTOTYPE ( int sr_restart_ioctl, (sr_fd_t *fdp) );
FORWARD _PROTOTYPE ( int sr_cancel, (message *m) );
FORWARD _PROTOTYPE ( int sr_select, (message *m) );
FORWARD _PROTOTYPE ( void sr_status, (message *m) );
FORWARD _PROTOTYPE ( void sr_reply_, (mq_t *m, int reply, int is_revive) );
FORWARD _PROTOTYPE ( sr_fd_t *sr_getchannel, (int minor));
FORWARD _PROTOTYPE ( acc_t *sr_get_userdata, (int fd, size_t offset,
	size_t count, int for_ioctl) );
FORWARD _PROTOTYPE ( int sr_put_userdata, (int fd, size_t offset,
	acc_t *data, int for_ioctl) );
FORWARD _PROTOTYPE (void sr_select_res, (int fd, unsigned ops) );
FORWARD _PROTOTYPE ( int sr_repl_queue, (int proc, int ref, int operation) );
FORWARD _PROTOTYPE ( int walk_queue, (sr_fd_t *sr_fd, mq_t **q_head_ptr,
	mq_t **q_tail_ptr, int type, int proc_nr, int ref, int first_flag) );
FORWARD _PROTOTYPE ( void process_req_q, (mq_t *mq, mq_t *tail,
	mq_t **tail_ptr) );
FORWARD _PROTOTYPE ( void sr_event, (event_t *evp, ev_arg_t arg) );
FORWARD _PROTOTYPE ( int cp_u2b, (endpoint_t proc, cp_grant_id_t gid,
	vir_bytes offset, acc_t **var_acc_ptr, int size) );
FORWARD _PROTOTYPE ( int cp_b2u, (acc_t *acc_ptr, endpoint_t proc,
	cp_grant_id_t gid, vir_bytes offset) );

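/* Initialize the channel table: mark every slot free, initialize its
 * restart events, and clear the revive reply queue.
 */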
PUBLIC void sr_init()
{
	int i;

	for (i=0; i<FD_NR; i++)
	{
		sr_fd_table[i].srf_flags= SFF_FREE;
		ev_init(&sr_fd_table[i].srf_ioctl_ev);
		ev_init(&sr_fd_table[i].srf_read_ev);
		ev_init(&sr_fd_table[i].srf_write_ev);
	}
	repl_queue= NULL;
}

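/* Main dispatcher: handle one request message from the file system.
 * Open, close, cancel and select are answered here; read, write and
 * ioctl requests are queued and answered through sr_get_userdata()/
 * sr_put_userdata(), with only a SUSPEND acknowledgement sent here.
 */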
PUBLIC void sr_rec(m)
mq_t *m;
{
	int result;
	int send_reply, free_mess;

	if (repl_queue)
	{
		if (m->mq_mess.m_type == CANCEL)
		{
			result= sr_repl_queue(m->mq_mess.IO_ENDPT,
				(int)m->mq_mess.IO_GRANT, 0);

			if (result)
			{
				mq_free(m);
				return;	/* canceled request in queue */
			}
		}
#if 0
		else
			sr_repl_queue(ANY, 0, 0);
#endif
	}

	switch (m->mq_mess.m_type)
	{
	case DEV_OPEN:
		result= sr_open(&m->mq_mess);
		send_reply= 1;
		free_mess= 1;
		break;
	case DEV_CLOSE:
		sr_close(&m->mq_mess);
		result= OK;
		send_reply= 1;
		free_mess= 1;
		break;
	case DEV_READ_S:
	case DEV_WRITE_S:
	case DEV_IOCTL_S:
		result= sr_rwio(m);
		assert(result == OK || result == SUSPEND);
		send_reply= (result == SUSPEND);
		free_mess= 0;
		break;
	case CANCEL:
		result= sr_cancel(&m->mq_mess);
		assert(result == OK || result == EINTR);
		send_reply= (result == EINTR);
		free_mess= 1;
		m->mq_mess.m_type= 0;
		break;
	case DEV_SELECT:
		result= sr_select(&m->mq_mess);
		send_reply= 1;
		free_mess= 1;
		break;
	case DEV_STATUS:
		sr_status(&m->mq_mess);
		result= OK;	/* Satisfy lint. */
		send_reply= 0;
		free_mess= 1;
		break;
	default:
		ip_panic(("unknown message, from %d, type %d",
			m->mq_mess.m_source, m->mq_mess.m_type));
	}
	if (send_reply)
	{
		sr_reply_(m, result, FALSE /* !is_revive */);
	}
	if (free_mess)
		mq_free(m);
}

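/* Register a minor device: record the protocol's open, close, read,
 * write, ioctl, cancel and select handlers in the slot reserved for
 * this minor.
 */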
PUBLIC void sr_add_minor(minor, port, openf, closef, readf, writef,
	ioctlf, cancelf, selectf)
int minor;
int port;
sr_open_t openf;
sr_close_t closef;
sr_read_t readf;
sr_write_t writef;
sr_ioctl_t ioctlf;
sr_cancel_t cancelf;
sr_select_t selectf;
{
	sr_fd_t *sr_fd;

	assert (minor>=0 && minor<FD_NR);

	sr_fd= &sr_fd_table[minor];

	assert(!(sr_fd->srf_flags & SFF_INUSE));

	sr_fd->srf_flags= SFF_INUSE | SFF_MINOR;
	sr_fd->srf_port= port;
	sr_fd->srf_open= openf;
	sr_fd->srf_close= closef;
	sr_fd->srf_write= writef;
	sr_fd->srf_read= readf;
	sr_fd->srf_ioctl= ioctlf;
	sr_fd->srf_cancel= cancelf;
	sr_fd->srf_select= selectf;
}

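/* DEV_OPEN: clone the minor's template entry into a free slot and call
 * the protocol's open handler. Returns the new slot number, which is
 * used as the channel in subsequent requests, or a negative error code.
 */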
PRIVATE int sr_open(m)
message *m;
{
	sr_fd_t *sr_fd;

	int minor= m->DEVICE;
	int i, fd;

	if (minor<0 || minor>=FD_NR)
	{
		DBLOCK(1, printf("replying EINVAL\n"));
		return EINVAL;
	}
	if (!(sr_fd_table[minor].srf_flags & SFF_MINOR))
	{
		DBLOCK(1, printf("replying ENXIO\n"));
		return ENXIO;
	}
	for (i=0; i<FD_NR && (sr_fd_table[i].srf_flags & SFF_INUSE); i++);

	if (i>=FD_NR)
	{
		DBLOCK(1, printf("replying ENFILE\n"));
		return ENFILE;
	}

	sr_fd= &sr_fd_table[i];
	*sr_fd= sr_fd_table[minor];
	sr_fd->srf_flags= SFF_INUSE;
	fd= (*sr_fd->srf_open)(sr_fd->srf_port, i, sr_get_userdata,
		sr_put_userdata, 0 /* no put_pkt */, sr_select_res);
	if (fd<0)
	{
		sr_fd->srf_flags= SFF_FREE;
		DBLOCK(1, printf("replying %d\n", fd));
		return fd;
	}
	sr_fd->srf_fd= fd;
	return i;
}

PRIVATE void sr_close(m)
message *m;
{
	sr_fd_t *sr_fd;

	sr_fd= sr_getchannel(m->DEVICE);
	assert (sr_fd);

	if (sr_fd->srf_flags & SFF_BUSY)
		ip_panic(("close on busy channel"));

	assert (!(sr_fd->srf_flags & SFF_MINOR));
	(*sr_fd->srf_close)(sr_fd->srf_fd);
	sr_fd->srf_flags= SFF_FREE;
}

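/* Queue and start a DEV_READ_S, DEV_WRITE_S or DEV_IOCTL_S request.
 * If an operation of the same kind is already in progress the message
 * is appended to that queue and SUSPEND is returned; otherwise the
 * protocol handler is invoked directly.
 */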
PRIVATE int sr_rwio(m)
mq_t *m;
{
	sr_fd_t *sr_fd;
	mq_t **q_head_ptr, **q_tail_ptr;
	int ip_flag, susp_flag, first_flag;
	int r;
	ioreq_t request;
	size_t size;

	sr_fd= sr_getchannel(m->mq_mess.DEVICE);
	assert (sr_fd);

	switch(m->mq_mess.m_type)
	{
	case DEV_READ_S:
		q_head_ptr= &sr_fd->srf_read_q;
		q_tail_ptr= &sr_fd->srf_read_q_tail;
		ip_flag= SFF_READ_IP;
		susp_flag= SFF_READ_SUSP;
		first_flag= SFF_READ_FIRST;
		break;
	case DEV_WRITE_S:
		q_head_ptr= &sr_fd->srf_write_q;
		q_tail_ptr= &sr_fd->srf_write_q_tail;
		ip_flag= SFF_WRITE_IP;
		susp_flag= SFF_WRITE_SUSP;
		first_flag= SFF_WRITE_FIRST;
		break;
	case DEV_IOCTL_S:
		q_head_ptr= &sr_fd->srf_ioctl_q;
		q_tail_ptr= &sr_fd->srf_ioctl_q_tail;
		ip_flag= SFF_IOCTL_IP;
		susp_flag= SFF_IOCTL_SUSP;
		first_flag= SFF_IOCTL_FIRST;
		break;
	default:
		ip_panic(("illegal case entry"));
	}

	if (sr_fd->srf_flags & ip_flag)
	{
		assert(sr_fd->srf_flags & susp_flag);
		assert(*q_head_ptr);

		(*q_tail_ptr)->mq_next= m;
		*q_tail_ptr= m;
		return SUSPEND;
	}
	assert(!*q_head_ptr);

	*q_tail_ptr= *q_head_ptr= m;
	sr_fd->srf_flags |= ip_flag;
	assert(!(sr_fd->srf_flags & first_flag));
	sr_fd->srf_flags |= first_flag;

	switch(m->mq_mess.m_type)
	{
	case DEV_READ_S:
		r= (*sr_fd->srf_read)(sr_fd->srf_fd,
			m->mq_mess.COUNT);
		break;
	case DEV_WRITE_S:
		r= (*sr_fd->srf_write)(sr_fd->srf_fd,
			m->mq_mess.COUNT);
		break;
	case DEV_IOCTL_S:
		request= m->mq_mess.REQUEST;
		size= (request >> 16) & _IOCPARM_MASK;
		if (size>MAX_IOCTL_S)
		{
			DBLOCK(1, printf("replying EINVAL\n"));
			r= sr_put_userdata(sr_fd-sr_fd_table, EINVAL,
				NULL, 1);
			assert(r == OK);
			assert(sr_fd->srf_flags & first_flag);
			sr_fd->srf_flags &= ~first_flag;
			return OK;
		}
		r= (*sr_fd->srf_ioctl)(sr_fd->srf_fd, request);
		break;
	default:
		ip_panic(("illegal case entry"));
	}

	assert(sr_fd->srf_flags & first_flag);
	sr_fd->srf_flags &= ~first_flag;

	assert(r == OK || r == SUSPEND ||
		(printf("r= %d\n", r), 0));
	if (r == SUSPEND)
		sr_fd->srf_flags |= susp_flag;
	else
		mq_free(m);
	return r;
}

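/* The sr_restart_* helpers re-issue the request at the head of the
 * corresponding queue after a suspended operation has completed.
 */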
PRIVATE int sr_restart_read(sr_fd)
sr_fd_t *sr_fd;
{
	mq_t *mp;
	int r;

	mp= sr_fd->srf_read_q;
	assert(mp);

	if (sr_fd->srf_flags & SFF_READ_IP)
	{
		assert(sr_fd->srf_flags & SFF_READ_SUSP);
		return SUSPEND;
	}
	sr_fd->srf_flags |= SFF_READ_IP;

	r= (*sr_fd->srf_read)(sr_fd->srf_fd,
		mp->mq_mess.COUNT);

	assert(r == OK || r == SUSPEND ||
		(printf("r= %d\n", r), 0));
	if (r == SUSPEND)
		sr_fd->srf_flags |= SFF_READ_SUSP;
	return r;
}

PRIVATE int sr_restart_write(sr_fd)
sr_fd_t *sr_fd;
{
	mq_t *mp;
	int r;

	mp= sr_fd->srf_write_q;
	assert(mp);

	if (sr_fd->srf_flags & SFF_WRITE_IP)
	{
		assert(sr_fd->srf_flags & SFF_WRITE_SUSP);
		return SUSPEND;
	}
	sr_fd->srf_flags |= SFF_WRITE_IP;

	r= (*sr_fd->srf_write)(sr_fd->srf_fd,
		mp->mq_mess.COUNT);

	assert(r == OK || r == SUSPEND ||
		(printf("r= %d\n", r), 0));
	if (r == SUSPEND)
		sr_fd->srf_flags |= SFF_WRITE_SUSP;
	return r;
}

PRIVATE int sr_restart_ioctl(sr_fd)
sr_fd_t *sr_fd;
{
	mq_t *mp;
	int r;

	mp= sr_fd->srf_ioctl_q;
	assert(mp);

	if (sr_fd->srf_flags & SFF_IOCTL_IP)
	{
		assert(sr_fd->srf_flags & SFF_IOCTL_SUSP);
		return SUSPEND;
	}
	sr_fd->srf_flags |= SFF_IOCTL_IP;

	r= (*sr_fd->srf_ioctl)(sr_fd->srf_fd,
		mp->mq_mess.COUNT);

	assert(r == OK || r == SUSPEND ||
		(printf("r= %d\n", r), 0));
	if (r == SUSPEND)
		sr_fd->srf_flags |= SFF_IOCTL_SUSP;
	return r;
}

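/* CANCEL: find the matching request in the ioctl, read or write queue
 * and remove it, calling the protocol's cancel handler if it is the
 * request currently in progress.
 */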
PRIVATE int sr_cancel(m)
message *m;
{
	sr_fd_t *sr_fd;
	int result;
	int proc_nr, ref;

	result=EINTR;
	proc_nr= m->IO_ENDPT;
	ref= (int)m->IO_GRANT;
	sr_fd= sr_getchannel(m->DEVICE);
	assert (sr_fd);

	result= walk_queue(sr_fd, &sr_fd->srf_ioctl_q,
		&sr_fd->srf_ioctl_q_tail, SR_CANCEL_IOCTL,
		proc_nr, ref, SFF_IOCTL_FIRST);
	if (result != EAGAIN)
		return result;

	result= walk_queue(sr_fd, &sr_fd->srf_read_q,
		&sr_fd->srf_read_q_tail, SR_CANCEL_READ,
		proc_nr, ref, SFF_READ_FIRST);
	if (result != EAGAIN)
		return result;

	result= walk_queue(sr_fd, &sr_fd->srf_write_q,
		&sr_fd->srf_write_q_tail, SR_CANCEL_WRITE,
		proc_nr, ref, SFF_WRITE_FIRST);
	if (result != EAGAIN)
		return result;

	ip_panic((
	"request not found: from %d, type %d, MINOR= %d, PROC= %d, REF= %d",
		m->m_source, m->m_type, m->DEVICE,
		m->IO_ENDPT, m->IO_GRANT));
}

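/* DEV_SELECT: translate the requested SEL_* operations into SR_SELECT_*
 * flags, ask the protocol, and translate the immediate result back.
 */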
PRIVATE int sr_select(m)
message *m;
{
	sr_fd_t *sr_fd;
	int r;
	unsigned m_ops, i_ops;

	sr_fd= sr_getchannel(m->DEVICE);
	assert (sr_fd);

	sr_fd->srf_select_proc= m->m_source;

	m_ops= m->IO_ENDPT;
	i_ops= 0;
	if (m_ops & SEL_RD) i_ops |= SR_SELECT_READ;
	if (m_ops & SEL_WR) i_ops |= SR_SELECT_WRITE;
	if (m_ops & SEL_ERR) i_ops |= SR_SELECT_EXCEPTION;
	if (!(m_ops & SEL_NOTIFY)) i_ops |= SR_SELECT_POLL;

	r= (*sr_fd->srf_select)(sr_fd->srf_fd, i_ops);
	if (r < 0)
		return r;
	m_ops= 0;
	if (r & SR_SELECT_READ) m_ops |= SEL_RD;
	if (r & SR_SELECT_WRITE) m_ops |= SEL_WR;
	if (r & SR_SELECT_EXCEPTION) m_ops |= SEL_ERR;

	return m_ops;
}

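/* DEV_STATUS: report one pending event to the file system, either a
 * queued revive reply or a ready select result; DEV_NO_STATUS is sent
 * when nothing is pending.
 */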
PRIVATE void sr_status(m)
message *m;
{
	int fd, result;
	unsigned m_ops;
	sr_fd_t *sr_fd;
	mq_t *mq;

	mq= repl_queue;
	if (mq != NULL)
	{
		repl_queue= mq->mq_next;

		mq->mq_mess.m_type= DEV_REVIVE;
		result= send(mq->mq_mess.m_source, &mq->mq_mess);
		if (result != OK)
			ip_panic(("unable to send"));
		mq_free(mq);

		return;
	}

	for (fd=0, sr_fd= sr_fd_table; fd<FD_NR; fd++, sr_fd++)
	{
		if ((sr_fd->srf_flags &
			(SFF_SELECT_R|SFF_SELECT_W|SFF_SELECT_X)) == 0)
		{
			/* Nothing to report */
			continue;
		}
		if (sr_fd->srf_select_proc != m->m_source)
		{
			/* Wrong process */
			continue;
		}

		m_ops= 0;
		if (sr_fd->srf_flags & SFF_SELECT_R) m_ops |= SEL_RD;
		if (sr_fd->srf_flags & SFF_SELECT_W) m_ops |= SEL_WR;
		if (sr_fd->srf_flags & SFF_SELECT_X) m_ops |= SEL_ERR;

		sr_fd->srf_flags &= ~(SFF_SELECT_R|SFF_SELECT_W|SFF_SELECT_X);

		m->m_type= DEV_IO_READY;
		m->DEV_MINOR= fd;
		m->DEV_SEL_OPS= m_ops;

		result= send(m->m_source, m);
		if (result != OK)
			ip_panic(("unable to send"));
		return;
	}

	m->m_type= DEV_NO_STATUS;
	result= send(m->m_source, m);
	if (result != OK)
		ip_panic(("unable to send"));
}

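/* Scan a request queue for the entry matching the given endpoint and
 * grant. Returns OK if the in-progress head request was canceled,
 * EINTR if a queued request was removed, and EAGAIN if no match was
 * found.
 */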
PRIVATE int walk_queue(sr_fd, q_head_ptr, q_tail_ptr, type, proc_nr, ref,
	first_flag)
sr_fd_t *sr_fd;
mq_t **q_head_ptr;
mq_t **q_tail_ptr;
int type;
int proc_nr;
int ref;
int first_flag;
{
	mq_t *q_ptr_prv, *q_ptr;
	int result;

	for(q_ptr_prv= NULL, q_ptr= *q_head_ptr; q_ptr;
		q_ptr_prv= q_ptr, q_ptr= q_ptr->mq_next)
	{
		if (q_ptr->mq_mess.IO_ENDPT != proc_nr)
			continue;
		if ((int)q_ptr->mq_mess.IO_GRANT != ref)
			continue;
		if (!q_ptr_prv)
		{
			assert(!(sr_fd->srf_flags & first_flag));
			sr_fd->srf_flags |= first_flag;

			result= (*sr_fd->srf_cancel)(sr_fd->srf_fd, type);
			assert(result == OK);

			*q_head_ptr= q_ptr->mq_next;
			mq_free(q_ptr);

			assert(sr_fd->srf_flags & first_flag);
			sr_fd->srf_flags &= ~first_flag;

			return OK;
		}
		q_ptr_prv->mq_next= q_ptr->mq_next;
		mq_free(q_ptr);
		if (!q_ptr_prv->mq_next)
			*q_tail_ptr= q_ptr_prv;
		return EINTR;
	}
	return EAGAIN;
}

PRIVATE sr_fd_t *sr_getchannel(minor)
int minor;
{
	sr_fd_t *loc_fd;

	compare(minor, >=, 0);
	compare(minor, <, FD_NR);

	loc_fd= &sr_fd_table[minor];

	assert (!(loc_fd->srf_flags & SFF_MINOR) &&
		(loc_fd->srf_flags & SFF_INUSE));

	return loc_fd;
}

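/* Send a TASK_REPLY to the file system. A revive reply is announced
 * with notify() and queued on repl_queue until the next DEV_STATUS.
 */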
PRIVATE void sr_reply_(mq, status, is_revive)
mq_t *mq;
int status;
int is_revive;
{
	int result, proc, ref;
	message reply, *mp;

	proc= mq->mq_mess.IO_ENDPT;
	ref= (int)mq->mq_mess.IO_GRANT;

	if (is_revive)
		mp= &mq->mq_mess;
	else
		mp= &reply;

	mp->m_type= TASK_REPLY;
	mp->REP_ENDPT= proc;
	mp->REP_STATUS= status;
	mp->REP_IO_GRANT= ref;

	if (is_revive)
	{
		notify(mq->mq_mess.m_source);
		result= ELOCKED;
	}
	else
	{
		result= send(mq->mq_mess.m_source, mp);
	}

	if (result == ELOCKED && is_revive)
	{
		mq->mq_next= NULL;
		if (repl_queue)
			repl_queue_tail->mq_next= mq;
		else
			repl_queue= mq;
		repl_queue_tail= mq;
		return;
	}
	if (result != OK)
		ip_panic(("unable to send"));
	if (is_revive)
		mq_free(mq);
}

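/* Callback used by the protocol code to fetch user data for a write or
 * ioctl request. A count of zero signals completion: the head request
 * is replied to with 'offset' as its status and the next queued request
 * is restarted through the event mechanism.
 */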
PRIVATE acc_t *sr_get_userdata (fd, offset, count, for_ioctl)
int fd;
size_t offset;
size_t count;
int for_ioctl;
{
	sr_fd_t *loc_fd;
	mq_t **head_ptr, *m, *mq;
	int ip_flag, susp_flag, first_flag;
	int result, suspended, is_revive;
	acc_t *acc;
	event_t *evp;
	ev_arg_t arg;

	loc_fd= &sr_fd_table[fd];

	if (for_ioctl)
	{
		head_ptr= &loc_fd->srf_ioctl_q;
		evp= &loc_fd->srf_ioctl_ev;
		ip_flag= SFF_IOCTL_IP;
		susp_flag= SFF_IOCTL_SUSP;
		first_flag= SFF_IOCTL_FIRST;
	}
	else
	{
		head_ptr= &loc_fd->srf_write_q;
		evp= &loc_fd->srf_write_ev;
		ip_flag= SFF_WRITE_IP;
		susp_flag= SFF_WRITE_SUSP;
		first_flag= SFF_WRITE_FIRST;
	}

	assert (loc_fd->srf_flags & ip_flag);

	if (!count)
	{
		m= *head_ptr;
		mq= m->mq_next;
		*head_ptr= mq;
		result= (int)offset;
		is_revive= !(loc_fd->srf_flags & first_flag);
		sr_reply_(m, result, is_revive);
		suspended= (loc_fd->srf_flags & susp_flag);
		loc_fd->srf_flags &= ~(ip_flag|susp_flag);
		if (suspended)
		{
			if (mq)
			{
				arg.ev_ptr= loc_fd;
				ev_enqueue(evp, sr_event, arg);
			}
		}
		return NULL;
	}

	result= cp_u2b ((*head_ptr)->mq_mess.IO_ENDPT,
		(int)(*head_ptr)->mq_mess.IO_GRANT, offset, &acc, count);

	return result<0 ? NULL : acc;
}

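/* Counterpart of sr_get_userdata for read and ioctl replies: deliver
 * data to the caller, or, when 'data' is NULL, complete the head
 * request with 'offset' as its status.
 */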
PRIVATE int sr_put_userdata (fd, offset, data, for_ioctl)
int fd;
size_t offset;
acc_t *data;
int for_ioctl;
{
	sr_fd_t *loc_fd;
	mq_t **head_ptr, *m, *mq;
	int ip_flag, susp_flag, first_flag;
	int result, suspended, is_revive;
	event_t *evp;
	ev_arg_t arg;

	loc_fd= &sr_fd_table[fd];

	if (for_ioctl)
	{
		head_ptr= &loc_fd->srf_ioctl_q;
		evp= &loc_fd->srf_ioctl_ev;
		ip_flag= SFF_IOCTL_IP;
		susp_flag= SFF_IOCTL_SUSP;
		first_flag= SFF_IOCTL_FIRST;
	}
	else
	{
		head_ptr= &loc_fd->srf_read_q;
		evp= &loc_fd->srf_read_ev;
		ip_flag= SFF_READ_IP;
		susp_flag= SFF_READ_SUSP;
		first_flag= SFF_READ_FIRST;
	}

	assert (loc_fd->srf_flags & ip_flag);

	if (!data)
	{
		m= *head_ptr;
		mq= m->mq_next;
		*head_ptr= mq;
		result= (int)offset;
		is_revive= !(loc_fd->srf_flags & first_flag);
		sr_reply_(m, result, is_revive);
		suspended= (loc_fd->srf_flags & susp_flag);
		loc_fd->srf_flags &= ~(ip_flag|susp_flag);
		if (suspended)
		{
			if (mq)
			{
				arg.ev_ptr= loc_fd;
				ev_enqueue(evp, sr_event, arg);
			}
		}
		return OK;
	}

	return cp_b2u (data, (*head_ptr)->mq_mess.IO_ENDPT,
		(int)(*head_ptr)->mq_mess.IO_GRANT, offset);
}

PRIVATE void sr_select_res(int fd, unsigned ops)
{
	sr_fd_t *sr_fd;

	sr_fd= &sr_fd_table[fd];

	if (ops & SR_SELECT_READ) sr_fd->srf_flags |= SFF_SELECT_R;
	if (ops & SR_SELECT_WRITE) sr_fd->srf_flags |= SFF_SELECT_W;
	if (ops & SR_SELECT_EXCEPTION) sr_fd->srf_flags |= SFF_SELECT_X;

	notify(sr_fd->srf_select_proc);
}

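/* Re-submit a chain of queued requests until one of them suspends; any
 * remaining requests are re-attached to the tail of the queue.
 */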
PRIVATE void process_req_q(mq, tail, tail_ptr)
mq_t *mq, *tail, **tail_ptr;
{
	mq_t *m;
	int result;

	for(;mq;)
	{
		m= mq;
		mq= mq->mq_next;

		result= sr_rwio(m);
		if (result == SUSPEND)
		{
			if (mq)
			{
				(*tail_ptr)->mq_next= mq;
				*tail_ptr= tail;
			}
			return;
		}
	}
	return;
}

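/* Event handler: when a suspended operation finishes, restart the
 * requests still waiting on the corresponding queue.
 */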
PRIVATE void sr_event(evp, arg)
event_t *evp;
ev_arg_t arg;
{
	sr_fd_t *sr_fd;
	int r;

	sr_fd= arg.ev_ptr;
	if (evp == &sr_fd->srf_write_ev)
	{
		while(sr_fd->srf_write_q)
		{
			r= sr_restart_write(sr_fd);
			if (r == SUSPEND)
				return;
		}
		return;
	}
	if (evp == &sr_fd->srf_read_ev)
	{
		while(sr_fd->srf_read_q)
		{
			r= sr_restart_read(sr_fd);
			if (r == SUSPEND)
				return;
		}
		return;
	}
	if (evp == &sr_fd->srf_ioctl_ev)
	{
		while(sr_fd->srf_ioctl_q)
		{
			r= sr_restart_ioctl(sr_fd);
			if (r == SUSPEND)
				return;
		}
		return;
	}
	ip_panic(("sr_event: unknown event\n"));
}

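/* Copy 'size' bytes from the caller's grant into a newly allocated
 * buffer chain, using a single safecopy for one fragment or a vector
 * copy for several.
 */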
PRIVATE int cp_u2b(proc, gid, offset, var_acc_ptr, size)
endpoint_t proc;
cp_grant_id_t gid;
vir_bytes offset;
acc_t **var_acc_ptr;
int size;
{
	acc_t *acc;
	int i, r;

	acc= bf_memreq(size);

	*var_acc_ptr= acc;
	i=0;

	while (acc)
	{
		size= (vir_bytes)acc->acc_length;

		s_cp_req[i].v_from= proc;
		s_cp_req[i].v_to= SELF;
		s_cp_req[i].v_gid= gid;
		s_cp_req[i].v_offset= offset;
		s_cp_req[i].v_addr= (vir_bytes) ptr2acc_data(acc);
		s_cp_req[i].v_bytes= size;

		offset += size;
		acc= acc->acc_next;
		i++;

		if (acc == NULL && i == 1)
		{
			r= sys_safecopyfrom(s_cp_req[0].v_from,
				s_cp_req[0].v_gid, s_cp_req[0].v_offset,
				s_cp_req[0].v_addr, s_cp_req[0].v_bytes, D);
			if (r < 0)
			{
				printf("sys_safecopyfrom failed: %d\n", r);
				bf_afree(*var_acc_ptr);
				*var_acc_ptr= 0;
				return r;
			}
			i= 0;
			continue;
		}
		if (i == SCPVEC_NR || acc == NULL)
		{
			r= sys_vsafecopy(s_cp_req, i);

			if (r < 0)
			{
				printf("cp_u2b: sys_vsafecopy failed: %d\n",
					r);
				bf_afree(*var_acc_ptr);
				*var_acc_ptr= 0;
				return r;
			}
			i= 0;
		}
	}
	return OK;
}

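/* Copy a buffer chain out to the caller's grant and free the chain. */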
PRIVATE int cp_b2u(acc_ptr, proc, gid, offset)
acc_t *acc_ptr;
endpoint_t proc;
cp_grant_id_t gid;
vir_bytes offset;
{
	acc_t *acc;
	int i, r, size;

	acc= acc_ptr;
	i=0;

	while (acc)
	{
		size= (vir_bytes)acc->acc_length;

		if (size)
		{
			s_cp_req[i].v_from= SELF;
			s_cp_req[i].v_to= proc;
			s_cp_req[i].v_gid= gid;
			s_cp_req[i].v_offset= offset;
			s_cp_req[i].v_addr= (vir_bytes) ptr2acc_data(acc);
			s_cp_req[i].v_bytes= size;

			i++;
		}

		offset += size;
		acc= acc->acc_next;

		if (acc == NULL && i == 1)
		{
			r= sys_safecopyto(s_cp_req[0].v_to,
				s_cp_req[0].v_gid, s_cp_req[0].v_offset,
				s_cp_req[0].v_addr, s_cp_req[0].v_bytes, D);
			if (r < 0)
			{
				printf("sys_safecopyto failed: %d\n", r);
				bf_afree(acc_ptr);
				return r;
			}
			i= 0;
			continue;
		}
		if (i == SCPVEC_NR || acc == NULL)
		{
			r= sys_vsafecopy(s_cp_req, i);

			if (r < 0)
			{
				printf("cp_b2u: sys_vsafecopy failed: %d\n",
					r);
				bf_afree(acc_ptr);
				return r;
			}
			i= 0;
		}
	}
	bf_afree(acc_ptr);
	return OK;
}

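/* Look for a queued revive reply for the given endpoint and grant. If
 * one is found it is sent immediately and 1 is returned; otherwise the
 * queue is left intact and 0 is returned.
 */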
PRIVATE int sr_repl_queue(proc, ref, operation)
int proc;
int ref;
int operation;
{
	mq_t *m, *m_cancel, *m_tmp;
	mq_t *new_queue;
	int result;

	m_cancel= NULL;
	new_queue= NULL;

	for (m= repl_queue; m;)
	{
		if (m->mq_mess.REP_ENDPT == proc &&
			m->mq_mess.REP_IO_GRANT == ref)
		{
			assert(!m_cancel);
			m_cancel= m;
			m= m->mq_next;
			continue;
		}
		m_tmp= m;
		m= m->mq_next;
		m_tmp->mq_next= new_queue;
		new_queue= m_tmp;
	}
	repl_queue= new_queue;
	if (m_cancel)
	{
		result= send(m_cancel->mq_mess.m_source, &m_cancel->mq_mess);
		if (result != OK)
			ip_panic(("unable to send: %d", result));
		mq_free(m_cancel);
		return 1;
	}
	return 0;
}

/*
 * $PchId: sr.c,v 1.17 2005/06/28 14:26:16 philip Exp $
 */