/* minix/servers/vfs/comm.c - low-level request messaging between VFS and
 * file-system servers / VM (blob 49199f3d6395d42392eca3dc9da96d4a4f45e395).
 */
1 #include "fs.h"
2 #include <minix/vfsif.h>
3 #include <assert.h>
4 #include <string.h>
6 static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp);
7 static int queuemsg(struct vmnt *vmp);
9 /*===========================================================================*
10 * sendmsg *
11 *===========================================================================*/
12 static int sendmsg(struct vmnt *vmp, endpoint_t dst, struct worker_thread *wp)
14 /* This is the low level function that sends requests.
15 * Currently to FSes or VM.
17 int r, transid;
19 if(vmp) vmp->m_comm.c_cur_reqs++; /* One more request awaiting a reply */
20 transid = wp->w_tid + VFS_TRANSID;
21 wp->w_sendrec->m_type = TRNS_ADD_ID(wp->w_sendrec->m_type, transid);
22 wp->w_task = dst;
23 if ((r = asynsend3(dst, wp->w_sendrec, AMF_NOREPLY)) != OK) {
24 printf("VFS: sendmsg: error sending message. "
25 "dest: %d req_nr: %d err: %d\n", dst,
26 wp->w_sendrec->m_type, r);
27 util_stacktrace();
28 return(r);
31 return(r);
34 /*===========================================================================*
35 * send_work *
36 *===========================================================================*/
37 void send_work(void)
39 /* Try to send out as many requests as possible */
40 struct vmnt *vmp;
42 if (sending == 0) return;
43 for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; vmp++)
44 fs_sendmore(vmp);
47 /*===========================================================================*
48 * fs_cancel *
49 *===========================================================================*/
50 void fs_cancel(struct vmnt *vmp)
52 /* Cancel all pending requests for this vmp */
53 struct worker_thread *worker;
55 while ((worker = vmp->m_comm.c_req_queue) != NULL) {
56 vmp->m_comm.c_req_queue = worker->w_next;
57 worker->w_next = NULL;
58 sending--;
59 worker_stop(worker);
63 /*===========================================================================*
64 * fs_sendmore *
65 *===========================================================================*/
66 void fs_sendmore(struct vmnt *vmp)
68 struct worker_thread *worker;
70 /* Can we send more requests? */
71 if (vmp->m_fs_e == NONE) return;
72 if ((worker = vmp->m_comm.c_req_queue) == NULL) /* No process is queued */
73 return;
74 if (vmp->m_comm.c_cur_reqs >= vmp->m_comm.c_max_reqs)/*No room to send more*/
75 return;
76 if (vmp->m_flags & VMNT_CALLBACK) /* Hold off for now */
77 return;
79 vmp->m_comm.c_req_queue = worker->w_next; /* Remove head */
80 worker->w_next = NULL;
81 sending--;
82 assert(sending >= 0);
83 (void) sendmsg(vmp, vmp->m_fs_e, worker);
86 /*===========================================================================*
87 * drv_sendrec *
88 *===========================================================================*/
89 int drv_sendrec(endpoint_t drv_e, message *reqmp)
91 int r;
92 struct dmap *dp;
94 /* For the CTTY_MAJOR case, we would actually have to lock the device
95 * entry being redirected to. However, the CTTY major only hosts a
96 * character device while this function is used only for block devices.
97 * Thus, we can simply deny the request immediately.
99 if (drv_e == CTTY_ENDPT) {
100 printf("VFS: /dev/tty is not a block device!\n");
101 return EIO;
104 if ((dp = get_dmap_by_endpt(drv_e)) == NULL)
105 panic("driver endpoint %d invalid", drv_e);
107 lock_dmap(dp);
108 if (dp->dmap_servicing != INVALID_THREAD)
109 panic("driver locking inconsistency");
110 dp->dmap_servicing = self->w_tid;
111 self->w_task = drv_e;
112 self->w_drv_sendrec = reqmp;
114 if ((r = asynsend3(drv_e, self->w_drv_sendrec, AMF_NOREPLY)) == OK) {
115 /* Yield execution until we've received the reply */
116 worker_wait();
118 } else {
119 printf("VFS: drv_sendrec: error sending msg to driver %d: %d\n",
120 drv_e, r);
121 self->w_drv_sendrec = NULL;
124 assert(self->w_drv_sendrec == NULL);
125 dp->dmap_servicing = INVALID_THREAD;
126 self->w_task = NONE;
127 unlock_dmap(dp);
128 return(r);
131 /*===========================================================================*
132 * fs_sendrec *
133 *===========================================================================*/
134 int fs_sendrec(endpoint_t fs_e, message *reqmp)
136 struct vmnt *vmp;
137 int r;
139 if ((vmp = find_vmnt(fs_e)) == NULL) {
140 printf("Trying to talk to non-existent FS endpoint %d\n", fs_e);
141 return(EIO);
143 if (fs_e == fp->fp_endpoint) return(EDEADLK);
145 assert(self->w_sendrec == NULL);
146 self->w_sendrec = reqmp; /* Where to store request and reply */
148 /* Find out whether we can send right away or have to enqueue */
149 if ( !(vmp->m_flags & VMNT_CALLBACK) &&
150 vmp->m_comm.c_cur_reqs < vmp->m_comm.c_max_reqs) {
151 /* There's still room to send more and no proc is queued */
152 r = sendmsg(vmp, vmp->m_fs_e, self);
153 } else {
154 r = queuemsg(vmp);
156 self->w_next = NULL; /* End of list */
158 if (r != OK) return(r);
160 worker_wait(); /* Yield execution until we've received the reply. */
162 assert(self->w_sendrec == NULL);
164 r = reqmp->m_type;
165 if (r == ERESTART) /* ERESTART is used internally, so make sure it is.. */
166 r = EIO; /* ..not delivered as a result from a file system. */
167 return(r);
170 /*===========================================================================*
171 * vm_sendrec *
172 *===========================================================================*/
173 int vm_sendrec(message *reqmp)
175 int r;
177 assert(self);
178 assert(reqmp);
180 assert(self->w_sendrec == NULL);
181 self->w_sendrec = reqmp; /* Where to store request and reply */
183 r = sendmsg(NULL, VM_PROC_NR, self);
185 self->w_next = NULL; /* End of list */
187 if (r != OK) return(r);
189 worker_wait(); /* Yield execution until we've received the reply. */
191 assert(self->w_sendrec == NULL);
193 return(reqmp->m_type);
197 /*===========================================================================*
198 * vm_vfs_procctl_handlemem *
199 *===========================================================================*/
200 int vm_vfs_procctl_handlemem(endpoint_t ep,
201 vir_bytes mem, vir_bytes len, int flags)
203 message m;
205 /* main thread can not be suspended */
206 if(!self) return EFAULT;
208 memset(&m, 0, sizeof(m));
210 m.m_type = VM_PROCCTL;
211 m.VMPCTL_WHO = ep;
212 m.VMPCTL_PARAM = VMPPARAM_HANDLEMEM;
213 m.VMPCTL_M1 = mem;
214 m.VMPCTL_LEN = len;
215 m.VMPCTL_FLAGS = flags;
217 return vm_sendrec(&m);
220 /*===========================================================================*
221 * queuemsg *
222 *===========================================================================*/
223 static int queuemsg(struct vmnt *vmp)
225 /* Put request on queue for vmnt */
227 struct worker_thread *queue;
229 if (vmp->m_comm.c_req_queue == NULL) {
230 vmp->m_comm.c_req_queue = self;
231 } else {
232 /* Walk the list ... */
233 queue = vmp->m_comm.c_req_queue;
234 while (queue->w_next != NULL) queue = queue->w_next;
236 /* ... and append this worker */
237 queue->w_next = self;
240 self->w_next = NULL; /* End of list */
241 sending++;
243 return(OK);