/* This file contains the heart of the mechanism used to read (and write)
 * files.  Read and write requests are split up into chunks that do not cross
 * block boundaries.  Each chunk is then processed in turn.  Reads on special
 * files are also detected and handled.
 *
 * The entry points into this file are
 *   do_read:	  perform the READ system call by calling read_write
 *   do_getdents: read entries from a directory (GETDENTS)
 *   read_write:  actually do the work of READ and WRITE
 */
#include "fs.h"
#include <fcntl.h>
#include <unistd.h>
#include <minix/com.h>
#include <minix/u64.h>
#include "file.h"
#include "fproc.h"
#include "scratchpad.h"
#include "param.h"
#include <dirent.h>
#include <assert.h>
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"
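
/* For orientation, a sketch (not part of this file) of how a user process
 * reaches these entry points: the C library turns read(2) into a READ
 * message to VFS, whose main loop dispatches to do_read() below. The exact
 * message field names are assumptions, based on how do_read_write() unpacks
 * job_m_in:
 *
 *	char buf[1024];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	// becomes a message carrying fd, buffer and nbytes, answered
 *	// with a byte count on success or a negative error code.
 */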
/*===========================================================================*
 *				do_read					     *
 *===========================================================================*/
int do_read()
{
  return(do_read_write(READING));
}
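
/* The WRITING side does not live in this file; its entry point is expected
 * to be the mirror image of do_read() (a sketch, assuming the conventional
 * split into a companion write.c):
 *
 *	int do_write()
 *	{
 *	  return(do_read_write(WRITING));
 *	}
 */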
/*===========================================================================*
 *				lock_bsf				     *
 *===========================================================================*/
void lock_bsf(void)
{
  struct fproc *org_fp;
  struct worker_thread *org_self;

  if (mutex_trylock(&bsf_lock) == 0)
	return;

  org_fp = fp;
  org_self = self;

  if (mutex_lock(&bsf_lock) != 0)
	panic("unable to lock block special file lock");

  fp = org_fp;
  self = org_self;
}
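
/* Note on the save/restore above: blocking in mutex_lock() can let other
 * worker threads run, and those may reassign the global fp/self pointers;
 * restoring them afterwards keeps this thread's context intact once it
 * acquires the lock.
 */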
/*===========================================================================*
 *				unlock_bsf				     *
 *===========================================================================*/
void unlock_bsf(void)
{
  if (mutex_unlock(&bsf_lock) != 0)
	panic("failed to unlock block special file lock");
}
/*===========================================================================*
 *				check_bsf_lock				     *
 *===========================================================================*/
void check_bsf_lock(void)
{
  int r = mutex_trylock(&bsf_lock);

  if (r == -EBUSY)
	panic("bsf_lock locked");
  else if (r != 0)
	panic("bsf_lock weird state");

  /* r == 0: the lock was free; release it again. */
  unlock_bsf();
}
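
/* check_bsf_lock() is a consistency check: it panics if anyone holds the
 * block special file lock at a point where no one should. Callers elsewhere
 * in VFS would typically invoke it from sanity checks between requests.
 */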
/*===========================================================================*
 *				do_read_write				     *
 *===========================================================================*/
int do_read_write(rw_flag)
int rw_flag;			/* READING or WRITING */
{
/* Perform read(fd, buffer, nbytes) or write(fd, buffer, nbytes) call. */
  struct filp *f;
  tll_access_t locktype;
  int r;

  scratch(fp).file.fd_nr = job_m_in.fd;
  scratch(fp).io.io_buffer = job_m_in.buffer;
  scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;

  locktype = (rw_flag == READING) ? VNODE_READ : VNODE_WRITE;
  if ((f = get_filp(scratch(fp).file.fd_nr, locktype)) == NULL)
	return(err_code);
  if (((f->filp_mode) & (rw_flag == READING ? R_BIT : W_BIT)) == 0) {
	unlock_filp(f);
	return(f->filp_mode == FILP_CLOSED ? EIO : EBADF);
  }
  if (scratch(fp).io.io_nbytes == 0) {
	unlock_filp(f);
	return(0);	/* so char special files need not check for 0 */
  }

  r = read_write(rw_flag, f, scratch(fp).io.io_buffer,
		 scratch(fp).io.io_nbytes, who_e);

  unlock_filp(f);
  return(r);
}
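
/* To recap the flow above: the filp for fd is looked up and locked (read or
 * write lock depending on direction), the open mode is checked against
 * R_BIT or W_BIT, a zero-byte request short-circuits to 0, and everything
 * else is handed to read_write(). The filp lock is held across the call and
 * released here, so read_write() and its callees may assume a locked filp
 * and vnode.
 */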
/*===========================================================================*
 *				read_write				     *
 *===========================================================================*/
int read_write(int rw_flag, struct filp *f, char *buf, size_t size,
	       endpoint_t for_e)
{
  register struct vnode *vp;
  u64_t position, res_pos, new_pos;
  unsigned int cum_io, cum_io_incr, res_cum_io;
  int op, oflags, r;

  position = f->filp_pos;
  oflags = f->filp_flags;
  vp = f->filp_vno;
  r = OK;
  cum_io = 0;

  if (size > SSIZE_MAX) return(EINVAL);

  if (S_ISFIFO(vp->v_mode)) {		/* Pipes */
	if (fp->fp_cum_io_partial != 0) {
		panic("VFS: read_write: fp_cum_io_partial not clear");
	}
	r = rw_pipe(rw_flag, for_e, f, buf, size);
	return(r);
  }

  op = (rw_flag == READING ? VFS_DEV_READ : VFS_DEV_WRITE);

  if (S_ISCHR(vp->v_mode)) {		/* Character special files. */
	dev_t dev;
	int suspend_reopen;

	if (vp->v_sdev == NO_DEV)
		panic("VFS: read_write tries to access char dev NO_DEV");

	suspend_reopen = (f->filp_state & FS_NEEDS_REOPEN);
	dev = (dev_t) vp->v_sdev;

	r = dev_io(op, dev, for_e, buf, position, size, oflags,
		   suspend_reopen);
	if (r >= 0) {
		cum_io = r;
		position = add64ul(position, r);
		r = OK;
	}
  } else if (S_ISBLK(vp->v_mode)) {	/* Block special files. */
	if (vp->v_sdev == NO_DEV)
		panic("VFS: read_write tries to access block dev NO_DEV");

	lock_bsf();

	r = req_breadwrite(vp->v_bfs_e, for_e, vp->v_sdev, position, size,
			   buf, rw_flag, &res_pos, &res_cum_io);
	if (r == OK) {
		position = res_pos;
		cum_io += res_cum_io;
	}

	unlock_bsf();
  } else {				/* Regular files */
	if (rw_flag == WRITING) {
		/* Check for O_APPEND flag. */
		if (oflags & O_APPEND) position = cvul64(vp->v_size);
	}

	/* Issue request */
	r = req_readwrite(vp->v_fs_e, vp->v_inode_nr, position, rw_flag,
			  for_e, buf, size, &new_pos, &cum_io_incr);

	if (r >= 0) {
		if (ex64hi(new_pos))
			panic("read_write: bad new pos");

		position = new_pos;
		cum_io += cum_io_incr;
	}
  }

  /* On write, update file size and access time. */
  if (rw_flag == WRITING) {
	if (S_ISREG(vp->v_mode) || S_ISDIR(vp->v_mode)) {
		if (cmp64ul(position, vp->v_size) > 0) {
			if (ex64hi(position) != 0) {
				panic("read_write: file size too big");
			}
			vp->v_size = ex64lo(position);
		}
	}
  }

  f->filp_pos = position;

  if (r == OK) return(cum_io);
  return(r);
}
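
/* Return convention of read_write(): a nonnegative result is the number of
 * bytes transferred; a negative result is an errno-style failure code. A
 * caller-side check, for illustration only (the helper names here are
 * hypothetical):
 *
 *	r = read_write(READING, f, buf, nbytes, who_e);
 *	if (r >= 0) reply_with_count(r);	// bytes actually moved
 *	else reply_with_error(r);		// e.g. EIO, EBADF
 */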
/*===========================================================================*
 *				do_getdents				     *
 *===========================================================================*/
int do_getdents()
{
/* Perform the getdents(fd, buf, size) system call. */
  int r = OK;
  u64_t new_pos;
  register struct filp *rfilp;

  scratch(fp).file.fd_nr = job_m_in.fd;
  scratch(fp).io.io_buffer = job_m_in.buffer;
  scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;

  /* Is the file descriptor valid? */
  if ((rfilp = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
	return(err_code);

  if (!(rfilp->filp_mode & R_BIT))
	r = EBADF;
  else if (!S_ISDIR(rfilp->filp_vno->v_mode))
	r = EBADF;

  if (r == OK) {
	if (ex64hi(rfilp->filp_pos) != 0)
		panic("do_getdents: can't handle large offsets");

	r = req_getdents(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr,
			 rfilp->filp_pos, scratch(fp).io.io_buffer,
			 scratch(fp).io.io_nbytes, &new_pos, 0);

	if (r > 0) rfilp->filp_pos = new_pos;
  }

  unlock_filp(rfilp);
  return(r);
}
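
/* Sketch of the user-level counterpart (assumed, not defined here): the
 * caller supplies a byte buffer that the FS fills with packed struct dirent
 * records, and a positive return is the number of bytes placed in it:
 *
 *	char buf[4096];
 *	int n = getdents(fd, (struct dirent *) buf, sizeof(buf));
 *	// n > 0: buf holds dirent records; n == 0: end of directory.
 */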
/*===========================================================================*
 *				rw_pipe					     *
 *===========================================================================*/
int rw_pipe(rw_flag, usr_e, f, buf, req_size)
int rw_flag;			/* READING or WRITING */
endpoint_t usr_e;
struct filp *f;
char *buf;
size_t req_size;
{
  int r, oflags, partial_pipe = 0;
  size_t size, cum_io, cum_io_incr;
  struct vnode *vp;
  u64_t position, new_pos;

  /* Must make sure we're operating on a locked filp and vnode */
  assert(tll_islocked(&f->filp_vno->v_lock));
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);

  oflags = f->filp_flags;
  vp = f->filp_vno;
  position = cvu64((rw_flag == READING) ? vp->v_pipe_rd_pos :
						vp->v_pipe_wr_pos);
  /* fp->fp_cum_io_partial is only nonzero when doing partial writes */
  cum_io = fp->fp_cum_io_partial;

  r = pipe_check(vp, rw_flag, oflags, req_size, position, 0);
  if (r <= 0) {
	if (r == SUSPEND) pipe_suspend(f, buf, req_size);
	return(r);
  }

  size = r;
  if (size < req_size) partial_pipe = 1;

  /* Truncate read request at size. */
  if ((rw_flag == READING) &&
		cmp64ul(add64ul(position, size), vp->v_size) > 0) {
	/* Position should always fit in an off_t (LONG_MAX). */
	off_t pos32;

	assert(cmp64ul(position, LONG_MAX) <= 0);
	pos32 = cv64ul(position);
	assert(pos32 >= 0);
	assert(pos32 <= LONG_MAX);
	size = vp->v_size - pos32;
  }

  if (vp->v_mapfs_e == 0)
	panic("unmapped pipe");

  r = req_readwrite(vp->v_mapfs_e, vp->v_mapinode_nr, position, rw_flag,
		    usr_e, buf, size, &new_pos, &cum_io_incr);

  if (r >= 0) {
	if (ex64hi(new_pos))
		panic("rw_pipe: bad new pos");

	position = new_pos;
	cum_io += cum_io_incr;
	buf += cum_io_incr;
	req_size -= cum_io_incr;
  }

  /* On write, update file size and access time. */
  if (rw_flag == WRITING) {
	if (cmp64ul(position, vp->v_size) > 0) {
		if (ex64hi(position) != 0) {
			panic("rw_pipe: file size too big for v_size");
		}
		vp->v_size = ex64lo(position);
	}
  } else {
	if (cmp64ul(position, vp->v_size) >= 0) {
		/* Reset pipe pointers */
		vp->v_size = 0;
		vp->v_pipe_rd_pos = 0;
		vp->v_pipe_wr_pos = 0;
		position = cvu64(0);
	}
  }

  if (rw_flag == READING)
	vp->v_pipe_rd_pos = cv64ul(position);
  else
	vp->v_pipe_wr_pos = cv64ul(position);

  if (r == OK) {
	if (partial_pipe) {
		/* Partial write on pipe: with O_NONBLOCK we return the
		 * write count; without it (req_size > PIPE_SIZE, hence
		 * non-atomic), save progress and suspend the caller.
		 */
		if (!(oflags & O_NONBLOCK)) {
			fp->fp_cum_io_partial = cum_io;
			pipe_suspend(f, buf, req_size);
			return(SUSPEND);
		}
	}

	fp->fp_cum_io_partial = 0;
	return(cum_io);
  }

  return(r);
}
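
/* A note on the SUSPEND path above: when a blocking partial write is
 * suspended, the bytes already transferred are remembered in
 * fp_cum_io_partial, and buf/req_size have been advanced past them. When
 * the pipe drains, the caller is resumed and rw_pipe() runs again on the
 * remainder; the final reply is the cumulative byte count.
 */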