coverity appeasement
[minix.git] / servers / vfs / read.c
blob4b7715017f9750c974f354d3f10be335c7269696
/* This file contains the heart of the mechanism used to read (and write)
 * files.  Read and write requests are split up into chunks that do not cross
 * block boundaries.  Each chunk is then processed in turn.  Reads on special
 * files are also detected and handled.
 *
 * The entry points into this file are
 *   do_read:	   perform the READ system call by calling read_write
 *   do_getdents:  read entries from a directory (GETDENTS)
 *   read_write:   actually do the work of READ and WRITE
 */
13 #include "fs.h"
14 #include <fcntl.h>
15 #include <unistd.h>
16 #include <minix/com.h>
17 #include <minix/u64.h>
18 #include "file.h"
19 #include "fproc.h"
20 #include "scratchpad.h"
21 #include "param.h"
22 #include <dirent.h>
23 #include <assert.h>
24 #include <minix/vfsif.h>
25 #include "vnode.h"
26 #include "vmnt.h"
29 /*===========================================================================*
30 * do_read *
31 *===========================================================================*/
32 int do_read()
34 return(do_read_write(READING));
38 /*===========================================================================*
39 * lock_bsf *
40 *===========================================================================*/
41 void lock_bsf(void)
43 struct fproc *org_fp;
44 struct worker_thread *org_self;
46 if (mutex_trylock(&bsf_lock) == 0)
47 return;
49 org_fp = fp;
50 org_self = self;
52 if (mutex_lock(&bsf_lock) != 0)
53 panic("unable to lock block special file lock");
55 fp = org_fp;
56 self = org_self;
59 /*===========================================================================*
60 * unlock_bsf *
61 *===========================================================================*/
62 void unlock_bsf(void)
64 if (mutex_unlock(&bsf_lock) != 0)
65 panic("failed to unlock block special file lock");
68 /*===========================================================================*
69 * do_read_write *
70 *===========================================================================*/
71 int do_read_write(rw_flag)
72 int rw_flag; /* READING or WRITING */
74 /* Perform read(fd, buffer, nbytes) or write(fd, buffer, nbytes) call. */
75 struct filp *f;
76 tll_access_t locktype;
77 int r;
79 scratch(fp).file.fd_nr = job_m_in.fd;
80 scratch(fp).io.io_buffer = job_m_in.buffer;
81 scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;
83 locktype = (rw_flag == READING) ? VNODE_READ : VNODE_WRITE;
84 if ((f = get_filp(scratch(fp).file.fd_nr, locktype)) == NULL)
85 return(err_code);
86 if (((f->filp_mode) & (rw_flag == READING ? R_BIT : W_BIT)) == 0) {
87 unlock_filp(f);
88 return(f->filp_mode == FILP_CLOSED ? EIO : EBADF);
90 if (scratch(fp).io.io_nbytes == 0) {
91 unlock_filp(f);
92 return(0); /* so char special files need not check for 0*/
95 r = read_write(rw_flag, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes,
96 who_e);
98 unlock_filp(f);
99 return(r);
102 /*===========================================================================*
103 * read_write *
104 *===========================================================================*/
105 int read_write(int rw_flag, struct filp *f, char *buf, size_t size,
106 endpoint_t for_e)
108 register struct vnode *vp;
109 u64_t position, res_pos, new_pos;
110 unsigned int cum_io, cum_io_incr, res_cum_io;
111 int op, oflags, r;
113 position = f->filp_pos;
114 oflags = f->filp_flags;
115 vp = f->filp_vno;
116 r = OK;
117 cum_io = 0;
119 if (size > SSIZE_MAX) return(EINVAL);
121 if (S_ISFIFO(vp->v_mode)) {
122 if (fp->fp_cum_io_partial != 0) {
123 panic("VFS: read_write: fp_cum_io_partial not clear");
125 r = rw_pipe(rw_flag, for_e, f, buf, size);
126 return(r);
129 op = (rw_flag == READING ? VFS_DEV_READ : VFS_DEV_WRITE);
131 if (S_ISCHR(vp->v_mode)) { /* Character special files. */
132 dev_t dev;
133 int suspend_reopen;
135 if (vp->v_sdev == NO_DEV)
136 panic("VFS: read_write tries to access char dev NO_DEV");
138 suspend_reopen = (f->filp_state & FS_NEEDS_REOPEN);
139 dev = (dev_t) vp->v_sdev;
141 r = dev_io(op, dev, for_e, buf, position, size, oflags,
142 suspend_reopen);
143 if (r >= 0) {
144 cum_io = r;
145 position = add64ul(position, r);
146 r = OK;
148 } else if (S_ISBLK(vp->v_mode)) { /* Block special files. */
149 if (vp->v_sdev == NO_DEV)
150 panic("VFS: read_write tries to access block dev NO_DEV");
152 lock_bsf();
154 r = req_breadwrite(vp->v_bfs_e, for_e, vp->v_sdev, position, size,
155 buf, rw_flag, &res_pos, &res_cum_io);
156 if (r == OK) {
157 position = res_pos;
158 cum_io += res_cum_io;
161 unlock_bsf();
162 } else { /* Regular files */
163 if (rw_flag == WRITING) {
164 /* Check for O_APPEND flag. */
165 if (oflags & O_APPEND) position = cvul64(vp->v_size);
168 /* Issue request */
169 r = req_readwrite(vp->v_fs_e, vp->v_inode_nr, position, rw_flag, for_e,
170 buf, size, &new_pos, &cum_io_incr);
172 if (r >= 0) {
173 if (ex64hi(new_pos))
174 panic("read_write: bad new pos");
176 position = new_pos;
177 cum_io += cum_io_incr;
181 /* On write, update file size and access time. */
182 if (rw_flag == WRITING) {
183 if (S_ISREG(vp->v_mode) || S_ISDIR(vp->v_mode)) {
184 if (cmp64ul(position, vp->v_size) > 0) {
185 if (ex64hi(position) != 0) {
186 panic("read_write: file size too big ");
188 vp->v_size = ex64lo(position);
193 f->filp_pos = position;
195 if (r == OK) return(cum_io);
196 return(r);
199 /*===========================================================================*
200 * do_getdents *
201 *===========================================================================*/
202 int do_getdents()
204 /* Perform the getdents(fd, buf, size) system call. */
205 int r = OK;
206 u64_t new_pos;
207 register struct filp *rfilp;
209 scratch(fp).file.fd_nr = job_m_in.fd;
210 scratch(fp).io.io_buffer = job_m_in.buffer;
211 scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;
213 /* Is the file descriptor valid? */
214 if ( (rfilp = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
215 return(err_code);
217 if (!(rfilp->filp_mode & R_BIT))
218 r = EBADF;
219 else if (!S_ISDIR(rfilp->filp_vno->v_mode))
220 r = EBADF;
222 if (r == OK) {
223 if (ex64hi(rfilp->filp_pos) != 0)
224 panic("do_getdents: can't handle large offsets");
226 r = req_getdents(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr,
227 rfilp->filp_pos, scratch(fp).io.io_buffer,
228 scratch(fp).io.io_nbytes, &new_pos,0);
230 if (r > 0) rfilp->filp_pos = new_pos;
233 unlock_filp(rfilp);
234 return(r);
238 /*===========================================================================*
239 * rw_pipe *
240 *===========================================================================*/
241 int rw_pipe(rw_flag, usr_e, f, buf, req_size)
242 int rw_flag; /* READING or WRITING */
243 endpoint_t usr_e;
244 struct filp *f;
245 char *buf;
246 size_t req_size;
248 int r, oflags, partial_pipe = 0;
249 size_t size, cum_io, cum_io_incr;
250 struct vnode *vp;
251 u64_t position, new_pos;
253 /* Must make sure we're operating on locked filp and vnode */
254 assert(tll_islocked(&f->filp_vno->v_lock));
255 assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
257 oflags = f->filp_flags;
258 vp = f->filp_vno;
259 position = cvu64((rw_flag == READING) ? vp->v_pipe_rd_pos :
260 vp->v_pipe_wr_pos);
261 /* fp->fp_cum_io_partial is only nonzero when doing partial writes */
262 cum_io = fp->fp_cum_io_partial;
264 r = pipe_check(vp, rw_flag, oflags, req_size, position, 0);
265 if (r <= 0) {
266 if (r == SUSPEND) pipe_suspend(f, buf, req_size);
267 return(r);
270 size = r;
271 if (size < req_size) partial_pipe = 1;
273 /* Truncate read request at size. */
274 if((rw_flag == READING) &&
275 cmp64ul(add64ul(position, size), vp->v_size) > 0) {
276 /* Position always should fit in an off_t (LONG_MAX). */
277 off_t pos32;
279 assert(cmp64ul(position, LONG_MAX) <= 0);
280 pos32 = cv64ul(position);
281 assert(pos32 >= 0);
282 assert(pos32 <= LONG_MAX);
283 size = vp->v_size - pos32;
286 if (vp->v_mapfs_e == 0)
287 panic("unmapped pipe");
289 r = req_readwrite(vp->v_mapfs_e, vp->v_mapinode_nr, position, rw_flag, usr_e,
290 buf, size, &new_pos, &cum_io_incr);
292 if (r >= 0) {
293 if (ex64hi(new_pos))
294 panic("rw_pipe: bad new pos");
296 position = new_pos;
297 cum_io += cum_io_incr;
298 buf += cum_io_incr;
299 req_size -= cum_io_incr;
302 /* On write, update file size and access time. */
303 if (rw_flag == WRITING) {
304 if (cmp64ul(position, vp->v_size) > 0) {
305 if (ex64hi(position) != 0) {
306 panic("read_write: file size too big for v_size");
308 vp->v_size = ex64lo(position);
310 } else {
311 if (cmp64ul(position, vp->v_size) >= 0) {
312 /* Reset pipe pointers */
313 vp->v_size = 0;
314 vp->v_pipe_rd_pos= 0;
315 vp->v_pipe_wr_pos= 0;
316 position = cvu64(0);
320 if (rw_flag == READING)
321 vp->v_pipe_rd_pos= cv64ul(position);
322 else
323 vp->v_pipe_wr_pos= cv64ul(position);
325 if (r == OK) {
326 if (partial_pipe) {
327 /* partial write on pipe with */
328 /* O_NONBLOCK, return write count */
329 if (!(oflags & O_NONBLOCK)) {
330 /* partial write on pipe with req_size > PIPE_SIZE,
331 * non-atomic
333 fp->fp_cum_io_partial = cum_io;
334 pipe_suspend(f, buf, req_size);
335 return(SUSPEND);
338 fp->fp_cum_io_partial = 0;
339 return(cum_io);
342 return(r);