/* This file contains the heart of the mechanism used to read (and write)
 * files.  Read and write requests are split up into chunks that do not cross
 * block boundaries.  Each chunk is then processed in turn.  Reads on special
 * files are also detected and handled.
 *
 * The entry points into this file are
 *   do_read:     perform the READ system call by calling read_write
 *   do_getdents: read entries from a directory (GETDENTS)
 *   read_write:  actually do the work of READ and WRITE
 */
#include "fs.h"
#include <fcntl.h>
#include <unistd.h>
#include <assert.h>
#include <dirent.h>
#include <minix/com.h>
#include <minix/u64.h>
#include "file.h"
#include "fproc.h"
#include "scratchpad.h"
#include "param.h"
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"
/*===========================================================================*
 *                              do_read                                      *
 *===========================================================================*/
int do_read()
{
  return(do_read_write(READING));
}

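/* Block special file I/O below is serialized with the global bsf_lock mutex;
 * the two helpers that follow acquire and release it.
 */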
/*===========================================================================*
 *                              lock_bsf                                     *
 *===========================================================================*/
void lock_bsf(void)
{
  struct worker_thread *org_self;
  /* If the lock is free, take it and return right away. */
  if (mutex_trylock(&bsf_lock) == 0)
        return;

  /* The lock is taken; suspend this worker thread while blocking on it.
   * (The worker_suspend()/worker_resume() pairing is assumed here from the
   * org_self bookkeeping above.)
   */
  org_self = worker_suspend();

  if (mutex_lock(&bsf_lock) != 0)
        panic("unable to lock block special file lock");

  worker_resume(org_self);
}
/*===========================================================================*
 *                              unlock_bsf                                   *
 *===========================================================================*/
void unlock_bsf(void)
{
  if (mutex_unlock(&bsf_lock) != 0)
        panic("failed to unlock block special file lock");
}
/*===========================================================================*
 *                              do_read_write                                *
 *===========================================================================*/
int do_read_write(rw_flag)
int rw_flag;                    /* READING or WRITING */
{
/* Perform read(fd, buffer, nbytes) or write(fd, buffer, nbytes) call. */
  struct filp *f;
  tll_access_t locktype;
  int r;
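  /* Save the fd, user buffer and byte count from the request message in the
   * process scratchpad.
   */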
  scratch(fp).file.fd_nr = job_m_in.fd;
  scratch(fp).io.io_buffer = job_m_in.buffer;
  scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;
  locktype = (rw_flag == READING) ? VNODE_READ : VNODE_WRITE;
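  /* Fetch the filp and lock the underlying vnode for reading or writing. */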
  if ((f = get_filp(scratch(fp).file.fd_nr, locktype)) == NULL)
        return(err_code);
  /* The file must have been opened for the requested direction. */
  if (((f->filp_mode) & (rw_flag == READING ? R_BIT : W_BIT)) == 0) {
        unlock_filp(f);
        return(f->filp_mode == FILP_CLOSED ? EIO : EBADF);
  }
  if (scratch(fp).io.io_nbytes == 0) {
        unlock_filp(f);
        return(0);      /* so char special files need not check for 0 */
  }
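  /* All checks passed; let read_write() do the actual transfer. */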
  r = read_write(rw_flag, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes,
                 who_e);

  unlock_filp(f);
  return(r);
}
/*===========================================================================*
 *                              read_write                                   *
 *===========================================================================*/
int read_write(int rw_flag, struct filp *f, char *buf, size_t size,
               endpoint_t for_e)
{
  register struct vnode *vp;
  u64_t position, res_pos, new_pos;
  unsigned int cum_io, cum_io_incr, res_cum_io;
  int op, oflags, r;
  position = f->filp_pos;
  oflags = f->filp_flags;
  vp = f->filp_vno;
  r = OK;
  cum_io = 0;
  if (size > SSIZE_MAX) return(EINVAL);
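  /* Pipes (FIFOs) are handled entirely by rw_pipe(). */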
  if (S_ISFIFO(vp->v_mode)) {
        if (fp->fp_cum_io_partial != 0) {
                panic("VFS: read_write: fp_cum_io_partial not clear");
        }
        r = rw_pipe(rw_flag, for_e, f, buf, size);
        return(r);
  }
  op = (rw_flag == READING ? VFS_DEV_READ : VFS_DEV_WRITE);
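  /* Dispatch on the file type: character special, block special or regular. */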
  if (S_ISCHR(vp->v_mode)) {            /* Character special files. */
        dev_t dev;
        int suspend_reopen;

        if (vp->v_sdev == NO_DEV)
                panic("VFS: read_write tries to access char dev NO_DEV");

        suspend_reopen = (f->filp_state & FS_NEEDS_REOPEN);
        dev = (dev_t) vp->v_sdev;
        r = dev_io(op, dev, for_e, buf, position, size, oflags,
                   suspend_reopen);
        if (r >= 0) {
                cum_io = r;
                position = add64ul(position, r);
                r = OK;
        }
  } else if (S_ISBLK(vp->v_mode)) {     /* Block special files. */
        if (vp->v_sdev == NO_DEV)
                panic("VFS: read_write tries to access block dev NO_DEV");

        lock_bsf();
        r = req_breadwrite(vp->v_bfs_e, for_e, vp->v_sdev, position, size,
                           buf, rw_flag, &res_pos, &res_cum_io);
        if (r == OK) {
                position = res_pos;
                cum_io += res_cum_io;
        }

        unlock_bsf();
  } else {                              /* Regular files */
        if (rw_flag == WRITING) {
                /* Check for O_APPEND flag. */
                if (oflags & O_APPEND) position = cvul64(vp->v_size);
        }

        /* Issue the request to the FS server holding the file. */
        r = req_readwrite(vp->v_fs_e, vp->v_inode_nr, position, rw_flag, for_e,
                          buf, size, &new_pos, &cum_io_incr);

        if (r >= 0) {
                if (ex64hi(new_pos))
                        panic("read_write: bad new pos");

                position = new_pos;
                cum_io += cum_io_incr;
        }
  }
  /* On write, update file size and access time. */
  if (rw_flag == WRITING) {
        if (S_ISREG(vp->v_mode) || S_ISDIR(vp->v_mode)) {
                if (cmp64ul(position, vp->v_size) > 0) {
                        if (ex64hi(position) != 0) {
                                panic("read_write: file size too big");
                        }
                        vp->v_size = ex64lo(position);
                }
        }
  }
  f->filp_pos = position;

  if (r == OK) return(cum_io);
  return(r);
}
/*===========================================================================*
 *                              do_getdents                                  *
 *===========================================================================*/
int do_getdents()
{
/* Perform the getdents(fd, buf, size) system call. */
  int r = OK;
  u64_t new_pos;
  register struct filp *rfilp;
  scratch(fp).file.fd_nr = job_m_in.fd;
  scratch(fp).io.io_buffer = job_m_in.buffer;
  scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;
  /* Is the file descriptor valid? */
  if ( (rfilp = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
        return(err_code);
  /* The file must be readable and must be a directory. */
  if (!(rfilp->filp_mode & R_BIT))
        r = EBADF;
  else if (!S_ISDIR(rfilp->filp_vno->v_mode))
        r = EBADF;

  if (r == OK) {
        if (ex64hi(rfilp->filp_pos) != 0)
                panic("do_getdents: can't handle large offsets");
        r = req_getdents(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr,
                         rfilp->filp_pos, scratch(fp).io.io_buffer,
                         scratch(fp).io.io_nbytes, &new_pos, 0);

        if (r > 0) rfilp->filp_pos = new_pos;
  }

  unlock_filp(rfilp);
  return(r);
}
/*===========================================================================*
 *                              rw_pipe                                      *
 *===========================================================================*/
int rw_pipe(rw_flag, usr_e, f, buf, req_size)
int rw_flag;                    /* READING or WRITING */
endpoint_t usr_e;               /* endpoint of the user process */
struct filp *f;
char *buf;
size_t req_size;
{
  int r, oflags, partial_pipe = 0;
  size_t size, cum_io, cum_io_incr;
  struct vnode *vp;
  u64_t position, new_pos;
  /* Must make sure we're operating on a locked filp and vnode */
  assert(tll_islocked(&f->filp_vno->v_lock));
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);
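  /* A pipe's current read and write positions are kept in the shared vnode,
   * not in the individual filp.
   */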
  oflags = f->filp_flags;
  vp = f->filp_vno;

  position = cvu64((rw_flag == READING) ? vp->v_pipe_rd_pos :
                                          vp->v_pipe_wr_pos);
  /* fp->fp_cum_io_partial is only nonzero when doing partial writes */
  cum_io = fp->fp_cum_io_partial;
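  /* pipe_check() returns how many bytes can be transferred right now, an
   * error, or SUSPEND if the caller has to wait.
   */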
  r = pipe_check(vp, rw_flag, oflags, req_size, position, 0);
  if (r <= 0) {
        if (r == SUSPEND) pipe_suspend(f, buf, req_size);
        return(r);
  }

  size = r;
  if (size < req_size) partial_pipe = 1;
  /* Truncate read request at size. */
  if ((rw_flag == READING) &&
        cmp64ul(add64ul(position, size), vp->v_size) > 0) {
        /* Position always should fit in an off_t (LONG_MAX). */
        off_t pos32;

        assert(cmp64ul(position, LONG_MAX) <= 0);
        pos32 = cv64ul(position);
        assert(pos32 >= 0);
        assert(pos32 <= LONG_MAX);
        size = vp->v_size - pos32;
  }
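  /* Pipe data lives on the FS process the pipe is mapped to (v_mapfs_e);
   * an unmapped pipe here is a bug.
   */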
  if (vp->v_mapfs_e == 0)
        panic("unmapped pipe");
  r = req_readwrite(vp->v_mapfs_e, vp->v_mapinode_nr, position, rw_flag, usr_e,
                    buf, size, &new_pos, &cum_io_incr);

  if (r >= 0) {
        if (ex64hi(new_pos))
                panic("rw_pipe: bad new pos");

        position = new_pos;
        cum_io += cum_io_incr;
        buf += cum_io_incr;
        req_size -= cum_io_incr;
  }
  /* On write, update file size and access time. */
  if (rw_flag == WRITING) {
        if (cmp64ul(position, vp->v_size) > 0) {
                if (ex64hi(position) != 0) {
                        panic("read_write: file size too big for v_size");
                }
                vp->v_size = ex64lo(position);
        }
  } else {
        if (cmp64ul(position, vp->v_size) >= 0) {
                /* Reset pipe pointers */
                vp->v_size = 0;
                vp->v_pipe_rd_pos = 0;
                vp->v_pipe_wr_pos = 0;
                position = cvu64(0);
        }
  }
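  /* Store the updated position back in the vnode. */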
  if (rw_flag == READING)
        vp->v_pipe_rd_pos = cv64ul(position);
  else
        vp->v_pipe_wr_pos = cv64ul(position);
  if (r == OK) {
        if (partial_pipe) {
                /* A partial write on a pipe with O_NONBLOCK set simply
                 * returns the number of bytes written so far.
                 */
                if (!(oflags & O_NONBLOCK)) {
                        /* Partial write on a pipe with req_size > PIPE_SIZE:
                         * the write is non-atomic, so save the progress and
                         * suspend until the rest can be written.
                         */
                        fp->fp_cum_io_partial = cum_io;
                        pipe_suspend(f, buf, req_size);
                        return(SUSPEND);
                }
        }

        fp->fp_cum_io_partial = 0;
        return(cum_io);
  }

  return(r);
}