/* This file contains the heart of the mechanism used to read (and write)
 * files.  Read and write requests are split up into chunks that do not cross
 * block boundaries.  Each chunk is then processed in turn.  Reads on special
 * files are also detected and handled.
 *
 * The entry points into this file are
 *   do_read:	  perform the READ system call by calling read_write
 *   do_getdents: read entries from a directory (GETDENTS)
 *   read_write:  actually do the work of READ and WRITE
 */

#include "fs.h"
#include <fcntl.h>
#include <unistd.h>
#include <minix/com.h>
#include <minix/u64.h>
#include "file.h"
#include "fproc.h"
#include "scratchpad.h"
#include "param.h"
#include <dirent.h>
#include <assert.h>
#include <minix/vfsif.h>
#include "vnode.h"
#include "vmnt.h"

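/* The system call handlers below (do_read_write, do_getdents) copy their
 * arguments (fd, buffer, nbytes) out of the job_m_in request message into
 * the per-process scratchpad via scratch(fp), look up and lock the
 * corresponding filp with get_filp(), and only then carry out the transfer.
 */
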
/*===========================================================================*
 *				do_read					     *
 *===========================================================================*/
int do_read()
{
  return(do_read_write(READING));
}

/*===========================================================================*
 *				lock_bsf				     *
 *===========================================================================*/
void lock_bsf(void)
{
  struct worker_thread *org_self;

  if (mutex_trylock(&bsf_lock) == 0)
	return;

  /* Someone else holds the lock; let other jobs run while we block on it. */
  org_self = worker_suspend();

  if (mutex_lock(&bsf_lock) != 0)
	panic("unable to lock block special file lock");

  worker_resume(org_self);
}

/*===========================================================================*
 *				unlock_bsf				     *
 *===========================================================================*/
void unlock_bsf(void)
{
  if (mutex_unlock(&bsf_lock) != 0)
	panic("failed to unlock block special file lock");
}

/*===========================================================================*
 *				check_bsf_lock				     *
 *===========================================================================*/
void check_bsf_lock(void)
{
  int r = mutex_trylock(&bsf_lock);

  if (r == -EBUSY)
	panic("bsf_lock locked");
  else if (r != 0)
	panic("bsf_lock weird state");

  /* The lock was free; we now hold it, so release it again. */
  unlock_bsf();
}

/*===========================================================================*
 *				do_read_write				     *
 *===========================================================================*/
int do_read_write(rw_flag)
int rw_flag;			/* READING or WRITING */
{
/* Perform read(fd, buffer, nbytes) or write(fd, buffer, nbytes) call. */
  struct filp *f;
  tll_access_t locktype;
  int r;

  scratch(fp).file.fd_nr = job_m_in.fd;
  scratch(fp).io.io_buffer = job_m_in.buffer;
  scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;

  locktype = (rw_flag == READING) ? VNODE_READ : VNODE_WRITE;
  if ((f = get_filp(scratch(fp).file.fd_nr, locktype)) == NULL)
	return(err_code);
  if (((f->filp_mode) & (rw_flag == READING ? R_BIT : W_BIT)) == 0) {
	unlock_filp(f);
	return(f->filp_mode == FILP_CLOSED ? EIO : EBADF);
  }
  if (scratch(fp).io.io_nbytes == 0) {
	unlock_filp(f);
	return(0);	/* so char special files need not check for 0 */
  }

  r = read_write(rw_flag, f, scratch(fp).io.io_buffer, scratch(fp).io.io_nbytes,
		 who_e);

  unlock_filp(f);
  return(r);
}

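/* read_write() below dispatches on the vnode type: pipes are handed to
 * rw_pipe(), character specials go to the driver through dev_io(), block
 * specials go through req_breadwrite() under bsf_lock, and regular files are
 * sent to their file system with req_readwrite().  On success the number of
 * bytes transferred (cum_io) is returned, otherwise the error code from the
 * lower layer.
 */
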
/*===========================================================================*
 *				read_write				     *
 *===========================================================================*/
int read_write(int rw_flag, struct filp *f, char *buf, size_t size,
	       endpoint_t for_e)
{
  register struct vnode *vp;
  u64_t position, res_pos, new_pos;
  unsigned int cum_io, cum_io_incr, res_cum_io;
  int op, oflags, r;

  position = f->filp_pos;
  oflags = f->filp_flags;
  vp = f->filp_vno;
  r = OK;
  cum_io = 0;

  if (size > SSIZE_MAX) return(EINVAL);

  if (S_ISFIFO(vp->v_mode)) {		/* Pipes */
	if (fp->fp_cum_io_partial != 0) {
		panic("VFS: read_write: fp_cum_io_partial not clear");
	}
	r = rw_pipe(rw_flag, for_e, f, buf, size);
	return(r);
  }

  op = (rw_flag == READING ? VFS_DEV_READ : VFS_DEV_WRITE);

  if (S_ISCHR(vp->v_mode)) {		/* Character special files. */
	dev_t dev;
	int suspend_reopen;

	if (vp->v_sdev == NO_DEV)
		panic("VFS: read_write tries to access char dev NO_DEV");

	suspend_reopen = (f->filp_state & FS_NEEDS_REOPEN);
	dev = (dev_t) vp->v_sdev;

	r = dev_io(op, dev, for_e, buf, position, size, oflags,
		   suspend_reopen);
	if (r >= 0) {
		cum_io = r;
		position = add64ul(position, r);
		r = OK;
	}
  } else if (S_ISBLK(vp->v_mode)) {	/* Block special files. */
	if (vp->v_sdev == NO_DEV)
		panic("VFS: read_write tries to access block dev NO_DEV");

	lock_bsf();

	r = req_breadwrite(vp->v_bfs_e, for_e, vp->v_sdev, position, size,
			   buf, rw_flag, &res_pos, &res_cum_io);
	if (r == OK) {
		position = res_pos;
		cum_io += res_cum_io;
	}

	unlock_bsf();
  } else {				/* Regular files */
	if (rw_flag == WRITING) {
		/* Check for O_APPEND flag. */
		if (oflags & O_APPEND) position = cvul64(vp->v_size);
	}

	/* Issue the actual read or write request to the file system. */
	r = req_readwrite(vp->v_fs_e, vp->v_inode_nr, position, rw_flag, for_e,
			  buf, size, &new_pos, &cum_io_incr);

	if (r >= 0) {
		if (ex64hi(new_pos))
			panic("read_write: bad new pos");

		position = new_pos;
		cum_io += cum_io_incr;
	}
  }

  /* On write, update file size and access time. */
  if (rw_flag == WRITING) {
	if (S_ISREG(vp->v_mode) || S_ISDIR(vp->v_mode)) {
		if (cmp64ul(position, vp->v_size) > 0) {
			if (ex64hi(position) != 0) {
				panic("read_write: file size too big");
			}
			vp->v_size = ex64lo(position);
		}
	}
  }

  f->filp_pos = position;

  if (r == OK) return(cum_io);
  return(r);
}

/*===========================================================================*
 *				do_getdents				     *
 *===========================================================================*/
int do_getdents()
{
/* Perform the getdents(fd, buf, size) system call. */
  int r = OK;
  u64_t new_pos;
  register struct filp *rfilp;

  scratch(fp).file.fd_nr = job_m_in.fd;
  scratch(fp).io.io_buffer = job_m_in.buffer;
  scratch(fp).io.io_nbytes = (size_t) job_m_in.nbytes;

  /* Is the file descriptor valid? */
  if ( (rfilp = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
	return(err_code);

  if (!(rfilp->filp_mode & R_BIT))
	r = EBADF;
  else if (!S_ISDIR(rfilp->filp_vno->v_mode))
	r = EBADF;

  if (r == OK) {
	if (ex64hi(rfilp->filp_pos) != 0)
		panic("do_getdents: can't handle large offsets");

	r = req_getdents(rfilp->filp_vno->v_fs_e, rfilp->filp_vno->v_inode_nr,
			 rfilp->filp_pos, scratch(fp).io.io_buffer,
			 scratch(fp).io.io_nbytes, &new_pos, 0);

	if (r > 0) rfilp->filp_pos = new_pos;
  }

  unlock_filp(rfilp);
  return(r);
}

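/* rw_pipe() below handles I/O on pipes.  pipe_check() decides how many bytes
 * can be transferred right now (or that the caller must suspend), the actual
 * transfer goes through the file system that maps the pipe (v_mapfs_e), and
 * a partial write without O_NONBLOCK suspends the caller with the progress
 * made so far recorded in fp->fp_cum_io_partial, to be continued later.
 */
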
/*===========================================================================*
 *				rw_pipe					     *
 *===========================================================================*/
int rw_pipe(rw_flag, usr_e, f, buf, req_size)
int rw_flag;			/* READING or WRITING */
endpoint_t usr_e;
struct filp *f;
char *buf;
size_t req_size;
{
  int r, oflags, partial_pipe = 0;
  size_t size, cum_io, cum_io_incr;
  struct vnode *vp;
  u64_t position, new_pos;

  /* Must make sure we're operating on a locked filp and vnode */
  assert(tll_islocked(&f->filp_vno->v_lock));
  assert(mutex_trylock(&f->filp_lock) == -EDEADLK);

  oflags = f->filp_flags;
  vp = f->filp_vno;
  position = cvu64((rw_flag == READING) ? vp->v_pipe_rd_pos :
		   vp->v_pipe_wr_pos);

  /* fp->fp_cum_io_partial is only nonzero when doing partial writes */
  cum_io = fp->fp_cum_io_partial;

  r = pipe_check(vp, rw_flag, oflags, req_size, position, 0);
  if (r <= 0) {
	if (r == SUSPEND) pipe_suspend(f, buf, req_size);
	return(r);
  }

  size = r;
  if (size < req_size) partial_pipe = 1;

  /* Truncate read request at size. */
  if ((rw_flag == READING) &&
	cmp64ul(add64ul(position, size), vp->v_size) > 0) {
	/* Position always should fit in an off_t (LONG_MAX). */
	off_t pos32;

	assert(cmp64ul(position, LONG_MAX) <= 0);
	pos32 = cv64ul(position);
	assert(pos32 >= 0);
	assert(pos32 <= LONG_MAX);
	size = vp->v_size - pos32;
  }

  if (vp->v_mapfs_e == 0)
	panic("unmapped pipe");

  r = req_readwrite(vp->v_mapfs_e, vp->v_mapinode_nr, position, rw_flag, usr_e,
		    buf, size, &new_pos, &cum_io_incr);

  if (r >= 0) {
	if (ex64hi(new_pos))
		panic("rw_pipe: bad new pos");

	position = new_pos;
	cum_io += cum_io_incr;
	buf += cum_io_incr;
	req_size -= cum_io_incr;
  }

  /* On write, update file size and access time. */
  if (rw_flag == WRITING) {
	if (cmp64ul(position, vp->v_size) > 0) {
		if (ex64hi(position) != 0) {
			panic("read_write: file size too big for v_size");
		}
		vp->v_size = ex64lo(position);
	}
  } else {
	if (cmp64ul(position, vp->v_size) >= 0) {
		/* Reset pipe pointers */
		vp->v_size = 0;
		vp->v_pipe_rd_pos = 0;
		vp->v_pipe_wr_pos = 0;
		position = cvu64(0);
	}
  }

  if (rw_flag == READING)
	vp->v_pipe_rd_pos = cv64ul(position);
  else
	vp->v_pipe_wr_pos = cv64ul(position);

  if (r == OK) {
	if (partial_pipe) {
		/* On a partial transfer with O_NONBLOCK, simply return the
		 * byte count transferred so far.
		 */
		if (!(oflags & O_NONBLOCK)) {
			/* Partial write on a pipe with req_size > PIPE_SIZE
			 * (non-atomic): record the progress made so far and
			 * suspend the caller.
			 */
			fp->fp_cum_io_partial = cum_io;
			pipe_suspend(f, buf, req_size);
			return(SUSPEND);
		}
	}

	fp->fp_cum_io_partial = 0;
	return(cum_io);
  }

  return(r);
}