retire 64-bit conversion functions
servers/mfs/read.c
#include "fs.h"
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
#include <minix/com.h>
#include <minix/u64.h>
#include "buf.h"
#include "inode.h"
#include "super.h"
#include <minix/vfsif.h>
#include <sys/param.h>
#include <assert.h>

static struct buf *rahead(struct inode *rip, block_t baseblock,
        u64_t position, unsigned bytes_ahead);
static int rw_chunk(struct inode *rip, u64_t position, unsigned off,
        size_t chunk, unsigned left, int rw_flag, cp_grant_id_t gid,
        unsigned buf_off, unsigned int block_size, int *completed);

/*===========================================================================*
 *				fs_readwrite				     *
 *===========================================================================*/
int fs_readwrite(void)
{
  int r, rw_flag, block_spec;
  int regular;
  cp_grant_id_t gid;
  off_t position, f_size, bytes_left;
  unsigned int off, cum_io, block_size, chunk;
  mode_t mode_word;
  int completed;
  struct inode *rip;
  size_t nrbytes;

  r = OK;

  /* Find the inode referred to. */
  if ((rip = find_inode(fs_dev, (ino_t) fs_m_in.REQ_INODE_NR)) == NULL)
        return(EINVAL);

  mode_word = rip->i_mode & I_TYPE;
  regular = (mode_word == I_REGULAR || mode_word == I_NAMED_PIPE);
  block_spec = (mode_word == I_BLOCK_SPECIAL ? 1 : 0);

  /* Determine the block size. */
  if (block_spec) {
        block_size = get_block_size( (dev_t) rip->i_zone[0]);
        f_size = MAX_FILE_POS;
  } else {
        block_size = rip->i_sp->s_block_size;
        f_size = rip->i_size;
  }

  /* Get the values from the request message. */
  switch(fs_m_in.m_type) {
        case REQ_READ: rw_flag = READING; break;
        case REQ_WRITE: rw_flag = WRITING; break;
        case REQ_PEEK: rw_flag = PEEKING; break;
        default: panic("odd request");
  }
  gid = (cp_grant_id_t) fs_m_in.REQ_GRANT;
  position = (off_t) fs_m_in.REQ_SEEK_POS_LO;
  nrbytes = (size_t) fs_m_in.REQ_NBYTES;

  lmfs_reset_rdwt_err();

  /* If this is file I/O, check that we may write. */
  if (rw_flag == WRITING && !block_spec) {
        if (rip->i_sp->s_rd_only)
                return EROFS;

        /* Check in advance to see if the file will grow too big. */
        if (position > (off_t) (rip->i_sp->s_max_size - nrbytes))
                return(EFBIG);

        /* Clear the zone containing the present EOF if a hole is about
         * to be created.  This is necessary because all unwritten
         * blocks prior to the EOF must read as zeros.
         */
        if (position > f_size) clear_zone(rip, f_size, 0);
  }

  /* If this is block I/O, check that we may write. */
  if (block_spec && rw_flag == WRITING &&
        (dev_t) rip->i_zone[0] == superblock.s_dev && superblock.s_rd_only)
        return EROFS;

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes > 0) {
        off = ((unsigned int) position) % block_size;   /* offset in block */
        chunk = min(nrbytes, block_size - off);
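
        /* Worked example (added for illustration): with block_size = 4096,
         * position = 4000 and nrbytes = 200, the first chunk is
         * min(200, 4096 - 4000) = 96 bytes, and the next iteration starts
         * at offset 0 with the remaining 104 bytes, so no chunk ever spans
         * a block boundary.
         */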

        if (rw_flag == READING) {
                bytes_left = f_size - position;
                if (position >= f_size) break;  /* we are beyond EOF */
                if (chunk > (unsigned int) bytes_left) chunk = bytes_left;
        }

        /* Read or write 'chunk' bytes. */
        r = rw_chunk(rip, ((u64_t)((unsigned long)position)), off, chunk,
                nrbytes, rw_flag, gid, cum_io, block_size, &completed);

        if (r != OK) break;     /* EOF reached */
        if (lmfs_rdwt_err() < 0) break;

        /* Update counters and pointers. */
        nrbytes -= chunk;       /* bytes yet to be read */
        cum_io += chunk;        /* bytes read so far */
        position += (off_t) chunk;      /* position within the file */
  }

  fs_m_out.RES_SEEK_POS_LO = position;  /* It might change later and the VFS
                                         * has to know this value */

  /* On write, update the file size and access time. */
  if (rw_flag == WRITING) {
        if (regular || mode_word == I_DIRECTORY) {
                if (position > f_size) rip->i_size = position;
        }
  }

  rip->i_seek = NO_SEEK;

  if (lmfs_rdwt_err() != OK) r = lmfs_rdwt_err(); /* check for disk error */
  if (lmfs_rdwt_err() == END_OF_FILE) r = OK;

  /* Even on a ROFS, writing to a device node on it is fine,
   * just don't update the inode stats for it.  And ditto for reading.
   */
  if (r == OK && !rip->i_sp->s_rd_only) {
        if (rw_flag == READING) rip->i_update |= ATIME;
        if (rw_flag == WRITING) rip->i_update |= CTIME | MTIME;
        IN_MARKDIRTY(rip);      /* inode is thus now dirty */
  }

  fs_m_out.RES_NBYTES = cum_io;

  return(r);
}

/*===========================================================================*
 *				fs_breadwrite				     *
 *===========================================================================*/
int fs_breadwrite(void)
{
  int r, rw_flag, completed;
  cp_grant_id_t gid;
  u64_t position;
  unsigned int off, cum_io, chunk, block_size;
  size_t nrbytes;
  dev_t target_dev;

  /* Pseudo inode for rw_chunk. */
  struct inode rip;

  r = OK;

  target_dev = (dev_t) fs_m_in.REQ_DEV2;

  /* Get the values from the request message. */
  rw_flag = (fs_m_in.m_type == REQ_BREAD ? READING : WRITING);
  gid = (cp_grant_id_t) fs_m_in.REQ_GRANT;
  position = make64((unsigned long) fs_m_in.REQ_SEEK_POS_LO,
        (unsigned long) fs_m_in.REQ_SEEK_POS_HI);
  nrbytes = (size_t) fs_m_in.REQ_NBYTES;
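
  /* Note (added): the 64-bit device position arrives split over two 32-bit
   * message fields; make64(lo, hi) reassembles it as lo + (hi << 32).
   * See <minix/u64.h> for these helpers.
   */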

  block_size = get_block_size(target_dev);

  /* Don't block-write to a read-only-mounted file system. */
  if (superblock.s_dev == target_dev && superblock.s_rd_only)
        return EROFS;

  rip.i_zone[0] = (zone_t) target_dev;
  rip.i_mode = I_BLOCK_SPECIAL;
  rip.i_size = 0;

  lmfs_reset_rdwt_err();

  cum_io = 0;
  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes > 0) {
        off = rem64u(position, block_size);     /* offset in block */
        chunk = min(nrbytes, block_size - off);

        /* Read or write 'chunk' bytes. */
        r = rw_chunk(&rip, position, off, chunk, nrbytes, rw_flag, gid,
                cum_io, block_size, &completed);

        if (r != OK) break;     /* EOF reached */
        if (lmfs_rdwt_err() < 0) break;

        /* Update counters and pointers. */
        nrbytes -= chunk;       /* bytes yet to be read */
        cum_io += chunk;        /* bytes read so far */
        position = add64ul(position, chunk);    /* position within the file */
  }

  fs_m_out.RES_SEEK_POS_LO = ex64lo(position);
  fs_m_out.RES_SEEK_POS_HI = ex64hi(position);

  if (lmfs_rdwt_err() != OK) r = lmfs_rdwt_err(); /* check for disk error */
  if (lmfs_rdwt_err() == END_OF_FILE) r = OK;

  fs_m_out.RES_NBYTES = cum_io;

  return(r);
}

/*===========================================================================*
 *				rw_chunk				     *
 *===========================================================================*/
static int rw_chunk(rip, position, off, chunk, left, rw_flag, gid,
        buf_off, block_size, completed)
register struct inode *rip;     /* pointer to inode for file to be rd/wr */
u64_t position;                 /* position within file to read or write */
unsigned off;                   /* offset within the current block */
unsigned int chunk;             /* number of bytes to read or write */
unsigned left;                  /* max number of bytes wanted after position */
int rw_flag;                    /* READING, WRITING or PEEKING */
cp_grant_id_t gid;              /* grant */
unsigned buf_off;               /* offset in grant */
unsigned int block_size;        /* block size of FS operating on */
int *completed;                 /* number of bytes copied */
{
/* Read or write (part of) a block. */

  register struct buf *bp = NULL;
  register int r = OK;
  int n, block_spec;
  block_t b;
  dev_t dev;
  ino_t ino = VMC_NO_INODE;
  u64_t ino_off = rounddown(position, block_size);
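
  /* Note (added): ino_off is the file offset of the start of the block that
   * contains 'position'.  For non-device files it is passed to
   * lmfs_get_block_ino() below, so the cache can tag the buffer with an
   * (inode, offset) pair rather than just a device block number.
   */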

  /* rw_flag:
   *   READING: read from FS, copy to user
   *   WRITING: copy from user, write to FS
   *   PEEKING: try to get all the blocks into the cache, no copying
   */

  *completed = 0;

  block_spec = (rip->i_mode & I_TYPE) == I_BLOCK_SPECIAL;

  if (block_spec) {
        b = div64u(position, block_size);
        dev = (dev_t) rip->i_zone[0];
  } else {
        if (ex64hi(position) != 0)
                panic("rw_chunk: position too high");
        b = read_map(rip, (off_t) ex64lo(position), 0);
        dev = rip->i_dev;
        ino = rip->i_num;
        assert(ino != VMC_NO_INODE);
  }

  if (!block_spec && b == NO_BLOCK) {
        if (rw_flag == READING) {
                /* Reading from a nonexistent block.  Must read as all zeros. */
                r = sys_safememset(VFS_PROC_NR, gid, (vir_bytes) buf_off,
                        0, (size_t) chunk);
                if (r != OK) {
                        printf("MFS: sys_safememset failed\n");
                }
                return r;
        } else {
                /* Writing to or peeking a nonexistent block.
                 * Create it and enter it in the inode.
                 */
                if ((bp = new_block(rip, (off_t) ex64lo(position))) == NULL)
                        return(err_code);
        }
  } else if (rw_flag == READING || rw_flag == PEEKING) {
        /* Read, and read ahead if convenient. */
        bp = rahead(rip, b, position, left);
  } else {
        /* Normally an existing block to be partially overwritten is first read
         * in.  However, a full block need not be read in.  If it is already in
         * the cache, acquire it; otherwise just acquire a free buffer.
         */
        n = (chunk == block_size ? NO_READ : NORMAL);
        if (!block_spec && off == 0 && (off_t) ex64lo(position) >= rip->i_size)
                n = NO_READ;
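
        /* Note (added): NO_READ is safe in both cases above: either the
         * whole block is about to be overwritten (chunk == block_size), or
         * the write starts at offset 0 at or beyond the current EOF; a
         * partial write at EOF is handled by zero_block() further down.
         */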
        if (block_spec) {
                assert(ino == VMC_NO_INODE);
                bp = get_block(dev, b, n);
        } else {
                assert(ino != VMC_NO_INODE);
                assert(!(ino_off % block_size));
                bp = lmfs_get_block_ino(dev, b, n, ino, ino_off);
        }
  }

  /* In all cases, bp now points to a valid buffer. */
  assert(bp);

  if (rw_flag == WRITING && chunk != block_size && !block_spec &&
        (off_t) ex64lo(position) >= rip->i_size && off == 0) {
        zero_block(bp);
  }

  if (rw_flag == READING) {
        /* Copy a chunk from the block buffer to user space. */
        r = sys_safecopyto(VFS_PROC_NR, gid, (vir_bytes) buf_off,
                (vir_bytes) (b_data(bp)+off), (size_t) chunk);
  } else if (rw_flag == WRITING) {
        /* Copy a chunk from user space to the block buffer. */
        r = sys_safecopyfrom(VFS_PROC_NR, gid, (vir_bytes) buf_off,
                (vir_bytes) (b_data(bp)+off), (size_t) chunk);
        MARKDIRTY(bp);
  }

  n = (off + chunk == block_size ? FULL_DATA_BLOCK : PARTIAL_DATA_BLOCK);
  put_block(bp, n);

  return(r);
}

/*===========================================================================*
 *				read_map				     *
 *===========================================================================*/
block_t read_map(rip, position, opportunistic)
register struct inode *rip;     /* ptr to inode to map from */
off_t position;                 /* position in file whose blk wanted */
int opportunistic;              /* if nonzero, only use cache for metadata */
{
/* Given an inode and a position within the corresponding file, locate the
 * block (not zone) number in which that position is to be found and return
 * it.
 */

  struct buf *bp;
  zone_t z;
  int scale, boff, index, zind;
  unsigned int dzones, nr_indirects;
  block_t b;
  unsigned long excess, zone, block_pos;
  int iomode = NORMAL;

  if (opportunistic) iomode = PREFETCH;

  scale = rip->i_sp->s_log_zone_size;   /* for block-zone conversion */
  block_pos = position/rip->i_sp->s_block_size; /* relative blk # in file */
  zone = block_pos >> scale;    /* position's zone */
  boff = (int) (block_pos - (zone << scale));   /* relative blk # within zone */
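
  /* Worked example (added for illustration): with s_log_zone_size = 0 the
   * zone size equals the block size, so zone == block_pos and boff == 0.
   * With s_log_zone_size = 2 (four blocks per zone), block 13 of the file
   * gives zone = 13 >> 2 = 3 and boff = 13 - (3 << 2) = 1.
   */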
  dzones = rip->i_ndzones;
  nr_indirects = rip->i_nindirs;

  /* Is 'position' to be found in the inode itself? */
  if (zone < dzones) {
        zind = (int) zone;      /* index should be an int */
        z = rip->i_zone[zind];
        if (z == NO_ZONE) return(NO_BLOCK);
        b = (block_t) ((z << scale) + boff);
        return(b);
  }

  /* It is not in the inode, so it must be single or double indirect. */
  excess = zone - dzones;       /* first Vx_NR_DZONES don't count */

  if (excess < nr_indirects) {
        /* 'position' can be located via the single indirect block. */
        z = rip->i_zone[dzones];
  } else {
        /* 'position' can be located via the double indirect block. */
        if ((z = rip->i_zone[dzones+1]) == NO_ZONE) return(NO_BLOCK);
        excess -= nr_indirects; /* single indirect doesn't count */
        b = (block_t) z << scale;
        ASSERT(rip->i_dev != NO_DEV);
        index = (int) (excess/nr_indirects);
        if ((unsigned int) index > rip->i_nindirs)
                return(NO_BLOCK);       /* can't go beyond double indirects */
        bp = get_block(rip->i_dev, b, iomode);  /* get double indirect block */
        if (opportunistic && lmfs_dev(bp) == NO_DEV) {
                put_block(bp, INDIRECT_BLOCK);
                return NO_BLOCK;
        }
        ASSERT(lmfs_dev(bp) != NO_DEV);
        ASSERT(lmfs_dev(bp) == rip->i_dev);
        z = rd_indir(bp, index);        /* z = zone for single */
        put_block(bp, INDIRECT_BLOCK);  /* release double ind block */
        excess = excess % nr_indirects; /* index into single ind blk */
  }
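
  /* Capacity sketch (added, assuming a V2 file system with 4 KB blocks and
   * zone size == block size): nr_indirects = 4096 / 4 = 1024, so on top of
   * the 7 direct zones come up to 1024 singly and 1024 * 1024 doubly
   * indirect zones.
   */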

  /* 'z' is zone num for single indirect block; 'excess' is index into it. */
  if (z == NO_ZONE) return(NO_BLOCK);
  b = (block_t) z << scale;     /* b is blk # for single ind */
  bp = get_block(rip->i_dev, b, iomode);        /* get single indirect block */
  if (opportunistic && lmfs_dev(bp) == NO_DEV) {
        put_block(bp, INDIRECT_BLOCK);
        return NO_BLOCK;
  }
  z = rd_indir(bp, (int) excess);       /* get block pointed to */
  put_block(bp, INDIRECT_BLOCK);        /* release single indir blk */
  if (z == NO_ZONE) return(NO_BLOCK);
  b = (block_t) ((z << scale) + boff);
  return(b);
}

struct buf *get_block_map(register struct inode *rip, u64_t position)
{
  block_t b = read_map(rip, position, 0);       /* get block number */
  int block_size = get_block_size(rip->i_dev);
  if (b == NO_BLOCK)
        return NULL;
  position = rounddown(position, block_size);
  assert(rip->i_num != VMC_NO_INODE);
  return lmfs_get_block_ino(rip->i_dev, b, NORMAL, rip->i_num, position);
}

/*===========================================================================*
 *				rd_indir				     *
 *===========================================================================*/
zone_t rd_indir(bp, index)
struct buf *bp;                 /* pointer to indirect block */
int index;                      /* index into *bp */
{
/* Given a pointer to an indirect block, read one entry.  The reason for
 * making a separate routine out of this is that there are four cases:
 * V1 (IBM and 68000), and V2 (IBM and 68000).
 */

  struct super_block *sp;
  zone_t zone;                  /* V2 zones are longs (shorts in V1) */

  if (bp == NULL)
        panic("rd_indir() on NULL");

  sp = get_super(lmfs_dev(bp)); /* need super block to find file sys type */

  /* Read a zone from an indirect block. */
  if (sp->s_version == V1)
        zone = (zone_t) conv2(sp->s_native, (int) b_v1_ind(bp)[index]);
  else
        zone = (zone_t) conv4(sp->s_native, (long) b_v2_ind(bp)[index]);

  if (zone != NO_ZONE &&
        (zone < (zone_t) sp->s_firstdatazone || zone >= sp->s_zones)) {
        printf("Illegal zone number %ld in indirect block, index %d\n",
                (long) zone, index);
        panic("check file system");
  }

  return(zone);
}

/*===========================================================================*
 *				rahead					     *
 *===========================================================================*/
static struct buf *rahead(rip, baseblock, position, bytes_ahead)
register struct inode *rip;     /* pointer to inode for file to be read */
block_t baseblock;              /* block at current position */
u64_t position;                 /* position within file */
unsigned bytes_ahead;           /* bytes beyond position for immediate use */
{
/* Fetch a block from the cache or the device.  If a physical read is
 * required, prefetch as many more blocks as convenient into the cache.
 * This usually covers bytes_ahead and is at least BLOCKS_MINIMUM.
 * The device driver may decide it knows better and stop reading at a
 * cylinder boundary (or after an error).  Rw_scattered() puts an optional
 * flag on all reads to allow this.
 */
/* Minimum number of blocks to prefetch. */
  int nr_bufs = lmfs_nr_bufs();
# define BLOCKS_MINIMUM         (nr_bufs < 50 ? 18 : 32)
  int block_spec, scale, read_q_size;
  unsigned int blocks_ahead, fragment, block_size;
  block_t block, blocks_left;
  off_t ind1_pos;
  dev_t dev;
  struct buf *bp;
  static unsigned int readqsize = 0;
  static struct buf **read_q;
  u64_t position_running;

  if (readqsize != nr_bufs) {
        if (readqsize > 0) {
                assert(read_q != NULL);
                free(read_q);
        }
        if (!(read_q = malloc(sizeof(read_q[0])*nr_bufs)))
                panic("couldn't allocate read_q");
        readqsize = nr_bufs;
  }

  block_spec = (rip->i_mode & I_TYPE) == I_BLOCK_SPECIAL;
  if (block_spec)
        dev = (dev_t) rip->i_zone[0];
  else
        dev = rip->i_dev;

  assert(dev != NO_DEV);

  block_size = get_block_size(dev);

  block = baseblock;

  fragment = position % block_size;
  position -= fragment;
  position_running = position;
  bytes_ahead += fragment;
  blocks_ahead = (bytes_ahead + block_size - 1) / block_size;
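
  /* Worked example (added for illustration): with block_size = 4096,
   * position = 5000 and bytes_ahead = 2000: fragment = 904, position is
   * rounded down to 4096, bytes_ahead grows to 2904, and blocks_ahead =
   * (2904 + 4095) / 4096 = 1, i.e. the prefetch window is computed in
   * whole, block-aligned units.
   */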

  if (block_spec)
        bp = get_block(dev, block, PREFETCH);
  else
        bp = lmfs_get_block_ino(dev, block, PREFETCH, rip->i_num, position);

  assert(bp != NULL);
  if (lmfs_dev(bp) != NO_DEV) return(bp);

  /* The best guess for the number of blocks to prefetch: a lot.
   * It is impossible to tell what the device looks like, so we don't even
   * try to guess the geometry, but leave it to the driver.
   *
   * The floppy driver can read a full track with no rotational delay, and it
   * avoids reading partial tracks if it can, so handing it enough buffers to
   * read two tracks is perfect.  (Two, because some diskette types have
   * an odd number of sectors per track, so a block may span tracks.)
   *
   * The disk drivers don't try to be smart.  With today's disks it is
   * impossible to tell what the real geometry looks like, so it is best to
   * read as much as you can.  With luck the caching on the drive allows
   * for a little time to start the next read.
   *
   * The current solution below is a bit of a hack: it just reads blocks from
   * the current file position hoping that more of the file can be found.  A
   * better solution must look at the already available zone pointers and
   * indirect blocks (but don't call read_map!).
   */

  if (block_spec && rip->i_size == 0) {
        blocks_left = (block_t) NR_IOREQS;
  } else {
        blocks_left = (block_t) (rip->i_size-ex64lo(position)+(block_size-1)) /
                block_size;

        /* Go for the first indirect block if we are in its neighborhood. */
        if (!block_spec) {
                scale = rip->i_sp->s_log_zone_size;
                ind1_pos = (off_t) rip->i_ndzones * (block_size << scale);
                if ((off_t) ex64lo(position) <= ind1_pos &&
                        rip->i_size > ind1_pos) {
                        blocks_ahead++;
                        blocks_left++;
                }
        }
  }

  /* No more than the maximum request. */
  if (blocks_ahead > NR_IOREQS) blocks_ahead = NR_IOREQS;

  /* Read at least the minimum number of blocks, but not after a seek. */
  if (blocks_ahead < BLOCKS_MINIMUM && rip->i_seek == NO_SEEK)
        blocks_ahead = BLOCKS_MINIMUM;

  /* Can't go past end of file. */
  if (blocks_ahead > blocks_left) blocks_ahead = blocks_left;

  read_q_size = 0;

  /* Acquire block buffers. */
  for (;;) {
        block_t thisblock;
        read_q[read_q_size++] = bp;

        if (--blocks_ahead == 0) break;

        /* Don't trash the cache, leave 4 free. */
        if (lmfs_bufs_in_use() >= nr_bufs - 4) break;

        block++;
        position_running += block_size;

        if (!block_spec &&
           (thisblock = read_map(rip, (off_t) ex64lo(position_running), 1)) != NO_BLOCK) {
                bp = lmfs_get_block_ino(dev, thisblock, PREFETCH, rip->i_num,
                        position_running);
        } else {
                bp = get_block(dev, block, PREFETCH);
        }
        if (lmfs_dev(bp) != NO_DEV) {
                /* Oops, block already in the cache, get out. */
                put_block(bp, FULL_DATA_BLOCK);
                break;
        }
  }
  lmfs_rw_scattered(dev, read_q, read_q_size, READING);
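
  /* Note (added): every buffer queued in read_q above still has no valid
   * device contents (lmfs_dev(bp) == NO_DEV); the scattered request lets
   * the driver fill as many of them as it conveniently can in one
   * operation.
   */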

  if (block_spec)
        return get_block(dev, baseblock, NORMAL);
  return(lmfs_get_block_ino(dev, baseblock, NORMAL, rip->i_num, position));
}

/*===========================================================================*
 *				fs_getdents				     *
 *===========================================================================*/
int fs_getdents(void)
{
#define GETDENTS_BUFSIZE        (sizeof(struct dirent) + MFS_NAME_MAX + 1)
#define GETDENTS_ENTRIES        8
  static char getdents_buf[GETDENTS_BUFSIZE * GETDENTS_ENTRIES];
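
  /* Strategy note (added): entries are staged in getdents_buf and flushed
   * to the caller's grant in batches, so one sys_safecopyto covers up to a
   * buffer-full of records instead of one copy per entry.
   */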
  register struct inode *rip;
  int o, r, done;
  unsigned int block_size, len, reclen;
  ino_t ino;
  cp_grant_id_t gid;
  size_t size, tmpbuf_off, userbuf_off;
  off_t pos, off, block_pos, new_pos, ent_pos;
  struct buf *bp;
  struct direct *dp;
  struct dirent *dep;
  char *cp;

  ino = (ino_t) fs_m_in.REQ_INODE_NR;
  gid = (cp_grant_id_t) fs_m_in.REQ_GRANT;
  size = (size_t) fs_m_in.REQ_MEM_SIZE;
  pos = (off_t) fs_m_in.REQ_SEEK_POS_LO;

  /* Check whether the position is properly aligned. */
  if ((unsigned int) pos % DIR_ENTRY_SIZE)
        return(ENOENT);

  if ((rip = get_inode(fs_dev, ino)) == NULL)
        return(EINVAL);

  block_size = rip->i_sp->s_block_size;
  off = (pos % block_size);     /* offset in block */
  block_pos = pos - off;
  done = FALSE;         /* stop processing directory blocks when done is set */

  tmpbuf_off = 0;       /* offset in getdents_buf */
  memset(getdents_buf, '\0', sizeof(getdents_buf)); /* avoid leaking any data */
  userbuf_off = 0;      /* offset in the user's buffer */

  /* The default position for the next request is EOF.  If the user's buffer
   * fills up before EOF, new_pos will be modified.
   */
  new_pos = rip->i_size;

  for (; block_pos < rip->i_size; block_pos += block_size) {
        /* Since directories don't have holes, 'bp' cannot be NULL. */
        bp = get_block_map(rip, block_pos);     /* get a dir block */
        assert(bp != NULL);

        /* Search a directory block. */
        if (block_pos < pos)
                dp = &b_dir(bp)[off / DIR_ENTRY_SIZE];
        else
                dp = &b_dir(bp)[0];
        for (; dp < &b_dir(bp)[NR_DIR_ENTRIES(block_size)]; dp++) {
                if (dp->mfs_d_ino == 0)
                        continue;       /* entry is not in use */

                /* Compute the length of the name. */
                cp = memchr(dp->mfs_d_name, '\0', sizeof(dp->mfs_d_name));
                if (cp == NULL)
                        len = sizeof(dp->mfs_d_name);
                else
                        len = cp - (dp->mfs_d_name);

                /* Compute the record length. */
                reclen = offsetof(struct dirent, d_name) + len + 1;
                o = (reclen % sizeof(long));
                if (o != 0)
                        reclen += sizeof(long) - o;
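
                /* Worked example (added for illustration): for a three-byte
                 * name, reclen = offsetof(struct dirent, d_name) + 3 + 1,
                 * rounded up to the next multiple of sizeof(long) so that
                 * each struct dirent in the buffer stays aligned.
                 */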

                /* We need the position of this entry in the directory. */
                ent_pos = block_pos + ((char *) dp - (char *) bp->data);

                if (userbuf_off + tmpbuf_off + reclen >= size) {
                        /* The user has no space for one more record. */
                        done = TRUE;

                        /* Record the position of this entry; it is the
                         * starting point of the next request (unless the
                         * position is modified with lseek).
                         */
                        new_pos = ent_pos;
                        break;
                }

                if (tmpbuf_off + reclen >= GETDENTS_BUFSIZE*GETDENTS_ENTRIES) {
                        r = sys_safecopyto(VFS_PROC_NR, gid,
                                (vir_bytes) userbuf_off,
                                (vir_bytes) getdents_buf,
                                (size_t) tmpbuf_off);
                        if (r != OK) {
                                put_inode(rip);
                                return(r);
                        }

                        userbuf_off += tmpbuf_off;
                        tmpbuf_off = 0;
                }

                dep = (struct dirent *) &getdents_buf[tmpbuf_off];
                dep->d_ino = dp->mfs_d_ino;
                dep->d_off = ent_pos;
                dep->d_reclen = (unsigned short) reclen;
                memcpy(dep->d_name, dp->mfs_d_name, len);
                dep->d_name[len] = '\0';
                tmpbuf_off += reclen;
        }

        put_block(bp, DIRECTORY_BLOCK);
        if (done)
                break;
  }

  if (tmpbuf_off != 0) {
        r = sys_safecopyto(VFS_PROC_NR, gid, (vir_bytes) userbuf_off,
                (vir_bytes) getdents_buf, (size_t) tmpbuf_off);
        if (r != OK) {
                put_inode(rip);
                return(r);
        }

        userbuf_off += tmpbuf_off;
  }

  if (done && userbuf_off == 0)
        r = EINVAL;     /* the user's buffer is too small */
  else {
        fs_m_out.RES_NBYTES = userbuf_off;
        fs_m_out.RES_SEEK_POS_LO = new_pos;
        if (!rip->i_sp->s_rd_only) {
                rip->i_update |= ATIME;
                IN_MARKDIRTY(rip);
        }
        r = OK;
  }

  put_inode(rip);       /* release the inode */
  return(r);
}