/* Created (MFS based):
 *   February 2010 (Evgeniy Ivanov)
 */

#include "fs.h"
#include <stddef.h>
#include <string.h>
#include <stdlib.h>
#include <minix/com.h>
#include <minix/u64.h>
#include "buf.h"
#include "inode.h"
#include "super.h"
#include <minix/vfsif.h>
#include <sys/param.h>
#include <assert.h>

static struct buf *rahead(struct inode *rip, block_t baseblock,
        u64_t position, unsigned bytes_ahead);
static int rw_chunk(struct inode *rip, u64_t position, unsigned off,
        size_t chunk, unsigned left, int rw_flag, cp_grant_id_t gid,
        unsigned buf_off, unsigned int block_size, int *completed);

static off_t rdahedpos;			/* position to read ahead */
static struct inode *rdahed_inode;	/* pointer to inode to read ahead */
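
/* fs_readwrite() records an inode and position here when it detects
 * sequential reading; read_ahead() consumes them to prefetch the next
 * block.  (Orientation note, an assumption not visible in this file:
 * read_ahead() is expected to be invoked from the server's main loop
 * when it has nothing else to do.)
 */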

/*===========================================================================*
 *				fs_readwrite				     *
 *===========================================================================*/
int fs_readwrite(void)
{
  int r, rw_flag, block_spec;
  int regular;
  int completed;
  cp_grant_id_t gid;
  off_t position, f_size, bytes_left;
  unsigned int off, cum_io, block_size, chunk;
  mode_t mode_word;
  size_t nrbytes;
  struct inode *rip;

  r = OK;

  /* Find the inode referred to. */
  if ((rip = find_inode(fs_dev, (ino_t) fs_m_in.REQ_INODE_NR)) == NULL)
	return(EINVAL);

  mode_word = rip->i_mode & I_TYPE;
  regular = (mode_word == I_REGULAR || mode_word == I_NAMED_PIPE);
  block_spec = (mode_word == I_BLOCK_SPECIAL ? 1 : 0);

  /* Determine block size. */
  if (block_spec) {
	block_size = get_block_size( (dev_t) rip->i_block[0]);
	f_size = MAX_FILE_POS;
  } else {
	block_size = rip->i_sp->s_block_size;
	f_size = rip->i_size;
	if (f_size < 0) f_size = MAX_FILE_POS;
  }

  /* Get the values from the request message. */
  rw_flag = (fs_m_in.m_type == REQ_READ ? READING : WRITING);
  gid = (cp_grant_id_t) fs_m_in.REQ_GRANT;
  position = (off_t) fs_m_in.REQ_SEEK_POS_LO;
  nrbytes = (size_t) fs_m_in.REQ_NBYTES;

  rdwt_err = OK;		/* set to EIO if disk error occurs */

  if (rw_flag == WRITING && !block_spec) {
	/* Check in advance to see if the file will grow too big. */
	if (position > (off_t) (rip->i_sp->s_max_size - nrbytes))
		return(EFBIG);
  }

  cum_io = 0;
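
  /* Worked example: with a 4 KiB block size, a 10000-byte read starting at
   * position 6000 is split below into chunks of 2192 bytes (6000 % 4096 =
   * 1904 bytes into the first block), then 4096 bytes, then 3712 bytes, so
   * no chunk ever spans a block boundary.
   */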

  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes != 0) {
	off = (unsigned int) (position % block_size);	/* offset in blk */
	chunk = MIN(nrbytes, block_size - off);

	if (rw_flag == READING) {
		bytes_left = f_size - position;
		if (position >= f_size) break;	/* we are beyond EOF */
		if (chunk > bytes_left) chunk = (int) bytes_left;
	}

	/* Read or write 'chunk' bytes. */
	r = rw_chunk(rip, cvul64((unsigned long) position), off, chunk,
		     nrbytes, rw_flag, gid, cum_io, block_size, &completed);

	if (r != OK) break;	/* EOF reached */
	if (rdwt_err < 0) break;

	/* Update counters and pointers. */
	nrbytes -= chunk;	/* bytes yet to be read */
	cum_io += chunk;	/* bytes read so far */
	position += (off_t) chunk;	/* position within the file */
  }

  fs_m_out.RES_SEEK_POS_LO = position;	/* It might change later and the VFS
					 * has to know this value. */

  /* On write, update file size and access time. */
  if (rw_flag == WRITING) {
	if (regular || mode_word == I_DIRECTORY) {
		if (position > f_size) rip->i_size = position;
	}
  }

  /* Check to see if read-ahead is called for, and if so, set it up. */
  if (rw_flag == READING && rip->i_seek == NO_SEEK &&
      (unsigned int) position % block_size == 0 &&
      (regular || mode_word == I_DIRECTORY)) {
	rdahed_inode = rip;
	rdahedpos = position;
  }

  rip->i_seek = NO_SEEK;

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;

  if (r == OK) {
	if (rw_flag == READING) rip->i_update |= ATIME;
	if (rw_flag == WRITING) rip->i_update |= CTIME | MTIME;
	rip->i_dirt = IN_DIRTY;		/* inode is thus now dirty */
  }

  fs_m_out.RES_NBYTES = cum_io;

  return(r);
}

/*===========================================================================*
 *				fs_breadwrite				     *
 *===========================================================================*/
int fs_breadwrite(void)
{
  int r, rw_flag, completed;
  cp_grant_id_t gid;
  u64_t position;
  unsigned int off, cum_io, chunk, block_size;
  size_t nrbytes;

  /* Pseudo inode for rw_chunk. */
  struct inode rip;

  r = OK;

  /* Get the values from the request message. */
  rw_flag = (fs_m_in.m_type == REQ_BREAD ? READING : WRITING);
  gid = (cp_grant_id_t) fs_m_in.REQ_GRANT;
  position = make64((unsigned long) fs_m_in.REQ_SEEK_POS_LO,
		    (unsigned long) fs_m_in.REQ_SEEK_POS_HI);
  nrbytes = (size_t) fs_m_in.REQ_NBYTES;

  block_size = get_block_size( (dev_t) fs_m_in.REQ_DEV2);

  rip.i_block[0] = (block_t) fs_m_in.REQ_DEV2;
  rip.i_mode = I_BLOCK_SPECIAL;
  rip.i_size = 0;

  rdwt_err = OK;		/* set to EIO if disk error occurs */

  cum_io = 0;
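
  /* Note: unlike fs_readwrite(), position is a full 64-bit value here, so
   * all arithmetic on it goes through the u64 helpers used below (rem64u,
   * add64ul, ex64lo/ex64hi).  E.g. for position 0x100000123 and a 1024-byte
   * block size, rem64u(position, block_size) yields 0x123.
   */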

  /* Split the transfer into chunks that don't span two blocks. */
  while (nrbytes > 0) {
	off = rem64u(position, block_size);	/* offset in blk */
	chunk = min(nrbytes, block_size - off);

	/* Read or write 'chunk' bytes. */
	r = rw_chunk(&rip, position, off, chunk, nrbytes, rw_flag, gid,
		     cum_io, block_size, &completed);

	if (r != OK) break;	/* EOF reached */
	if (rdwt_err < 0) break;

	/* Update counters and pointers. */
	nrbytes -= chunk;	/* bytes yet to be read */
	cum_io += chunk;	/* bytes read so far */
	position = add64ul(position, chunk);	/* position within the file */
  }

  fs_m_out.RES_SEEK_POS_LO = ex64lo(position);
  fs_m_out.RES_SEEK_POS_HI = ex64hi(position);

  if (rdwt_err != OK) r = rdwt_err;	/* check for disk error */
  if (rdwt_err == END_OF_FILE) r = OK;

  fs_m_out.RES_NBYTES = cum_io;

  return(r);
}

/*===========================================================================*
 *				rw_chunk				     *
 *===========================================================================*/
static int rw_chunk(rip, position, off, chunk, left, rw_flag, gid,
		    buf_off, block_size, completed)
register struct inode *rip;	/* pointer to inode for file to be rd/wr */
u64_t position;			/* position within file to read or write */
unsigned off;			/* off within the current block */
unsigned int chunk;		/* number of bytes to read or write */
unsigned left;			/* max number of bytes wanted after position */
int rw_flag;			/* READING or WRITING */
cp_grant_id_t gid;		/* grant */
unsigned buf_off;		/* offset in grant */
unsigned int block_size;	/* block size of FS operating on */
int *completed;			/* number of bytes copied */
{
/* Read or write (part of) a block. */

  register struct buf *bp;
  int r = OK;
  int n, block_spec;
  block_t b;
  dev_t dev;

  *completed = 0;

  block_spec = (rip->i_mode & I_TYPE) == I_BLOCK_SPECIAL;

  if (block_spec) {
	b = div64u(position, block_size);
	dev = (dev_t) rip->i_block[0];
  } else {
	if (ex64hi(position) != 0)
		panic("rw_chunk: position too high");
	b = read_map(rip, (off_t) ex64lo(position));
	dev = rip->i_dev;
  }

  if (!block_spec && b == NO_BLOCK) {
	if (rw_flag == READING) {
		/* Reading from a nonexistent block.  Must read as all zeros. */
		r = sys_safememset(VFS_PROC_NR, gid, (vir_bytes) buf_off,
				   0, (size_t) chunk);
		if (r != OK)
			printf("ext2fs: sys_safememset failed\n");
		return r;
	} else {
		/* Writing to a nonexistent block. Create and enter in inode. */
		if ((bp = new_block(rip, (off_t) ex64lo(position))) == NULL)
			return(err_code);
	}
  } else if (rw_flag == READING) {
	/* Read and read ahead if convenient. */
	bp = rahead(rip, b, position, left);
  } else {
	/* Normally an existing block to be partially overwritten is first
	 * read in.  However, a full block need not be read in.  If it is
	 * already in the cache, acquire it; otherwise just acquire a free
	 * buffer.
	 */
	n = (chunk == block_size ? NO_READ : NORMAL);
	if (!block_spec && off == 0 && (off_t) ex64lo(position) >= rip->i_size)
		n = NO_READ;
	bp = get_block(dev, b, n);
  }

  /* In all cases, bp now points to a valid buffer. */
  if (bp == NULL)
	panic("bp not valid in rw_chunk, this can't happen");
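
  /* Rationale for the extra NO_READ case above: a write that starts at
   * offset 0 of a block lying at or beyond the end of file cannot need the
   * block's old contents, so reading it in first would be wasted I/O.
   */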

  if (rw_flag == WRITING && chunk != block_size && !block_spec &&
      (off_t) ex64lo(position) >= rip->i_size && off == 0) {
	zero_block(bp);
  }

  if (rw_flag == READING) {
	/* Copy a chunk from the block buffer to user space. */
	r = sys_safecopyto(VFS_PROC_NR, gid, (vir_bytes) buf_off,
			   (vir_bytes) (b_data(bp)+off), (size_t) chunk);
  } else {
	/* Copy a chunk from user space to the block buffer. */
	r = sys_safecopyfrom(VFS_PROC_NR, gid, (vir_bytes) buf_off,
			     (vir_bytes) (b_data(bp)+off), (size_t) chunk);
	lmfs_markdirty(bp);
  }

  n = (off + chunk == block_size ? FULL_DATA_BLOCK : PARTIAL_DATA_BLOCK);
  put_block(bp, n);

  return(r);
}

/*===========================================================================*
 *				read_map				     *
 *===========================================================================*/
block_t read_map(rip, position)
register struct inode *rip;	/* ptr to inode to map from */
off_t position;			/* position in file whose blk wanted */
{
/* Given an inode and a position within the corresponding file, locate the
 * block number in which that position is to be found and return it.
 */

  struct buf *bp;
  int index;
  block_t b;
  unsigned long excess, block_pos;
  static char first_time = TRUE;
  static long addr_in_block;
  static long addr_in_block2;
  static long doub_ind_s;
  static long triple_ind_s;
  static long out_range_s;

  if (first_time) {
	addr_in_block = rip->i_sp->s_block_size / BLOCK_ADDRESS_BYTES;
	addr_in_block2 = addr_in_block * addr_in_block;
	doub_ind_s = EXT2_NDIR_BLOCKS + addr_in_block;
	triple_ind_s = doub_ind_s + addr_in_block2;
	out_range_s = triple_ind_s + addr_in_block2 * addr_in_block;
	first_time = FALSE;
  }

  block_pos = position / rip->i_sp->s_block_size; /* relative blk # in file */
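
  /* Worked example: with 1 KiB blocks and 4-byte block addresses,
   * addr_in_block = 256; direct blocks cover file blocks 0..11, the single
   * indirect block covers 12..267 (doub_ind_s = 268), the double indirect
   * covers up to triple_ind_s = 65804, and out_range_s = 16843020.
   */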

  /* Is 'position' to be found in the inode itself? */
  if (block_pos < EXT2_NDIR_BLOCKS)
	return(rip->i_block[block_pos]);

  /* It is not in the inode, so it must be single, double or triple indirect */
  if (block_pos < doub_ind_s) {
	b = rip->i_block[EXT2_NDIR_BLOCKS];	/* single indirect block */
	index = block_pos - EXT2_NDIR_BLOCKS;
  } else if (block_pos >= out_range_s) {	/* TODO: do we need it? */
	return(NO_BLOCK);
  } else {
	/* It is a double or triple indirect block.  If triple, first find
	 * the double indirect block.
	 */
	excess = block_pos - doub_ind_s;
	b = rip->i_block[EXT2_DIND_BLOCK];
	if (block_pos >= triple_ind_s) {
		b = rip->i_block[EXT2_TIND_BLOCK];
		if (b == NO_BLOCK) return(NO_BLOCK);
		bp = get_block(rip->i_dev, b, NORMAL); /* get triple ind block */
		ASSERT(lmfs_dev(bp) != NO_DEV);
		ASSERT(lmfs_dev(bp) == rip->i_dev);
		excess = block_pos - triple_ind_s;
		index = excess / addr_in_block2;
		b = rd_indir(bp, index);	/* num of double ind block */
		put_block(bp, INDIRECT_BLOCK);	/* release triple ind block */
		excess = excess % addr_in_block2;
	}
	if (b == NO_BLOCK) return(NO_BLOCK);
	bp = get_block(rip->i_dev, b, NORMAL);	/* get double indirect block */
	ASSERT(lmfs_dev(bp) != NO_DEV);
	ASSERT(lmfs_dev(bp) == rip->i_dev);
	index = excess / addr_in_block;
	b = rd_indir(bp, index);	/* num of single ind block */
	put_block(bp, INDIRECT_BLOCK);	/* release double ind block */
	index = excess % addr_in_block;	/* index into single ind blk */
  }

  if (b == NO_BLOCK) return(NO_BLOCK);
  bp = get_block(rip->i_dev, b, NORMAL);
  ASSERT(lmfs_dev(bp) != NO_DEV);
  ASSERT(lmfs_dev(bp) == rip->i_dev);
  b = rd_indir(bp, index);
  put_block(bp, INDIRECT_BLOCK);	/* release single ind block */

  return(b);
}

/*===========================================================================*
 *				rd_indir				     *
 *===========================================================================*/
block_t rd_indir(bp, index)
struct buf *bp;			/* pointer to indirect block */
int index;			/* index into *bp */
{
  if (bp == NULL)
	panic("rd_indir() on NULL");

  /* TODO: use conv call */
  return conv4(le_CPU, b_ind(bp)[index]);
}
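
/* ext2 stores its on-disk metadata little-endian; conv4()/conv2() byte-swap
 * only when the CPU is big-endian (le_CPU is FALSE), so on x86 they are
 * no-ops.
 */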

/*===========================================================================*
 *				read_ahead				     *
 *===========================================================================*/
void read_ahead()
{
/* Read a block into the cache before it is needed. */
  unsigned int block_size;
  register struct inode *rip;
  struct buf *bp;
  block_t b;

  if (!rdahed_inode)
	return;

  rip = rdahed_inode;		/* pointer to inode to read ahead from */
  block_size = get_block_size(rip->i_dev);
  rdahed_inode = NULL;		/* turn off read ahead */

  if ( (b = read_map(rip, rdahedpos)) == NO_BLOCK) return;	/* at EOF */

  assert(rdahedpos >= 0);  /* so we can safely cast it to unsigned below */

  bp = rahead(rip, b, cvul64((unsigned long) rdahedpos), block_size);
  put_block(bp, PARTIAL_DATA_BLOCK);
}

/*===========================================================================*
 *				rahead					     *
 *===========================================================================*/
static struct buf *rahead(rip, baseblock, position, bytes_ahead)
register struct inode *rip;	/* pointer to inode for file to be read */
block_t baseblock;		/* block at current position */
u64_t position;			/* position within file */
unsigned bytes_ahead;		/* bytes beyond position for immediate use */
{
/* Fetch a block from the cache or the device.  If a physical read is
 * required, prefetch as many more blocks as convenient into the cache.
 * This usually covers bytes_ahead and is at least BLOCKS_MINIMUM.
 * The device driver may decide it knows better and stop reading at a
 * cylinder boundary (or after an error).  Rw_scattered() puts an optional
 * flag on all reads to allow this.
 */
/* Minimum number of blocks to prefetch. */
# define BLOCKS_MINIMUM		(nr_bufs < 50 ? 18 : 32)
  int nr_bufs = lmfs_nr_bufs();
  int block_spec, read_q_size;
  unsigned int blocks_ahead, fragment, block_size;
  block_t block, blocks_left;
  off_t ind1_pos;
  dev_t dev;
  struct buf *bp = NULL;
  static unsigned int readqsize = 0;
  static struct buf **read_q = NULL;

  if (readqsize != nr_bufs) {
	if (readqsize > 0) {
		assert(read_q != NULL);
		free(read_q);
		read_q = NULL;
		readqsize = 0;
	}

	assert(readqsize == 0);
	assert(read_q == NULL);

	if (!(read_q = malloc(sizeof(read_q[0])*nr_bufs)))
		panic("couldn't allocate read_q");
	readqsize = nr_bufs;
  }
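
  /* The buffer cache may be resized while the server runs, so the scatter
   * queue is re-allocated above whenever lmfs_nr_bufs() changes.
   */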

  block_spec = (rip->i_mode & I_TYPE) == I_BLOCK_SPECIAL;
  if (block_spec)
	dev = (dev_t) rip->i_block[0];
  else
	dev = rip->i_dev;

  block_size = get_block_size(dev);

  block = baseblock;
  bp = get_block(dev, block, PREFETCH);
  assert(bp != NULL);
  if (lmfs_dev(bp) != NO_DEV) return(bp);

  /* The best guess for the number of blocks to prefetch:  A lot.
   * It is impossible to tell what the device looks like, so we don't even
   * try to guess the geometry, but leave it to the driver.
   *
   * The floppy driver can read a full track with no rotational delay, and it
   * avoids reading partial tracks if it can, so handing it enough buffers to
   * read two tracks is perfect.  (Two, because some diskette types have
   * an odd number of sectors per track, so a block may span tracks.)
   *
   * The disk drivers don't try to be smart.  With today's disks it is
   * impossible to tell what the real geometry looks like, so it is best to
   * read as much as you can.  With luck the caching on the drive allows
   * for a little time to start the next read.
   *
   * The current solution below is a bit of a hack; it just reads blocks from
   * the current file position hoping that more of the file can be found.  A
   * better solution must look at the already available
   * indirect blocks (but don't call read_map!).
   */

  fragment = rem64u(position, block_size);
  position = sub64u(position, fragment);
  bytes_ahead += fragment;

  blocks_ahead = (bytes_ahead + block_size - 1) / block_size;

  if (block_spec && rip->i_size == 0) {
	blocks_left = (block_t) NR_IOREQS;
  } else {
	blocks_left = (block_t) (rip->i_size-ex64lo(position)+(block_size-1)) /
								block_size;

	/* Go for the first indirect block if we are in its neighborhood. */
	if (!block_spec) {
		ind1_pos = (EXT2_NDIR_BLOCKS) * block_size;
		if ((off_t) ex64lo(position) <= ind1_pos &&
		    rip->i_size > ind1_pos) {
			blocks_ahead++;
			blocks_left++;
		}
	}
  }

  /* No more than the maximum request. */
  if (blocks_ahead > NR_IOREQS) blocks_ahead = NR_IOREQS;

  /* Read at least the minimum number of blocks, but not after a seek. */
  if (blocks_ahead < BLOCKS_MINIMUM && rip->i_seek == NO_SEEK)
	blocks_ahead = BLOCKS_MINIMUM;

  /* Can't go past end of file. */
  if (blocks_ahead > blocks_left) blocks_ahead = blocks_left;

  read_q_size = 0;
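
  /* Example of the clamping above: a sequential 1-block read asks for one
   * block ahead, is raised to BLOCKS_MINIMUM (32 with a large cache), and
   * is then cut back so it never runs past the end of the file.
   */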

  /* Acquire block buffers. */
  for (;;) {
	read_q[read_q_size++] = bp;

	if (--blocks_ahead == 0) break;

	/* Don't trash the cache, leave 4 free. */
	if (lmfs_bufs_in_use() >= nr_bufs - 4) break;

	block++;

	bp = get_block(dev, block, PREFETCH);
	if (lmfs_dev(bp) != NO_DEV) {
		/* Oops, block already in the cache, get out. */
		put_block(bp, FULL_DATA_BLOCK);
		break;
	}
  }
  lmfs_rw_scattered(dev, read_q, read_q_size, READING);
  return(get_block(dev, baseblock, NORMAL));
}

/*===========================================================================*
 *				fs_getdents				     *
 *===========================================================================*/
int fs_getdents(void)
{
#define GETDENTS_BUFSIZE (sizeof(struct dirent) + EXT2_NAME_MAX + 1)
#define GETDENTS_ENTRIES 8
  static char getdents_buf[GETDENTS_BUFSIZE * GETDENTS_ENTRIES];
  struct inode *rip;
  int o, r, done;
  unsigned int block_size, len, reclen;
  ino_t ino;
  block_t b;
  cp_grant_id_t gid;
  size_t size, tmpbuf_off, userbuf_off;
  off_t pos, off, block_pos, new_pos, ent_pos;
  struct buf *bp;
  struct ext2_disk_dir_desc *d_desc;
  struct dirent *dep;

  ino = (ino_t) fs_m_in.REQ_INODE_NR;
  gid = (gid_t) fs_m_in.REQ_GRANT;
  size = (size_t) fs_m_in.REQ_MEM_SIZE;
  pos = (off_t) fs_m_in.REQ_SEEK_POS_LO;

  /* Check whether the position is properly aligned. */
  if ((unsigned int) pos % DIR_ENTRY_ALIGN)
	return(ENOENT);

  if ((rip = get_inode(fs_dev, ino)) == NULL)
	return(EINVAL);

  block_size = rip->i_sp->s_block_size;
  off = (pos % block_size);	/* Offset in block */
  block_pos = pos - off;
  done = FALSE;		/* Stop processing directory blocks when done is set */

  memset(getdents_buf, '\0', sizeof(getdents_buf)); /* Avoid leaking any data */
  tmpbuf_off = 0;	/* Offset in getdents_buf */
  userbuf_off = 0;	/* Offset in the user's buffer */

  /* The default position for the next request is EOF. If the user's buffer
   * fills up before EOF, new_pos will be modified. */
  new_pos = rip->i_size;

  for (; block_pos < rip->i_size; block_pos += block_size) {
	off_t temp_pos = block_pos;
	b = read_map(rip, block_pos);	/* get block number */
	/* Since directories don't have holes, 'b' cannot be NO_BLOCK. */
	bp = get_block(rip->i_dev, b, NORMAL);	/* get a dir block */
	assert(bp != NULL);

	/* Search a directory block. */
	d_desc = (struct ext2_disk_dir_desc*) &b_data(bp);

	/* We need to seek to the entry at 'off' bytes.
	 * When NEXT_DISC_DIR_POS == block_size it's the last dentry.
	 */
	for (; temp_pos + conv2(le_CPU, d_desc->d_rec_len) <= pos
	       && NEXT_DISC_DIR_POS(d_desc, &b_data(bp)) < block_size;
	       d_desc = NEXT_DISC_DIR_DESC(d_desc)) {
		temp_pos += conv2(le_CPU, d_desc->d_rec_len);
	}

	for (; CUR_DISC_DIR_POS(d_desc, &b_data(bp)) < block_size;
	     d_desc = NEXT_DISC_DIR_DESC(d_desc)) {
		if (d_desc->d_ino == 0)
			continue;	/* Entry is not in use */

		if (d_desc->d_name_len > NAME_MAX ||
		    d_desc->d_name_len > EXT2_NAME_MAX) {
			len = min(NAME_MAX, EXT2_NAME_MAX);
		} else {
			len = d_desc->d_name_len;
		}

		/* Compute record length. */
		reclen = offsetof(struct dirent, d_name) + len + 1;
		o = (reclen % sizeof(long));
		if (o != 0)
			reclen += sizeof(long) - o;

		/* Need the position of this entry in the directory. */
		ent_pos = block_pos + ((char *) d_desc - b_data(bp));
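
		/* Example: a 5-character name gives reclen =
		 * offsetof(struct dirent, d_name) + 6, rounded up to the
		 * next multiple of sizeof(long), so successive records in
		 * getdents_buf stay long-aligned.
		 */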

		if (userbuf_off + tmpbuf_off + reclen >= size) {
			/* The user has no space for one more record. */
			done = TRUE;

			/* Record the position of this entry, it is the
			 * starting point of the next request (unless the
			 * position is modified with lseek).
			 */
			new_pos = ent_pos;
			break;
		}

		if (tmpbuf_off + reclen >= GETDENTS_BUFSIZE*GETDENTS_ENTRIES) {
			r = sys_safecopyto(VFS_PROC_NR, gid,
					   (vir_bytes) userbuf_off,
					   (vir_bytes) getdents_buf,
					   (size_t) tmpbuf_off);
			if (r != OK) {
				put_inode(rip);
				return(r);
			}

			userbuf_off += tmpbuf_off;
			tmpbuf_off = 0;
		}

		dep = (struct dirent *) &getdents_buf[tmpbuf_off];
		dep->d_ino = conv4(le_CPU, d_desc->d_ino);
		dep->d_off = ent_pos;
		dep->d_reclen = (unsigned short) reclen;
		memcpy(dep->d_name, d_desc->d_name, len);
		dep->d_name[len] = '\0';
		tmpbuf_off += reclen;
	}

	put_block(bp, DIRECTORY_BLOCK);
	if (done)
		break;
  }

  if (tmpbuf_off != 0) {
	r = sys_safecopyto(VFS_PROC_NR, gid, (vir_bytes) userbuf_off,
			   (vir_bytes) getdents_buf, (size_t) tmpbuf_off);
	if (r != OK) {
		put_inode(rip);
		return(r);
	}

	userbuf_off += tmpbuf_off;
  }

  if (done && userbuf_off == 0)
	r = EINVAL;		/* The user's buffer is too small */
  else
	r = OK;

  if (r == OK) {
	fs_m_out.RES_NBYTES = userbuf_off;
	fs_m_out.RES_SEEK_POS_LO = new_pos;
	rip->i_update |= ATIME;
	rip->i_dirt = IN_DIRTY;
  }

  put_inode(rip);		/* release the inode */
  return(r);
}