/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include "reiserfs.h"
#include "acl.h"
#include "xattr.h"
#include <asm/uaccess.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>

/*
** We pack the tails of files on file close, not at the time they are written.
** This implies an unnecessary copy of the tail and an unnecessary indirect item
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed.  If
** this is the last open file descriptor, and the file is
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
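
/*
 * Note on synchronization: the openers count in reiserfs_inode_info tracks
 * open file descriptors, and the tailpack mutex keeps a final-close tail
 * pack from racing with a new open or a truncate of the same inode.
 */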
static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
        struct reiserfs_transaction_handle th;
        int err;
        int jbegin_failure = 0;

        BUG_ON(!S_ISREG(inode->i_mode));

        /* fast path: not the last opener, nothing to pack yet */
        if (atomic_add_unless(&REISERFS_I(inode)->openers, -1, 1))
                return 0;

        mutex_lock(&(REISERFS_I(inode)->tailpack));

        if (!atomic_dec_and_test(&REISERFS_I(inode)->openers)) {
                mutex_unlock(&(REISERFS_I(inode)->tailpack));
                return 0;
        }

        /* fast out for when nothing needs to be done */
        if ((!(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
             !tail_has_to_be_packed(inode)) &&
            REISERFS_I(inode)->i_prealloc_count <= 0) {
                mutex_unlock(&(REISERFS_I(inode)->tailpack));
                return 0;
        }

        reiserfs_write_lock(inode->i_sb);
        /* freeing preallocation only involves relogging blocks that
         * are already in the current transaction.  preallocation gets
         * freed at the end of each transaction, so it is impossible for
         * us to log any additional blocks (including quota blocks).
         */
        err = journal_begin(&th, inode->i_sb, 1);
        if (err) {
                /* uh oh, we can't allow the inode to go away while there
                 * are still preallocation blocks pending.  Try to join the
                 * aborted transaction.
                 */
                jbegin_failure = err;
                err = journal_join_abort(&th, inode->i_sb, 1);

                if (err) {
                        /* hmpf, our choices here aren't good.  We can pin
                         * the inode, which will disallow unmount from ever
                         * happening; we can do nothing, which will corrupt
                         * random memory on unmount; or we can forcibly
                         * remove the file from the preallocation list, which
                         * will leak blocks on disk.  Let's pin the inode and
                         * let the admin know what is going on.
                         */
                        igrab(inode);
                        reiserfs_warning(inode->i_sb, "clm-9001",
                                         "pinning inode %lu because the "
                                         "preallocation can't be freed",
                                         inode->i_ino);
                        goto out;
                }
        }
        reiserfs_update_inode_transaction(inode);

#ifdef REISERFS_PREALLOCATE
        reiserfs_discard_prealloc(&th, inode);
#endif
        err = journal_end(&th, inode->i_sb, 1);

        /* copy back the error code from journal_begin */
        if (!err)
                err = jbegin_failure;

        if (!err &&
            (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
            tail_has_to_be_packed(inode)) {
                /* if a regular file is released by its last holder and it
                 * has been appended (we append by unformatted node only) or
                 * its direct item(s) had to be converted, then it may have
                 * to be indirect2direct converted */
                err = reiserfs_truncate_file(inode, 0);
        }
out:
        reiserfs_write_unlock(inode->i_sb);
        mutex_unlock(&(REISERFS_I(inode)->tailpack));
        return err;
}
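
/*
 * Bump the openers count; if a final close is in the middle of packing
 * the tail, wait on the tailpack mutex first so open never races with
 * the direct/indirect conversion.
 */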
static int reiserfs_file_open(struct inode *inode, struct file *file)
{
        int err = dquot_file_open(inode, file);

        if (!atomic_inc_not_zero(&REISERFS_I(inode)->openers)) {
                /* somebody might be tailpacking on final close; wait for it */
                mutex_lock(&(REISERFS_I(inode)->tailpack));
                atomic_inc(&REISERFS_I(inode)->openers);
                mutex_unlock(&(REISERFS_I(inode)->tailpack));
        }
        return err;
}
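
/*
 * VFS-initiated truncate: take the tailpack mutex so the truncate cannot
 * run concurrently with a tail pack on final close.
 */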
void reiserfs_vfs_truncate_file(struct inode *inode)
{
        mutex_lock(&(REISERFS_I(inode)->tailpack));
        reiserfs_truncate_file(inode, 1);
        mutex_unlock(&(REISERFS_I(inode)->tailpack));
}

/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync.  Can
 * be removed...
 */

static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
                              int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        int err;
        int barrier_done;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        mutex_lock(&inode->i_mutex);
        BUG_ON(!S_ISREG(inode->i_mode));
        err = sync_mapping_buffers(inode->i_mapping);
        reiserfs_write_lock(inode->i_sb);
        barrier_done = reiserfs_commit_for_inode(inode);
        reiserfs_write_unlock(inode->i_sb);
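        /* if the commit did not already flush the device's write cache and
         * the filesystem is mounted with barriers, issue an explicit flush */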
        if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
                blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
        mutex_unlock(&inode->i_mutex);
        if (barrier_done < 0)
                return barrier_done;
        return (err < 0) ? -EIO : 0;
}

/* taken from fs/buffer.c:__block_commit_write */
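/*
 * Commit the buffers of @page that fall inside [from, to): in data=journal
 * mode they are logged in a transaction, otherwise they are marked dirty
 * (and queued on the ordered list when data=ordered requires it).
 */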
int reiserfs_commit_page(struct inode *inode, struct page *page,
                         unsigned from, unsigned to)
{
        unsigned block_start, block_end;
        int partial = 0;
        unsigned blocksize;
        struct buffer_head *bh, *head;
        unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;
        int new;
        int logit = reiserfs_file_data_log(inode);
        struct super_block *s = inode->i_sb;
        int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize;
        struct reiserfs_transaction_handle th;
        int ret = 0;

        th.t_trans_id = 0;
        blocksize = 1 << inode->i_blkbits;

        if (logit) {
                reiserfs_write_lock(s);
                ret = journal_begin(&th, s, bh_per_page + 1);
                if (ret)
                        goto drop_write_lock;
                reiserfs_update_inode_transaction(inode);
        }
        for (bh = head = page_buffers(page), block_start = 0;
             bh != head || !block_start;
             block_start = block_end, bh = bh->b_this_page) {
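                /* page_buffers() is a circular list; the "|| !block_start"
                 * term lets the first pass run even though bh == head */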
                new = buffer_new(bh);
                clear_buffer_new(bh);
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (!buffer_uptodate(bh))
                                partial = 1;
                } else {
                        set_buffer_uptodate(bh);
                        if (logit) {
                                reiserfs_prepare_for_journal(s, bh, 1);
                                journal_mark_dirty(&th, s, bh);
                        } else if (!buffer_dirty(bh)) {
                                mark_buffer_dirty(bh);
                                /* do data=ordered on any page past the end
                                 * of file and any buffer marked BH_New.
                                 */
                                if (reiserfs_data_ordered(inode->i_sb) &&
                                    (new || page->index >= i_size_index)) {
                                        reiserfs_add_ordered_list(inode, bh);
                                }
                        }
                }
        }
        if (logit) {
                ret = journal_end(&th, s, bh_per_page + 1);
drop_write_lock:
                reiserfs_write_unlock(s);
        }
        /*
         * If this is a partial write which happened to make all buffers
         * uptodate then we can optimize away a bogus readpage() for
         * the next read().  Here we 'discover' whether the page went
         * uptodate as a result of this (potentially partial) write.
         */
        if (!partial)
                SetPageUptodate(page);
        return ret;
}

const struct file_operations reiserfs_file_operations = {
        .read = do_sync_read,
        .write = do_sync_write,
        .unlocked_ioctl = reiserfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = reiserfs_compat_ioctl,
#endif
        .mmap = generic_file_mmap,
        .open = reiserfs_file_open,
        .release = reiserfs_file_release,
        .fsync = reiserfs_sync_file,
        .aio_read = generic_file_aio_read,
        .aio_write = generic_file_aio_write,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .llseek = generic_file_llseek,
};

const struct inode_operations reiserfs_file_inode_operations = {
        .setattr = reiserfs_setattr,
        .setxattr = reiserfs_setxattr,
        .getxattr = reiserfs_getxattr,
        .listxattr = reiserfs_listxattr,
        .removexattr = reiserfs_removexattr,
        .permission = reiserfs_permission,
        .get_acl = reiserfs_get_acl,
        .set_acl = reiserfs_set_acl,
};