linux-2.6/next.git: fs/omfs/file.c
/*
 * OMFS (as used by RIO Karma) file operations.
 * Copyright (C) 2005 Bob Copeland <me@bobcopeland.com>
 * Released under GPL v2.
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include "omfs.h"

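/*
 * Number of extent entries that fit in a system block at the given
 * offset.  sizeof(struct omfs_extent) already includes the first,
 * embedded entry, which is why one entry is added back to the result.
 */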
static u32 omfs_max_extents(struct omfs_sb_info *sbi, int offset)
{
	return (sbi->s_sys_blocksize - offset -
		sizeof(struct omfs_extent)) /
		sizeof(struct omfs_extent_entry) + 1;
}

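/*
 * Reset an extent table to its empty state: no continuation block
 * (e_next all ones) and a single all-ones terminator entry.
 */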
void omfs_make_empty_table(struct buffer_head *bh, int offset)
{
	struct omfs_extent *oe = (struct omfs_extent *) &bh->b_data[offset];

	oe->e_next = ~cpu_to_be64(0ULL);
	oe->e_extent_count = cpu_to_be32(1);
	oe->e_fill = cpu_to_be32(0x22);
	oe->e_entry.e_cluster = ~cpu_to_be64(0ULL);
	oe->e_entry.e_blocks = ~cpu_to_be64(0ULL);
}

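/*
 * Free every cluster referenced by the inode's extent tables.  Only
 * truncation to size zero is handled; each table in the continuation
 * chain is cleared, reset to an empty table, and, for continuation
 * blocks, released along with its mirrors.
 */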
int omfs_shrink_inode(struct inode *inode)
{
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct buffer_head *bh;
	u64 next, last;
	u32 extent_count;
	u32 max_extents;
	int ret;

	/* traverse extent table, freeing each entry that is greater
	 * than inode->i_size
	 */
	next = inode->i_ino;

	/* only support truncate -> 0 for now */
	ret = -EIO;
	if (inode->i_size != 0)
		goto out;

	bh = omfs_bread(inode->i_sb, next);
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);

	for (;;) {
		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);

		if (extent_count > max_extents)
			goto out_brelse;

		last = next;
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		/* ignore last entry as it is the terminator */
		for (; extent_count > 1; extent_count--) {
			u64 start, count;
			start = be64_to_cpu(entry->e_cluster);
			count = be64_to_cpu(entry->e_blocks);

			omfs_clear_range(inode->i_sb, start, (int) count);
			entry++;
		}
		omfs_make_empty_table(bh, (char *) oe - bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);

		if (last != inode->i_ino)
			omfs_clear_range(inode->i_sb, last, sbi->s_mirrors);

		if (next == ~0)
			break;

		bh = omfs_bread(inode->i_sb, next);
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	ret = 0;
out:
	return ret;
out_brelse:
	brelse(bh);
	return ret;
}

static void omfs_truncate(struct inode *inode)
{
	omfs_shrink_inode(inode);
	mark_inode_dirty(inode);
}

/*
 * Add new blocks to the current extent, or create new entries/continuations
 * as necessary.
 */
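/*
 * The last entry in each table is a terminator whose e_blocks field
 * holds the bitwise complement of the table's total block count, so
 * every path below that adds blocks also updates the terminator.
 */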
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
			u64 *ret_block)
{
	struct omfs_extent_entry *terminator;
	struct omfs_extent_entry *entry = &oe->e_entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	u32 extent_count = be32_to_cpu(oe->e_extent_count);
	u64 new_block = 0;
	u32 max_count;
	int new_count;
	int ret = 0;

	/* reached the end of the extent table with no blocks mapped.
	 * there are three possibilities for adding: grow last extent,
	 * add a new extent to the current extent table, and add a
	 * continuation inode.  in last two cases need an allocator for
	 * sbi->s_cluster_size
	 */

	/* TODO: handle holes */

	/* should always have a terminator */
	if (extent_count < 1)
		return -EIO;

	/* trivially grow current extent, if next block is not taken */
	terminator = entry + extent_count - 1;
	if (extent_count > 1) {
		entry = terminator - 1;
		new_block = be64_to_cpu(entry->e_cluster) +
			be64_to_cpu(entry->e_blocks);

		if (omfs_allocate_block(inode->i_sb, new_block)) {
			entry->e_blocks =
				cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1);
			terminator->e_blocks = ~(cpu_to_be64(
				be64_to_cpu(~terminator->e_blocks) + 1));
			goto out;
		}
	}
	max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

	/* TODO: add a continuation block here */
	if (be32_to_cpu(oe->e_extent_count) > max_count - 1)
		return -EIO;

	/* try to allocate a new cluster */
	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
		&new_block, &new_count);
	if (ret)
		goto out_fail;

	/* copy terminator down an entry */
	entry = terminator;
	terminator++;
	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

	entry->e_cluster = cpu_to_be64(new_block);
	entry->e_blocks = cpu_to_be64((u64) new_count);

	terminator->e_blocks = ~(cpu_to_be64(
		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

	/* write in new entry */
	oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count));

out:
	*ret_block = new_block;
out_fail:
	return ret;
}

/*
 * Scans across the extent table for a given file block number.
 * If block not found, return 0.
 */
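/*
 * On success, *left is set to the number of contiguous blocks left in
 * the matching extent starting at the returned block, so the caller
 * can map more than one block at a time.
 */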
static sector_t find_block(struct inode *inode, struct omfs_extent_entry *ent,
			sector_t block, int count, int *left)
{
	/* count > 1 because of terminator */
	sector_t searched = 0;
	for (; count > 1; count--) {
		int numblocks = clus_to_blk(OMFS_SB(inode->i_sb),
			be64_to_cpu(ent->e_blocks));

		if (block >= searched &&
		    block < searched + numblocks) {
			/*
			 * found it at cluster + (block - searched)
			 * numblocks - (block - searched) is remainder
			 */
			*left = numblocks - (block - searched);
			return clus_to_blk(OMFS_SB(inode->i_sb),
				be64_to_cpu(ent->e_cluster)) +
				block - searched;
		}
		searched += numblocks;
		ent++;
	}
	return 0;
}

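/*
 * Standard get_block callback: walk the extent chain looking for the
 * requested file block, map as many contiguous blocks as fit in
 * bh_result->b_size, and, when 'create' is set and nothing is mapped,
 * grow the last extent table instead.
 */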
static int omfs_get_block(struct inode *inode, sector_t block,
			  struct buffer_head *bh_result, int create)
{
	struct buffer_head *bh;
	sector_t next, offset;
	int ret;
	u64 uninitialized_var(new_block);
	u32 max_extents;
	int extent_count;
	struct omfs_extent *oe;
	struct omfs_extent_entry *entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	int max_blocks = bh_result->b_size >> inode->i_blkbits;
	int remain;

	ret = -EIO;
	bh = omfs_bread(inode->i_sb, inode->i_ino);
	if (!bh)
		goto out;

	oe = (struct omfs_extent *)(&bh->b_data[OMFS_EXTENT_START]);
	max_extents = omfs_max_extents(sbi, OMFS_EXTENT_START);
	next = inode->i_ino;

	for (;;) {
		if (omfs_is_bad(sbi, (struct omfs_header *) bh->b_data, next))
			goto out_brelse;

		extent_count = be32_to_cpu(oe->e_extent_count);
		next = be64_to_cpu(oe->e_next);
		entry = &oe->e_entry;

		if (extent_count > max_extents)
			goto out_brelse;

		offset = find_block(inode, entry, block, extent_count, &remain);
		if (offset > 0) {
			ret = 0;
			map_bh(bh_result, inode->i_sb, offset);
			if (remain > max_blocks)
				remain = max_blocks;
			bh_result->b_size = (remain << inode->i_blkbits);
			goto out_brelse;
		}
		if (next == ~0)
			break;

		brelse(bh);
		bh = omfs_bread(inode->i_sb, next);
		if (!bh)
			goto out;
		oe = (struct omfs_extent *) (&bh->b_data[OMFS_EXTENT_CONT]);
		max_extents = omfs_max_extents(sbi, OMFS_EXTENT_CONT);
	}
	if (create) {
		ret = omfs_grow_extent(inode, oe, &new_block);
		if (ret == 0) {
			mark_buffer_dirty(bh);
			mark_inode_dirty(inode);
			map_bh(bh_result, inode->i_sb,
					clus_to_blk(sbi, new_block));
		}
	}
out_brelse:
	brelse(bh);
out:
	return ret;
}

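/*
 * The address_space operations below are thin wrappers that hand
 * omfs_get_block to the generic buffer/mpage helpers.
 */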
static int omfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, omfs_get_block);
}

static int omfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, omfs_get_block);
}

static int omfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, omfs_get_block, wbc);
}

static int
omfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, omfs_get_block);
}

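/*
 * If block_write_begin fails after instantiating blocks past i_size,
 * trim the file back so no allocated-but-unwritten blocks are left
 * beyond end of file.
 */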
static int omfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				omfs_get_block);
	if (unlikely(ret)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}

	return ret;
}

static sector_t omfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, omfs_get_block);
}

const struct file_operations omfs_file_operations = {
	.llseek = generic_file_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
	.mmap = generic_file_mmap,
	.fsync = generic_file_fsync,
	.splice_read = generic_file_splice_read,
};

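/*
 * Size changes go through vmtruncate(), which in this kernel ends up
 * calling omfs_truncate via the ->truncate inode operation; all other
 * attribute changes are copied straight into the in-core inode.
 */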
static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations omfs_file_inops = {
	.setattr = omfs_setattr,
	.truncate = omfs_truncate
};

const struct address_space_operations omfs_aops = {
	.readpage = omfs_readpage,
	.readpages = omfs_readpages,
	.writepage = omfs_writepage,
	.writepages = omfs_writepages,
	.write_begin = omfs_write_begin,
	.write_end = generic_write_end,
	.bmap = omfs_bmap,
};