/*
 *  linux/fs/ext3/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "xattr.h"
#include "acl.h"

static int ext3_writepage_trans_blocks(struct inode *inode);
/*
 * Test whether an inode is a fast symlink.
 */
static int ext3_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT3_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
/*
 * The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
			struct buffer_head *bh, ext3_fsblk_t blocknr)
{
	int err;

	might_sleep();

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			return ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __FUNCTION__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
	unsigned long needed;

	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

	/* Give ourselves just enough room to cope with inodes in which
	 * i_blocks is corrupt: we've seen disk corruptions in the past
	 * which resulted in random data in an inode which looked enough
	 * like a regular file for ext3 to try to delete it.  Things
	 * will go a bit crazy if that happens, but at least we should
	 * try not to panic the whole kernel. */
	if (needed < 2)
		needed = 2;

	/* But we need to bound the transaction so we don't overflow the
	 * journal. */
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;

	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
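
/*
 * Worked example (illustrative, assuming a 4K block size): i_blocks counts
 * 512-byte sectors, so a 40MB file has i_blocks == 81920 and
 * needed = 81920 >> (12 - 9) = 10240, which the cap above clamps to
 * EXT3_MAX_TRANS_DATA.  The estimate returned is then
 * EXT3_DATA_TRANS_BLOCKS(sb) + EXT3_MAX_TRANS_DATA credits per chunk.
 */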
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	handle_t *result;

	result = ext3_journal_start(inode, blocks_for_truncate(inode));
	if (!IS_ERR(result))
		return result;

	ext3_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}
/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
		return 0;
	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
		return 0;
	return 1;
}
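
/*
 * Typical usage sketch (illustrative only): the truncate path calls this
 * before dirtying the next batch of blocks, and falls back to a full
 * restart when extension fails:
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		... make everything consistently dirty ...
 *		ext3_journal_test_restart(handle, inode);
 *	}
 */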
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext3_journal_restart(handle, blocks_for_truncate(inode));
}
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
	handle_t *handle;

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);
	/*
	 * Kill off the orphan record which ext3_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext3_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext3_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext3_orphan_del(handle, inode);
	EXT3_I(inode)->i_dtime	= get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext3_free_inode(handle, inode);
	ext3_journal_stop(handle);
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
/**
 *	ext3_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of a file's data ext3 uses a data structure
 *	common for UNIX filesystems - a tree of pointers anchored in the
 *	inode, with data blocks at leaves and indirect blocks in intermediate
 *	nodes.  This function translates the block number into a path in that
 *	tree - the return value is the path length and @offsets[n] is the
 *	offset of the pointer to the (n+1)th node in the nth one.  If
 *	@i_block is out of range (negative or too large) a warning is printed
 *	and zero is returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed.  All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext3_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;
	int final = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
		final = ptrs;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		final = ptrs;
	} else {
		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
	}
	if (boundary)
		*boundary = final - 1 - (i_block & (ptrs - 1));
	return n;
}
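
/*
 * Worked example (illustrative, assuming 4K blocks: ptrs == 1024,
 * ptrs_bits == 10, EXT3_NDIR_BLOCKS == 12):
 *
 *	i_block = 5	-> depth 1, offsets = { 5 }
 *	i_block = 12	-> depth 2, offsets = { EXT3_IND_BLOCK, 0 }
 *	i_block = 1036	-> depth 3, offsets = { EXT3_DIND_BLOCK, 0, 0 }
 *
 * since 1036 - 12 - 1024 == 0 falls at the very start of the
 * double-indirect range.
 */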
/**
 *	ext3_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it notices that chain had been changed while it was reading
 *		(ditto, *@err == -EAGAIN)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all the way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
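
/*
 * Illustrative note: the verify_chain() call above is what makes this
 * lockless read safe against concurrent truncate.  A typical caller
 * pattern (cf. ext3_get_blocks_handle below) looks roughly like:
 *
 *	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 *	if (err == -EAGAIN) {
 *		... brelse() the buffer_heads held in chain[] ...
 *		... re-read the chain under truncate_mutex ...
 *	}
 */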
/**
 *	ext3_find_near - find a place for allocation with sufficient locality
 *	@inode: owner
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when the heuristic for sequential allocation fails.
 *	Rules are:
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same
 *	    cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.  The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
{
	struct ext3_inode_info *ei = EXT3_I(inode);
	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
	__le32 *p;
	ext3_fsblk_t bg_start;
	ext3_grpblk_t colour;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--) {
		if (*p)
			return le32_to_cpu(*p);
	}

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred to from the inode itself? OK, just put it
	 * into the same cylinder group then.
	 */
	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
}
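
/*
 * Example of the colouring (illustrative, assuming the common 32768
 * blocks per group): each PID hashes into one of 16 slots, so two
 * processes allocating into the same group start 2048 blocks apart:
 *
 *	colour = (pid % 16) * (32768 / 16);	// 0, 2048, 4096, ...
 */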
/**
 *	ext3_find_goal - find a preferred place for allocation.
 *	@inode: owner
 *	@block: block we want
 *	@chain: chain of indirect blocks
 *	@partial: pointer to the last triple within a chain
 *
 *	Returns the preferred place for block allocation.
 */
static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
		Indirect chain[4], Indirect *partial)
{
	struct ext3_block_alloc_info *block_i;

	block_i =  EXT3_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext3_find_near(inode, partial);
}
/**
 *	ext3_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: if the [t,d]indirect block(s) have not been allocated
	 * yet, then it's clear that the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary &&
		le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}
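
/*
 * Illustrative example: if the branch is already mapped down to the direct
 * level (k == 0) and exactly three entries following the target in the
 * final indirect block are still zero, then a request for blks == 8 with
 * blocks_to_boundary == 5 returns 4 - the target block plus the three
 * contiguously free entries after it.
 */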
/**
 *	ext3_alloc_blocks: allocate the multiple blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for
 *			indirect blocks
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
			ext3_fsblk_t goal, int indirect_blks, int blks,
			ext3_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext3_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks we need to allocate (required).
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);
	return ret;
}
/**
 *	ext3_alloc_branch - allocate and set up a chain of blocks.
 *	@inode: owner
 *	@indirect_blks: number of allocated indirect blocks
 *	@blks: number of allocated direct blocks
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext3_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext3_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			int indirect_blks, int *blks, ext3_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext3_fsblk_t new_blocks[4];
	ext3_fsblk_t current_block;

	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks;  n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		branch[n].bh = bh;
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext3_journal_get_create_access(handle, bh);
		if (err) {
			unlock_buffer(bh);
			brelse(bh);
			goto failed;
		}

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if ( n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i=1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		BUFFER_TRACE(bh, "marking uptodate");
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, bh);
		if (err)
			goto failed;
	}
	*blks = num;
	return err;
failed:
	/* Allocation failed, free what we already allocated */
	for (i = 1; i <= n ; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < indirect_blks; i++)
		ext3_free_blocks(handle, inode, new_blocks[i], 1);

	ext3_free_blocks(handle, inode, new_blocks[i], num);

	return err;
}
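
/*
 * Sketch of the result (illustrative) for indirect_blks == 2 and num
 * direct blocks:
 *
 *	branch[0].key -> [ new indirect block ]
 *	                   branch[1].p -> [ new dindirect block ]
 *	                                    branch[2].p -> data, data+1, ...
 *
 * Only the link in the pre-existing parent (*branch[0].p) is still zero;
 * ext3_splice_branch() below fills it in.
 */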
/**
 * ext3_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext3_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	int err = 0;
	struct ext3_block_alloc_info *block_i;
	ext3_fsblk_t current_block;

	block_i = EXT3_I(inode)->i_block_alloc_info;
	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to more just allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i ) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME_SEC;
	ext3_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

err_out:
	for (i = 1; i <= num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
	}
	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

	return err;
}
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
		sector_t iblock, unsigned long maxblocks,
		struct buffer_head *bh_result,
		int create, int extend_disksize)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext3_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext3_inode_info *ei = EXT3_I(inode);
	int count = 0;
	ext3_fsblk_t first_block = 0;

	J_ASSERT(handle != NULL || create == 0);
	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);

	if (depth == 0)
		goto out;

	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result);
		count++;
		/*map more blocks*/
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext3_fsblk_t blk;

			if (!verify_chain(chain, partial)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now. Flag the err as EAGAIN, so it
				 * will reread.
				 */
				err = -EAGAIN;
				count = 0;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));

			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);

	/*
	 * If the indirect block is missing while we are reading
	 * the chain (ext3_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the request block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			clear_buffer_new(bh_result);
			goto got_it;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext3_init_block_alloc_info(inode);

	goal = ext3_find_goal(inode, iblock, chain, partial);

	/* the number of blocks we need to allocate for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;

	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext3_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * Block out ext3_truncate while we alter the tree
	 */
	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

	/*
	 * The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case.  --sct
	 */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock,
					partial, indirect_blks, count);
	/*
	 * i_disksize growing is protected by truncate_mutex.  Don't forget to
	 * protect it if you're about to implement concurrent
	 * ext3_get_block() -bzzz
	 */
	if (!err && extend_disksize && inode->i_size > ei->i_disksize)
		ei->i_disksize = inode->i_size;
	mutex_unlock(&ei->truncate_mutex);
	if (err)
		goto cleanup;

	set_buffer_new(bh_result);
got_it:
	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);
	err = count;
	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
cleanup:
	while (partial > chain) {
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse(partial->bh);
		partial--;
	}
	BUFFER_TRACE(bh_result, "returned");
out:
	return err;
}
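
/*
 * Minimal usage sketch (illustrative): mapping a single block for a plain
 * lookup, with no handle and create == 0, mirrors what ext3_get_block()
 * does below:
 *
 *	struct buffer_head bh = { .b_state = 0 };
 *	int n = ext3_get_blocks_handle(NULL, inode, iblock, 1, &bh, 0, 0);
 *	// n > 0: bh now maps n blocks; n == 0: hole; n < 0: error.
 */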
#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)

static int ext3_get_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	handle_t *handle = ext3_journal_current_handle();
	int ret = 0;
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	if (!create)
		goto get_block;		/* A read */

	if (max_blocks == 1)
		goto get_block;		/* A single block get */

	if (handle->h_transaction->t_state == T_LOCKED) {
		/*
		 * Huge direct-io writes can hold off commits for long
		 * periods of time.  Let this commit run.
		 */
		ext3_journal_stop(handle);
		handle = ext3_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		goto get_block;
	}

	if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
		/*
		 * Getting low on buffer credits...
		 */
		ret = ext3_journal_extend(handle, DIO_CREDITS);
		if (ret > 0) {
			/*
			 * Couldn't extend the transaction.  Start a new one.
			 */
			ret = ext3_journal_restart(handle, DIO_CREDITS);
		}
	}

get_block:
	if (ret == 0) {
		ret = ext3_get_blocks_handle(handle, inode, iblock,
					max_blocks, bh_result, create, 0);
		if (ret > 0) {
			bh_result->b_size = (ret << inode->i_blkbits);
			ret = 0;
		}
	}
	return ret;
}
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
				long block, int create, int *errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	err = ext3_get_blocks_handle(handle, inode, block, 1,
					&dummy, create, 1);
	/*
	 * ext3_get_blocks_handle() returns number of blocks
	 * mapped. 0 in case of a HOLE.
	 */
	if (err > 0) {
		if (err > 1)
			WARN_ON(1);
		err = 0;
	}
	*errp = err;
	if (!err && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (!bh) {
			*errp = -EIO;
			goto err;
		}
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != NULL);

			/*
			 * Now that we do not always journal data, we should
			 * keep in mind whether this should always journal the
			 * new buffer as metadata.  For now, regular file
			 * writes use ext3_get_block instead, so it's not a
			 * problem.
			 */
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal && !buffer_uptodate(bh)) {
				memset(bh->b_data,0,inode->i_sb->s_blocksize);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal)
				fatal = err;
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
err:
	return NULL;
}
struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
			       int block, int create, int *err)
{
	struct buffer_head * bh;

	bh = ext3_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ_META, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
static int walk_page_buffers(	handle_t *handle,
				struct buffer_head *head,
				unsigned from,
				unsigned to,
				int *partial,
				int (*fn)(	handle_t *handle,
						struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (	bh = head, block_start = 0;
		ret == 0 && (bh != head || !block_start);
		block_start = block_end, bh = next)
	{
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
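
/*
 * Illustrative note: the loop above visits every buffer on the page exactly
 * once.  With 4K pages and 1K blocks, a write covering bytes 1024..3071
 * (from == 1024, to == 3072) applies fn() to buffers 1 and 2, skips
 * buffers 0 and 3, and records in *partial that the page still has
 * regions the write did not cover.
 */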
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
					struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	return ext3_journal_get_write_access(handle, bh);
}
static int ext3_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		unlock_page(page);
		page_cache_release(page);
		ret = PTR_ERR(handle);
		goto out;
	}
	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
							ext3_get_block);
	if (ret)
		goto write_begin_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}
write_begin_failed:
	if (ret) {
		ext3_journal_stop(handle);
		unlock_page(page);
		page_cache_release(page);
	}
	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	int err = journal_dirty_data(handle, bh);
	if (err)
		ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
						bh, handle, err);
	return err;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext3_journal_dirty_metadata(handle, bh);
}
/*
 * Generic write_end handler for ordered and writeback ext3 journal modes.
 * We can't use generic_write_end, because that unlocks the page and we need to
 * unlock the page after ext3_journal_stop, but ext3_journal_stop must run
 * after block_write_end.
 */
static int ext3_generic_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	struct inode *inode = file->f_mapping->host;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);
	}

	return copied;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext3_ordered_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	unsigned from, to;
	int ret = 0, ret2;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	ret = walk_page_buffers(handle, page_buffers(page),
		from, to, NULL, ext3_journal_dirty_data);

	if (ret == 0) {
		/*
		 * generic_write_end() will run mark_inode_dirty() if i_size
		 * changes.  So let's piggyback the i_disksize mark_inode_dirty
		 * into that.
		 */
		loff_t new_i_size;

		new_i_size = pos + copied;
		if (new_i_size > EXT3_I(inode)->i_disksize)
			EXT3_I(inode)->i_disksize = new_i_size;
		copied = ext3_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		if (copied < 0)
			ret = copied;
	}
	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
static int ext3_writeback_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = file->f_mapping->host;
	int ret = 0, ret2;
	loff_t new_i_size;

	new_i_size = pos + copied;
	if (new_i_size > EXT3_I(inode)->i_disksize)
		EXT3_I(inode)->i_disksize = new_i_size;

	copied = ext3_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	if (copied < 0)
		ret = copied;

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
static int ext3_journalled_write_end(struct file *file,
				struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;

	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	if (pos+copied > inode->i_size)
		i_size_write(inode, pos+copied);
	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
	if (inode->i_size > EXT3_I(inode)->i_disksize) {
		EXT3_I(inode)->i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	ret2 = ext3_journal_stop(handle);
	if (!ret)
		ret = ret2;
	unlock_page(page);
	page_cache_release(page);

	return ret ? ret : copied;
}
/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DOS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */

		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping,block,ext3_get_block);
}
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
	if (buffer_mapped(bh))
		return ext3_journal_dirty_data(handle, bh);
	return 0;
}
/*
 * Note that we always start a transaction even if we're not journalling
 * data.  This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 *	ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext3_writepage()
 *
 * Similar for:
 *
 *	ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block().  We will deadlock on various things like
 * lock_journal and i_truncate_mutex.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *	    non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction.  But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file.  The
 *   bit which block_write_full_page() uses prepare/commit for.  (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction.  To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage().  That would be
 * disastrous.  Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
static int ext3_ordered_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bufs;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	J_ASSERT(PageLocked(page));

	/*
	 * We give up here if we're reentered, because it might be for a
	 * different filesystem.
	 */
	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				(1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	page_bufs = page_buffers(page);
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bget_one);

	ret = block_write_full_page(page, ext3_get_block, wbc);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_bufs is
	 * safe due to elevated refcount.
	 */

	/*
	 * And attach them to the current transaction.  But only if
	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
	 * and generally junk.
	 */
	if (ret == 0) {
		err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
					NULL, journal_dirty_data_fn);
		if (!ret)
			ret = err;
	}
	walk_page_buffers(handle, page_bufs, 0,
			PAGE_CACHE_SIZE, NULL, bput_one);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
static int ext3_writeback_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto out_fail;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
		ret = nobh_writepage(page, ext3_get_block, wbc);
	else
		ret = block_write_full_page(page, ext3_get_block, wbc);

	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;

out_fail:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return ret;
}
static int ext3_journalled_writepage(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	handle_t *handle = NULL;
	int ret = 0;
	int err;

	if (ext3_journal_current_handle())
		goto no_write;

	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto no_write;
	}

	if (!page_has_buffers(page) || PageChecked(page)) {
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		ClearPageChecked(page);
		ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
					ext3_get_block);
		if (ret != 0) {
			ext3_journal_stop(handle);
			goto out_unlock;
		}
		ret = walk_page_buffers(handle, page_buffers(page), 0,
			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);

		err = walk_page_buffers(handle, page_buffers(page), 0,
				PAGE_CACHE_SIZE, NULL, write_end_fn);
		if (ret == 0)
			ret = err;
		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
		unlock_page(page);
	} else {
		/*
		 * It may be a page full of checkpoint-mode buffers.  We don't
		 * really know unless we go poke around in the buffer_heads.
		 * But block_write_full_page will do the right thing.
		 */
		ret = block_write_full_page(page, ext3_get_block, wbc);
	}
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
out:
	return ret;

no_write:
	redirty_page_for_writepage(wbc, page);
out_unlock:
	unlock_page(page);
	goto out;
}
static int ext3_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext3_get_block);
}

static int
ext3_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
}

static void ext3_invalidatepage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	/*
	 * If it's a full truncate we just forget about the pending dirtying
	 */
	if (offset == 0)
		ClearPageChecked(page);

	journal_invalidatepage(journal, page, offset);
}

static int ext3_releasepage(struct page *page, gfp_t wait)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	return journal_try_to_free_buffers(journal, page, wait);
}
/*
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list.  So recovery will truncate it back to the original size
 * if the machine crashes during the write.
 *
 * If the O_DIRECT write is instantiating holes inside i_size and the machine
 * crashes then stale disk data _may_ be exposed inside the file.
 */
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
			const struct iovec *iov, loff_t offset,
			unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct ext3_inode_info *ei = EXT3_I(inode);
	handle_t *handle = NULL;
	ssize_t ret;
	int orphan = 0;
	size_t count = iov_length(iov, nr_segs);

	if (rw == WRITE) {
		loff_t final_size = offset + count;

		handle = ext3_journal_start(inode, DIO_CREDITS);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			goto out;
		}
		if (final_size > inode->i_size) {
			ret = ext3_orphan_add(handle, inode);
			if (ret)
				goto out_stop;
			orphan = 1;
			ei->i_disksize = inode->i_size;
		}
	}

	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				 offset, nr_segs,
				 ext3_get_block, NULL);

	/*
	 * Reacquire the handle: ext3_get_block() can restart the transaction
	 */
	handle = ext3_journal_current_handle();

out_stop:
	if (handle) {
		int err;

		if (orphan && inode->i_nlink)
			ext3_orphan_del(handle, inode);
		if (orphan && ret > 0) {
			loff_t end = offset + ret;
			if (end > inode->i_size) {
				ei->i_disksize = end;
				i_size_write(inode, end);
				/*
				 * We're going to return a positive `ret'
				 * here due to non-zero-length I/O, so there's
				 * no way of reporting error returns from
				 * ext3_mark_inode_dirty() to userspace.  So
				 * ignore it.
				 */
				ext3_mark_inode_dirty(handle, inode);
			}
		}
		err = ext3_journal_stop(handle);
		if (ret == 0)
			ret = err;
	}
out:
	return ret;
}
/*
 * Pages can be marked dirty completely asynchronously from ext3's journalling
 * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
 * much here because ->set_page_dirty is called under VFS locks.  The page is
 * not necessarily locked.
 *
 * We cannot just dirty the page and leave attached buffers clean, because the
 * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
 * or jbddirty because all the journalling code will explode.
 *
 * So what we do is to mark the page "pending dirty" and next time writepage
 * is called, propagate that into the buffers appropriately.
 */
static int ext3_journalled_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations ext3_ordered_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_ordered_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext3_write_begin,
	.write_end	= ext3_ordered_write_end,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
	.migratepage	= buffer_migrate_page,
};

static const struct address_space_operations ext3_writeback_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_writeback_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext3_write_begin,
	.write_end	= ext3_writeback_write_end,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
	.direct_IO	= ext3_direct_IO,
	.migratepage	= buffer_migrate_page,
};

static const struct address_space_operations ext3_journalled_aops = {
	.readpage	= ext3_readpage,
	.readpages	= ext3_readpages,
	.writepage	= ext3_journalled_writepage,
	.sync_page	= block_sync_page,
	.write_begin	= ext3_write_begin,
	.write_end	= ext3_journalled_write_end,
	.set_page_dirty	= ext3_journalled_set_page_dirty,
	.bmap		= ext3_bmap,
	.invalidatepage	= ext3_invalidatepage,
	.releasepage	= ext3_releasepage,
};

void ext3_set_aops(struct inode *inode)
{
	if (ext3_should_order_data(inode))
		inode->i_mapping->a_ops = &ext3_ordered_aops;
	else if (ext3_should_writeback_data(inode))
		inode->i_mapping->a_ops = &ext3_writeback_aops;
	else
		inode->i_mapping->a_ops = &ext3_journalled_aops;
}
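
/*
 * Summary of the mapping above (illustrative): the data journalling mount
 * option selects the address_space_operations -
 *
 *	data=ordered	-> ext3_ordered_aops
 *	data=writeback	-> ext3_writeback_aops
 *	data=journal	-> ext3_journalled_aops
 *
 * Note that only the journalled aops override ->set_page_dirty, since only
 * data=journal needs the "pending dirty" PageChecked protocol.
 */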
1823 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1824 * up to the end of the block which corresponds to `from'.
1825 * This required during truncate. We need to physically zero the tail end
1826 * of that block so it doesn't yield old data if the file is later grown.
1828 static int ext3_block_truncate_page(handle_t *handle, struct page *page,
1829 struct address_space *mapping, loff_t from)
1831 ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1832 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1833 unsigned blocksize, iblock, length, pos;
1834 struct inode *inode = mapping->host;
1835 struct buffer_head *bh;
1836 int err = 0;
1838 blocksize = inode->i_sb->s_blocksize;
1839 length = blocksize - (offset & (blocksize - 1));
1840 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1843 * For "nobh" option, we can only work if we don't need to
1844 * read-in the page - otherwise we create buffers to do the IO.
1846 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1847 ext3_should_writeback_data(inode) && PageUptodate(page)) {
1848 zero_user_page(page, offset, length, KM_USER0);
1849 set_page_dirty(page);
1850 goto unlock;
1853 if (!page_has_buffers(page))
1854 create_empty_buffers(page, blocksize, 0);
1856 /* Find the buffer that contains "offset" */
1857 bh = page_buffers(page);
1858 pos = blocksize;
1859 while (offset >= pos) {
1860 bh = bh->b_this_page;
1861 iblock++;
1862 pos += blocksize;
1865 err = 0;
1866 if (buffer_freed(bh)) {
1867 BUFFER_TRACE(bh, "freed: skip");
1868 goto unlock;
1871 if (!buffer_mapped(bh)) {
1872 BUFFER_TRACE(bh, "unmapped");
1873 ext3_get_block(inode, iblock, bh, 0);
1874 /* unmapped? It's a hole - nothing to do */
1875 if (!buffer_mapped(bh)) {
1876 BUFFER_TRACE(bh, "still unmapped");
1877 goto unlock;
1881 /* Ok, it's mapped. Make sure it's up-to-date */
1882 if (PageUptodate(page))
1883 set_buffer_uptodate(bh);
1885 if (!buffer_uptodate(bh)) {
1886 err = -EIO;
1887 ll_rw_block(READ, 1, &bh);
1888 wait_on_buffer(bh);
1889 /* Uhhuh. Read error. Complain and punt. */
1890 if (!buffer_uptodate(bh))
1891 goto unlock;
1894 if (ext3_should_journal_data(inode)) {
1895 BUFFER_TRACE(bh, "get write access");
1896 err = ext3_journal_get_write_access(handle, bh);
1897 if (err)
1898 goto unlock;
1901 zero_user_page(page, offset, length, KM_USER0);
1902 BUFFER_TRACE(bh, "zeroed end of block");
1904 err = 0;
1905 if (ext3_should_journal_data(inode)) {
1906 err = ext3_journal_dirty_metadata(handle, bh);
1907 } else {
1908 if (ext3_should_order_data(inode))
1909 err = ext3_journal_dirty_data(handle, bh);
1910 mark_buffer_dirty(bh);
1913 unlock:
1914 unlock_page(page);
1915 page_cache_release(page);
1916 return err;
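/*
 * Worked example, assuming 4K blocks and 4K pages: truncating to
 * i_size 5000 gives index 1, offset = 5000 & 4095 = 904 and
 * length = 4096 - 904 = 3192, so bytes 904..4095 of the EOF block
 * are zeroed.
 */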
1920 * Probably it should be a library function... search for first non-zero word
1921 * or memcmp with zero_page, whatever is better for particular architecture.
1922 * Linus?
1924 static inline int all_zeroes(__le32 *p, __le32 *q)
1926 while (p < q)
1927 if (*p++)
1928 return 0;
1929 return 1;
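/*
 * ext3_find_shared() below calls this as
 *
 *	all_zeroes((__le32 *)p->bh->b_data, p->p)
 *
 * to test whether every pointer before p->p in an indirect block is
 * already clear, i.e. whether the whole block dies with the truncate.
 */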
1933 * ext3_find_shared - find the indirect blocks for partial truncation.
1934 * @inode: inode in question
1935 * @depth: depth of the affected branch
1936 * @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1937 * @chain: place to store the pointers to partial indirect blocks
1938 * @top: place to store the (detached) top of the branch
1940 * This is a helper function used by ext3_truncate().
1942 * When we do truncate() we may have to clean the ends of several
1943 * indirect blocks but leave the blocks themselves alive. Block is
1944 * partially truncated if some data below the new i_size is referenced
1945 * from it (and it is on the path to the first completely truncated
1946 * data block, indeed). We have to free the top of that path along
1947 * with everything to the right of the path. Since no allocation
1948 * past the truncation point is possible until ext3_truncate()
1949 * finishes, we may safely do the latter, but top of branch may
1950 * require special attention - pageout below the truncation point
1951 * might try to populate it.
1953 * We atomically detach the top of branch from the tree, store the
1954 * block number of its root in *@top, pointers to buffer_heads of
1955 * partially truncated blocks - in @chain[].bh and pointers to
1956 * their last elements that should not be removed - in
1957 * @chain[].p. Return value is the pointer to last filled element
1958 * of @chain.
1960 * The caller is left to do the actual freeing of subtrees:
1961 * a) free the subtree starting from *@top
1962 * b) free the subtrees whose roots are stored in
1963 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1964 * c) free the subtrees growing from the inode past the @chain[0].
1965 * (no partially truncated stuff there). */
1967 static Indirect *ext3_find_shared(struct inode *inode, int depth,
1968 int offsets[4], Indirect chain[4], __le32 *top)
1970 Indirect *partial, *p;
1971 int k, err;
1973 *top = 0;
1974 /* Make k index the deepest non-null offset + 1 */
1975 for (k = depth; k > 1 && !offsets[k-1]; k--)
1977 partial = ext3_get_branch(inode, k, offsets, chain, &err);
1978 /* Writer: pointers */
1979 if (!partial)
1980 partial = chain + k-1;
1982 * If the branch has acquired a continuation since we looked at it -
1983 * fine, it should all survive and the (new) top doesn't belong to us.
1985 if (!partial->key && *partial->p)
1986 /* Writer: end */
1987 goto no_top;
1988 for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
1991 * OK, we've found the last block that must survive. The rest of our
1992 * branch should be detached before unlocking. However, if that rest
1993 * of branch is all ours and does not grow immediately from the inode
1994 * it's easier to cheat and just decrement partial->p.
1996 if (p == chain + k - 1 && p > chain) {
1997 p->p--;
1998 } else {
1999 *top = *p->p;
2000 /* Nope, don't do this in ext3. Must leave the tree intact */
2001 #if 0
2002 *p->p = 0;
2003 #endif
2005 /* Writer: end */
2007 while(partial > p) {
2008 brelse(partial->bh);
2009 partial--;
2011 no_top:
2012 return partial;
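/*
 * Worked example, assuming 4K blocks (1024 pointers per indirect
 * block): truncating so that last_block = 5000 makes
 * ext3_block_to_path() return the depth-3 path {13, 3, 892} - block
 * 5000 is 3964 blocks into the double-indirect tree, i.e. slot
 * 3964/1024 = 3 of the DIND block, entry 3964%1024 = 892 of that
 * indirect block. ext3_find_shared() then walks chain[] bottom-up to
 * see how much of that path must survive.
 */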
2016 * Zero a number of block pointers in either an inode or an indirect block.
2017 * If we restart the transaction we must again get write access to the
2018 * indirect block for further modification.
2020 * We release `count' blocks on disk, but (last - first) may be greater
2021 * than `count' because there can be holes in there.
2023 static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2024 struct buffer_head *bh, ext3_fsblk_t block_to_free,
2025 unsigned long count, __le32 *first, __le32 *last)
2027 __le32 *p;
2028 if (try_to_extend_transaction(handle, inode)) {
2029 if (bh) {
2030 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2031 ext3_journal_dirty_metadata(handle, bh);
2033 ext3_mark_inode_dirty(handle, inode);
2034 ext3_journal_test_restart(handle, inode);
2035 if (bh) {
2036 BUFFER_TRACE(bh, "retaking write access");
2037 ext3_journal_get_write_access(handle, bh);
2042 * Any buffers which are on the journal will be in memory. We find
2043 * them on the hash table so journal_revoke() will run journal_forget()
2044 * on them. We've already detached each block from the file, so
2045 * bforget() in journal_forget() should be safe.
2047 * AKPM: turn on bforget in journal_forget()!!!
2049 for (p = first; p < last; p++) {
2050 u32 nr = le32_to_cpu(*p);
2051 if (nr) {
2052 struct buffer_head *bh;
2054 *p = 0;
2055 bh = sb_find_get_block(inode->i_sb, nr);
2056 ext3_forget(handle, 0, inode, bh, nr);
2060 ext3_free_blocks(handle, inode, block_to_free, count);
2064 * ext3_free_data - free a list of data blocks
2065 * @handle: handle for this transaction
2066 * @inode: inode we are dealing with
2067 * @this_bh: indirect buffer_head which contains *@first and *@last
2068 * @first: array of block numbers
2069 * @last: points immediately past the end of array
2071 * We are freeing all blocks referred to from that array (numbers are stored as
2072 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2074 * We accumulate contiguous runs of blocks to free. Conveniently, if these
2075 * blocks are contiguous then releasing them at one time will only affect one
2076 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2077 * actually use a lot of journal space.
2079 * @this_bh will be %NULL if @first and @last point into the inode's direct
2080 * block pointers.
2082 static void ext3_free_data(handle_t *handle, struct inode *inode,
2083 struct buffer_head *this_bh,
2084 __le32 *first, __le32 *last)
2086 ext3_fsblk_t block_to_free = 0; /* Starting block # of a run */
2087 unsigned long count = 0; /* Number of blocks in the run */
2088 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
2089 corresponding to
2090 block_to_free */
2091 ext3_fsblk_t nr; /* Current block # */
2092 __le32 *p; /* Pointer into inode/ind
2093 for current block */
2094 int err;
2096 if (this_bh) { /* For indirect block */
2097 BUFFER_TRACE(this_bh, "get_write_access");
2098 err = ext3_journal_get_write_access(handle, this_bh);
2099 /* Important: if we can't update the indirect pointers
2100 * to the blocks, we can't free them. */
2101 if (err)
2102 return;
2105 for (p = first; p < last; p++) {
2106 nr = le32_to_cpu(*p);
2107 if (nr) {
2108 /* accumulate blocks to free if they're contiguous */
2109 if (count == 0) {
2110 block_to_free = nr;
2111 block_to_free_p = p;
2112 count = 1;
2113 } else if (nr == block_to_free + count) {
2114 count++;
2115 } else {
2116 ext3_clear_blocks(handle, inode, this_bh,
2117 block_to_free,
2118 count, block_to_free_p, p);
2119 block_to_free = nr;
2120 block_to_free_p = p;
2121 count = 1;
2126 if (count > 0)
2127 ext3_clear_blocks(handle, inode, this_bh, block_to_free,
2128 count, block_to_free_p, p);
2130 if (this_bh) {
2131 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
2132 ext3_journal_dirty_metadata(handle, this_bh);
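/*
 * For example, if the pointer array holds {100, 101, 102, 0, 200},
 * the run {100,101,102} is accumulated, the hole is skipped without
 * breaking the run, and the discontiguous 200 triggers
 * ext3_clear_blocks(..., 100, 3, ...); the trailing (200, 1) run is
 * flushed by the final count > 0 test.
 */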
2137 * ext3_free_branches - free an array of branches
2138 * @handle: JBD handle for this transaction
2139 * @inode: inode we are dealing with
2140 * @parent_bh: the buffer_head which contains *@first and *@last
2141 * @first: array of block numbers
2142 * @last: pointer immediately past the end of array
2143 * @depth: depth of the branches to free
2145 * We are freeing all blocks referred to from these branches (numbers are
2146 * stored as little-endian 32-bit) and updating @inode->i_blocks
2147 * appropriately.
2149 static void ext3_free_branches(handle_t *handle, struct inode *inode,
2150 struct buffer_head *parent_bh,
2151 __le32 *first, __le32 *last, int depth)
2153 ext3_fsblk_t nr;
2154 __le32 *p;
2156 if (is_handle_aborted(handle))
2157 return;
2159 if (depth--) {
2160 struct buffer_head *bh;
2161 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2162 p = last;
2163 while (--p >= first) {
2164 nr = le32_to_cpu(*p);
2165 if (!nr)
2166 continue; /* A hole */
2168 /* Go read the buffer for the next level down */
2169 bh = sb_bread(inode->i_sb, nr);
2172 * A read failure? Report error and clear slot
2173 * (should be rare).
2175 if (!bh) {
2176 ext3_error(inode->i_sb, "ext3_free_branches",
2177 "Read failure, inode=%lu, block="E3FSBLK,
2178 inode->i_ino, nr);
2179 continue;
2182 /* This zaps the entire block. Bottom up. */
2183 BUFFER_TRACE(bh, "free child branches");
2184 ext3_free_branches(handle, inode, bh,
2185 (__le32*)bh->b_data,
2186 (__le32*)bh->b_data + addr_per_block,
2187 depth);
2190 * We've probably journalled the indirect block several
2191 * times during the truncate. But it's no longer
2192 * needed and we now drop it from the transaction via
2193 * journal_revoke().
2195 * That's easy if it's exclusively part of this
2196 * transaction. But if it's part of the committing
2197 * transaction then journal_forget() will simply
2198 * brelse() it. That means that if the underlying
2199 * block is reallocated in ext3_get_block(),
2200 * unmap_underlying_metadata() will find this block
2201 * and will try to get rid of it. damn, damn.
2203 * If this block has already been committed to the
2204 * journal, a revoke record will be written. And
2205 * revoke records must be emitted *before* clearing
2206 * this block's bit in the bitmaps.
2208 ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
2211 * Everything below this pointer has been
2212 * released. Now let this top-of-subtree go.
2214 * We want the freeing of this indirect block to be
2215 * atomic in the journal with the updating of the
2216 * bitmap block which owns it. So make some room in
2217 * the journal.
2219 * We zero the parent pointer *after* freeing its
2220 * pointee in the bitmaps, so if extend_transaction()
2221 * for some reason fails to put the bitmap changes and
2222 * the release into the same transaction, recovery
2223 * will merely complain about releasing a free block,
2224 * rather than leaking blocks.
2226 if (is_handle_aborted(handle))
2227 return;
2228 if (try_to_extend_transaction(handle, inode)) {
2229 ext3_mark_inode_dirty(handle, inode);
2230 ext3_journal_test_restart(handle, inode);
2233 ext3_free_blocks(handle, inode, nr, 1);
2235 if (parent_bh) {
2237 * The block which we have just freed is
2238 * pointed to by an indirect block: journal it
2240 BUFFER_TRACE(parent_bh, "get_write_access");
2241 if (!ext3_journal_get_write_access(handle,
2242 parent_bh)){
2243 *p = 0;
2244 BUFFER_TRACE(parent_bh,
2245 "call ext3_journal_dirty_metadata");
2246 ext3_journal_dirty_metadata(handle,
2247 parent_bh);
2251 } else {
2252 /* We have reached the bottom of the tree. */
2253 BUFFER_TRACE(parent_bh, "free data blocks");
2254 ext3_free_data(handle, inode, parent_bh, first, last);
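/*
 * A short trace for a single-indirect branch: the caller passes
 * depth == 1, this function reads the indirect block, recurses with
 * depth == 0 (which takes the else arm and hands the pointers to
 * ext3_free_data()), then revokes and frees the indirect block itself
 * and clears its slot in the parent.
 */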
2259 * ext3_truncate()
2261 * We block out ext3_get_block() block instantiations across the entire
2262 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
2263 * simultaneously on behalf of the same inode.
2265 * As we work through the truncate and commit bits of it to the journal there
2266 * is one core, guiding principle: the file's tree must always be consistent on
2267 * disk. We must be able to restart the truncate after a crash.
2269 * The file's tree may be transiently inconsistent in memory (although it
2270 * probably isn't), but whenever we close off and commit a journal transaction,
2271 * the contents of (the filesystem + the journal) must be consistent and
2272 * restartable. It's pretty simple, really: bottom up, right to left (although
2273 * left-to-right works OK too).
2275 * Note that at recovery time, journal replay occurs *before* the restart of
2276 * truncate against the orphan inode list.
2278 * The committed inode has the new, desired i_size (which is the same as
2279 * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see
2280 * that this inode's truncate did not complete and it will again call
2281 * ext3_truncate() to have another go. So there will be instantiated blocks
2282 * to the right of the truncation point in a crashed ext3 filesystem. But
2283 * that's fine - as long as they are linked from the inode, the post-crash
2284 * ext3_truncate() run will find them and release them.
2286 void ext3_truncate(struct inode *inode)
2288 handle_t *handle;
2289 struct ext3_inode_info *ei = EXT3_I(inode);
2290 __le32 *i_data = ei->i_data;
2291 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
2292 struct address_space *mapping = inode->i_mapping;
2293 int offsets[4];
2294 Indirect chain[4];
2295 Indirect *partial;
2296 __le32 nr = 0;
2297 int n;
2298 long last_block;
2299 unsigned blocksize = inode->i_sb->s_blocksize;
2300 struct page *page;
2302 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2303 S_ISLNK(inode->i_mode)))
2304 return;
2305 if (ext3_inode_is_fast_symlink(inode))
2306 return;
2307 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2308 return;
2311 * We have to lock the EOF page here, because lock_page() nests
2312 * outside journal_start().
2314 if ((inode->i_size & (blocksize - 1)) == 0) {
2315 /* Block boundary? Nothing to do */
2316 page = NULL;
2317 } else {
2318 page = grab_cache_page(mapping,
2319 inode->i_size >> PAGE_CACHE_SHIFT);
2320 if (!page)
2321 return;
2324 handle = start_transaction(inode);
2325 if (IS_ERR(handle)) {
2326 if (page) {
2327 clear_highpage(page);
2328 flush_dcache_page(page);
2329 unlock_page(page);
2330 page_cache_release(page);
2332 return; /* AKPM: return what? */
2335 last_block = (inode->i_size + blocksize-1)
2336 >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
2338 if (page)
2339 ext3_block_truncate_page(handle, page, mapping, inode->i_size);
2341 n = ext3_block_to_path(inode, last_block, offsets, NULL);
2342 if (n == 0)
2343 goto out_stop; /* error */
2346 * OK. This truncate is going to happen. We add the inode to the
2347 * orphan list, so that if this truncate spans multiple transactions,
2348 * and we crash, we will resume the truncate when the filesystem
2349 * recovers. It also marks the inode dirty, to catch the new size.
2351 * Implication: the file must always be in a sane, consistent
2352 * truncatable state while each transaction commits.
2354 if (ext3_orphan_add(handle, inode))
2355 goto out_stop;
2358 * The orphan list entry will now protect us from any crash which
2359 * occurs before the truncate completes, so it is now safe to propagate
2360 * the new, shorter inode size (held for now in i_size) into the
2361 * on-disk inode. We do this via i_disksize, which is the value which
2362 * ext3 *really* writes onto the disk inode.
2364 ei->i_disksize = inode->i_size;
2367 * From here we block out all ext3_get_block() callers who want to
2368 * modify the block allocation tree.
2370 mutex_lock(&ei->truncate_mutex);
2372 if (n == 1) { /* direct blocks */
2373 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
2374 i_data + EXT3_NDIR_BLOCKS);
2375 goto do_indirects;
2378 partial = ext3_find_shared(inode, n, offsets, chain, &nr);
2379 /* Kill the top of shared branch (not detached) */
2380 if (nr) {
2381 if (partial == chain) {
2382 /* Shared branch grows from the inode */
2383 ext3_free_branches(handle, inode, NULL,
2384 &nr, &nr+1, (chain+n-1) - partial);
2385 *partial->p = 0;
2387 * We mark the inode dirty prior to restart,
2388 * and prior to stop. No need for it here.
2390 } else {
2391 /* Shared branch grows from an indirect block */
2392 BUFFER_TRACE(partial->bh, "get_write_access");
2393 ext3_free_branches(handle, inode, partial->bh,
2394 partial->p,
2395 partial->p+1, (chain+n-1) - partial);
2398 /* Clear the ends of indirect blocks on the shared branch */
2399 while (partial > chain) {
2400 ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
2401 (__le32*)partial->bh->b_data+addr_per_block,
2402 (chain+n-1) - partial);
2403 BUFFER_TRACE(partial->bh, "call brelse");
2404 brelse (partial->bh);
2405 partial--;
2407 do_indirects:
2408 /* Kill the remaining (whole) subtrees */
2409 switch (offsets[0]) {
2410 default:
2411 nr = i_data[EXT3_IND_BLOCK];
2412 if (nr) {
2413 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2414 i_data[EXT3_IND_BLOCK] = 0;
2416 case EXT3_IND_BLOCK:
2417 nr = i_data[EXT3_DIND_BLOCK];
2418 if (nr) {
2419 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2420 i_data[EXT3_DIND_BLOCK] = 0;
2422 case EXT3_DIND_BLOCK:
2423 nr = i_data[EXT3_TIND_BLOCK];
2424 if (nr) {
2425 ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2426 i_data[EXT3_TIND_BLOCK] = 0;
2428 case EXT3_TIND_BLOCK:
2432 ext3_discard_reservation(inode);
2434 mutex_unlock(&ei->truncate_mutex);
2435 inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
2436 ext3_mark_inode_dirty(handle, inode);
2439 * In a multi-transaction truncate, we only make the final transaction
2440 * synchronous
2442 if (IS_SYNC(inode))
2443 handle->h_sync = 1;
2444 out_stop:
2446 * If this was a simple ftruncate(), and the file will remain alive
2447 * then we need to clear up the orphan record which we created above.
2448 * However, if this was a real unlink then we were called by
2449 * ext3_delete_inode(), and we allow that function to clean up the
2450 * orphan info for us.
2452 if (inode->i_nlink)
2453 ext3_orphan_del(handle, inode);
2455 ext3_journal_stop(handle);
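/*
 * The do_indirects fall-through mirrors the tree's reach. With 4K
 * blocks each indirect level multiplies coverage by 1024: the direct
 * blocks span 48K, the IND branch another 4M, DIND 4G and TIND 4T.
 * A truncate whose new EOF lies in the direct blocks (offsets[0] <
 * EXT3_IND_BLOCK) enters at "default:" and so kills all three
 * indirect trees.
 */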
2458 static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
2459 unsigned long ino, struct ext3_iloc *iloc)
2461 unsigned long desc, group_desc, block_group;
2462 unsigned long offset;
2463 ext3_fsblk_t block;
2464 struct buffer_head *bh;
2465 struct ext3_group_desc * gdp;
2467 if (!ext3_valid_inum(sb, ino)) {
2469 * This error is already checked for in namei.c unless we are
2470 * looking at an NFS filehandle, in which case no error
2471 * report is needed
2473 return 0;
2476 block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
2477 if (block_group >= EXT3_SB(sb)->s_groups_count) {
2478 ext3_error(sb,"ext3_get_inode_block","group >= groups count");
2479 return 0;
2481 smp_rmb();
2482 group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
2483 desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
2484 bh = EXT3_SB(sb)->s_group_desc[group_desc];
2485 if (!bh) {
2486 ext3_error (sb, "ext3_get_inode_block",
2487 "Descriptor not loaded");
2488 return 0;
2491 gdp = (struct ext3_group_desc *)bh->b_data;
2493 * Figure out the offset within the block group inode table
2495 offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
2496 EXT3_INODE_SIZE(sb);
2497 block = le32_to_cpu(gdp[desc].bg_inode_table) +
2498 (offset >> EXT3_BLOCK_SIZE_BITS(sb));
2500 iloc->block_group = block_group;
2501 iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
2502 return block;
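/*
 * Worked example, assuming 8192 inodes per group and 128-byte inodes:
 * ino 10000 gives block_group = 9999 / 8192 = 1 and
 * offset = (9999 % 8192) * 128 = 231296 bytes into that group's inode
 * table; with 4K blocks that is table block 56, and iloc->offset =
 * 231296 & 4095 = 1920 within it.
 */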
2506 * ext3_get_inode_loc returns with an extra refcount against the inode's
2507 * underlying buffer_head on success. If 'in_mem' is true, we have all
2508 * data in memory that is needed to recreate the on-disk version of this
2509 * inode.
2511 static int __ext3_get_inode_loc(struct inode *inode,
2512 struct ext3_iloc *iloc, int in_mem)
2514 ext3_fsblk_t block;
2515 struct buffer_head *bh;
2517 block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2518 if (!block)
2519 return -EIO;
2521 bh = sb_getblk(inode->i_sb, block);
2522 if (!bh) {
2523 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2524 "unable to read inode block - "
2525 "inode=%lu, block="E3FSBLK,
2526 inode->i_ino, block);
2527 return -EIO;
2529 if (!buffer_uptodate(bh)) {
2530 lock_buffer(bh);
2531 if (buffer_uptodate(bh)) {
2532 /* someone brought it uptodate while we waited */
2533 unlock_buffer(bh);
2534 goto has_buffer;
2538 * If we have all information of the inode in memory and this
2539 * is the only valid inode in the block, we need not read the
2540 * block.
2542 if (in_mem) {
2543 struct buffer_head *bitmap_bh;
2544 struct ext3_group_desc *desc;
2545 int inodes_per_buffer;
2546 int inode_offset, i;
2547 int block_group;
2548 int start;
2550 block_group = (inode->i_ino - 1) /
2551 EXT3_INODES_PER_GROUP(inode->i_sb);
2552 inodes_per_buffer = bh->b_size /
2553 EXT3_INODE_SIZE(inode->i_sb);
2554 inode_offset = ((inode->i_ino - 1) %
2555 EXT3_INODES_PER_GROUP(inode->i_sb));
2556 start = inode_offset & ~(inodes_per_buffer - 1);
2558 /* Is the inode bitmap in cache? */
2559 desc = ext3_get_group_desc(inode->i_sb,
2560 block_group, NULL);
2561 if (!desc)
2562 goto make_io;
2564 bitmap_bh = sb_getblk(inode->i_sb,
2565 le32_to_cpu(desc->bg_inode_bitmap));
2566 if (!bitmap_bh)
2567 goto make_io;
2570 * If the inode bitmap isn't in cache then the
2571 * optimisation may end up performing two reads instead
2572 * of one, so skip it.
2574 if (!buffer_uptodate(bitmap_bh)) {
2575 brelse(bitmap_bh);
2576 goto make_io;
2578 for (i = start; i < start + inodes_per_buffer; i++) {
2579 if (i == inode_offset)
2580 continue;
2581 if (ext3_test_bit(i, bitmap_bh->b_data))
2582 break;
2584 brelse(bitmap_bh);
2585 if (i == start + inodes_per_buffer) {
2586 /* all other inodes are free, so skip I/O */
2587 memset(bh->b_data, 0, bh->b_size);
2588 set_buffer_uptodate(bh);
2589 unlock_buffer(bh);
2590 goto has_buffer;
2594 make_io:
2596 * There are other valid inodes in the buffer, this inode
2597 * has in-inode xattrs, or we don't have this inode in memory.
2598 * Read the block from disk.
2600 get_bh(bh);
2601 bh->b_end_io = end_buffer_read_sync;
2602 submit_bh(READ_META, bh);
2603 wait_on_buffer(bh);
2604 if (!buffer_uptodate(bh)) {
2605 ext3_error(inode->i_sb, "ext3_get_inode_loc",
2606 "unable to read inode block - "
2607 "inode=%lu, block="E3FSBLK,
2608 inode->i_ino, block);
2609 brelse(bh);
2610 return -EIO;
2613 has_buffer:
2614 iloc->bh = bh;
2615 return 0;
2618 int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
2620 /* We have all inode data except xattrs in memory here. */
2621 return __ext3_get_inode_loc(inode, iloc,
2622 !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
2625 void ext3_set_inode_flags(struct inode *inode)
2627 unsigned int flags = EXT3_I(inode)->i_flags;
2629 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2630 if (flags & EXT3_SYNC_FL)
2631 inode->i_flags |= S_SYNC;
2632 if (flags & EXT3_APPEND_FL)
2633 inode->i_flags |= S_APPEND;
2634 if (flags & EXT3_IMMUTABLE_FL)
2635 inode->i_flags |= S_IMMUTABLE;
2636 if (flags & EXT3_NOATIME_FL)
2637 inode->i_flags |= S_NOATIME;
2638 if (flags & EXT3_DIRSYNC_FL)
2639 inode->i_flags |= S_DIRSYNC;
2642 /* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
2643 void ext3_get_inode_flags(struct ext3_inode_info *ei)
2645 unsigned int flags = ei->vfs_inode.i_flags;
2647 ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
2648 EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
2649 if (flags & S_SYNC)
2650 ei->i_flags |= EXT3_SYNC_FL;
2651 if (flags & S_APPEND)
2652 ei->i_flags |= EXT3_APPEND_FL;
2653 if (flags & S_IMMUTABLE)
2654 ei->i_flags |= EXT3_IMMUTABLE_FL;
2655 if (flags & S_NOATIME)
2656 ei->i_flags |= EXT3_NOATIME_FL;
2657 if (flags & S_DIRSYNC)
2658 ei->i_flags |= EXT3_DIRSYNC_FL;
2661 void ext3_read_inode(struct inode * inode)
2663 struct ext3_iloc iloc;
2664 struct ext3_inode *raw_inode;
2665 struct ext3_inode_info *ei = EXT3_I(inode);
2666 struct buffer_head *bh;
2667 int block;
2669 #ifdef CONFIG_EXT3_FS_POSIX_ACL
2670 ei->i_acl = EXT3_ACL_NOT_CACHED;
2671 ei->i_default_acl = EXT3_ACL_NOT_CACHED;
2672 #endif
2673 ei->i_block_alloc_info = NULL;
2675 if (__ext3_get_inode_loc(inode, &iloc, 0))
2676 goto bad_inode;
2677 bh = iloc.bh;
2678 raw_inode = ext3_raw_inode(&iloc);
2679 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2680 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2681 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2682 if(!(test_opt (inode->i_sb, NO_UID32))) {
2683 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2684 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2686 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2687 inode->i_size = le32_to_cpu(raw_inode->i_size);
2688 inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
2689 inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
2690 inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
2691 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
2693 ei->i_state = 0;
2694 ei->i_dir_start_lookup = 0;
2695 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2696 /* We now have enough fields to check if the inode was active or not.
2697 * This is needed because nfsd might try to access dead inodes;
2698 * the test is the same one that e2fsck uses.
2699 * NeilBrown 1999oct15
2701 if (inode->i_nlink == 0) {
2702 if (inode->i_mode == 0 ||
2703 !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
2704 /* this inode is deleted */
2705 brelse (bh);
2706 goto bad_inode;
2708 /* The only unlinked inodes we let through here have
2709 * valid i_mode and are being read by the orphan
2710 * recovery code: that's fine, we're about to complete
2711 * the process of deleting those. */
2713 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2714 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2715 #ifdef EXT3_FRAGMENTS
2716 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
2717 ei->i_frag_no = raw_inode->i_frag;
2718 ei->i_frag_size = raw_inode->i_fsize;
2719 #endif
2720 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2721 if (!S_ISREG(inode->i_mode)) {
2722 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2723 } else {
2724 inode->i_size |=
2725 ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2727 ei->i_disksize = inode->i_size;
2728 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2729 ei->i_block_group = iloc.block_group;
2731 * NOTE! The in-memory inode i_data array is in little-endian order
2732 * even on big-endian machines: we do NOT byteswap the block numbers!
2734 for (block = 0; block < EXT3_N_BLOCKS; block++)
2735 ei->i_data[block] = raw_inode->i_block[block];
2736 INIT_LIST_HEAD(&ei->i_orphan);
2738 if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
2739 EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
2741 * When mke2fs creates big inodes it does not zero out
2742 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
2743 * so ignore those first few inodes.
2745 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2746 if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2747 EXT3_INODE_SIZE(inode->i_sb)) {
2748 brelse (bh);
2749 goto bad_inode;
2751 if (ei->i_extra_isize == 0) {
2752 /* The extra space is currently unused. Use it. */
2753 ei->i_extra_isize = sizeof(struct ext3_inode) -
2754 EXT3_GOOD_OLD_INODE_SIZE;
2755 } else {
2756 __le32 *magic = (void *)raw_inode +
2757 EXT3_GOOD_OLD_INODE_SIZE +
2758 ei->i_extra_isize;
2759 if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
2760 ei->i_state |= EXT3_STATE_XATTR;
2762 } else
2763 ei->i_extra_isize = 0;
2765 if (S_ISREG(inode->i_mode)) {
2766 inode->i_op = &ext3_file_inode_operations;
2767 inode->i_fop = &ext3_file_operations;
2768 ext3_set_aops(inode);
2769 } else if (S_ISDIR(inode->i_mode)) {
2770 inode->i_op = &ext3_dir_inode_operations;
2771 inode->i_fop = &ext3_dir_operations;
2772 } else if (S_ISLNK(inode->i_mode)) {
2773 if (ext3_inode_is_fast_symlink(inode))
2774 inode->i_op = &ext3_fast_symlink_inode_operations;
2775 else {
2776 inode->i_op = &ext3_symlink_inode_operations;
2777 ext3_set_aops(inode);
2779 } else {
2780 inode->i_op = &ext3_special_inode_operations;
2781 if (raw_inode->i_block[0])
2782 init_special_inode(inode, inode->i_mode,
2783 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2784 else
2785 init_special_inode(inode, inode->i_mode,
2786 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2788 brelse (iloc.bh);
2789 ext3_set_inode_flags(inode);
2790 return;
2792 bad_inode:
2793 make_bad_inode(inode);
2794 return;
2798 * Post the struct inode info into an on-disk inode location in the
2799 * buffer-cache. This gobbles the caller's reference to the
2800 * buffer_head in the inode location struct.
2802 * The caller must have write access to iloc->bh.
2804 static int ext3_do_update_inode(handle_t *handle,
2805 struct inode *inode,
2806 struct ext3_iloc *iloc)
2808 struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
2809 struct ext3_inode_info *ei = EXT3_I(inode);
2810 struct buffer_head *bh = iloc->bh;
2811 int err = 0, rc, block;
2813 /* For fields not tracked in the in-memory inode,
2814 * initialise them to zero for new inodes. */
2815 if (ei->i_state & EXT3_STATE_NEW)
2816 memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
2818 ext3_get_inode_flags(ei);
2819 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2820 if(!(test_opt(inode->i_sb, NO_UID32))) {
2821 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2822 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2824 * Fix up interoperability with old kernels. Otherwise, old inodes get
2825 * re-used with the upper 16 bits of the uid/gid intact
2827 if(!ei->i_dtime) {
2828 raw_inode->i_uid_high =
2829 cpu_to_le16(high_16_bits(inode->i_uid));
2830 raw_inode->i_gid_high =
2831 cpu_to_le16(high_16_bits(inode->i_gid));
2832 } else {
2833 raw_inode->i_uid_high = 0;
2834 raw_inode->i_gid_high = 0;
2836 } else {
2837 raw_inode->i_uid_low =
2838 cpu_to_le16(fs_high2lowuid(inode->i_uid));
2839 raw_inode->i_gid_low =
2840 cpu_to_le16(fs_high2lowgid(inode->i_gid));
2841 raw_inode->i_uid_high = 0;
2842 raw_inode->i_gid_high = 0;
2844 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2845 raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2846 raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
2847 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
2848 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
2849 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2850 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2851 raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2852 #ifdef EXT3_FRAGMENTS
2853 raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
2854 raw_inode->i_frag = ei->i_frag_no;
2855 raw_inode->i_fsize = ei->i_frag_size;
2856 #endif
2857 raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2858 if (!S_ISREG(inode->i_mode)) {
2859 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2860 } else {
2861 raw_inode->i_size_high =
2862 cpu_to_le32(ei->i_disksize >> 32);
2863 if (ei->i_disksize > 0x7fffffffULL) {
2864 struct super_block *sb = inode->i_sb;
2865 if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2866 EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2867 EXT3_SB(sb)->s_es->s_rev_level ==
2868 cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2869 /* If this is the first large file
2870 * created, add a flag to the superblock.
2872 err = ext3_journal_get_write_access(handle,
2873 EXT3_SB(sb)->s_sbh);
2874 if (err)
2875 goto out_brelse;
2876 ext3_update_dynamic_rev(sb);
2877 EXT3_SET_RO_COMPAT_FEATURE(sb,
2878 EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2879 sb->s_dirt = 1;
2880 handle->h_sync = 1;
2881 err = ext3_journal_dirty_metadata(handle,
2882 EXT3_SB(sb)->s_sbh);
2886 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2887 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2888 if (old_valid_dev(inode->i_rdev)) {
2889 raw_inode->i_block[0] =
2890 cpu_to_le32(old_encode_dev(inode->i_rdev));
2891 raw_inode->i_block[1] = 0;
2892 } else {
2893 raw_inode->i_block[0] = 0;
2894 raw_inode->i_block[1] =
2895 cpu_to_le32(new_encode_dev(inode->i_rdev));
2896 raw_inode->i_block[2] = 0;
2898 } else for (block = 0; block < EXT3_N_BLOCKS; block++)
2899 raw_inode->i_block[block] = ei->i_data[block];
2901 if (ei->i_extra_isize)
2902 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
2904 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2905 rc = ext3_journal_dirty_metadata(handle, bh);
2906 if (!err)
2907 err = rc;
2908 ei->i_state &= ~EXT3_STATE_NEW;
2910 out_brelse:
2911 brelse (bh);
2912 ext3_std_error(inode->i_sb, err);
2913 return err;
2917 * ext3_write_inode()
2919 * We are called from a few places:
2921 * - Within generic_file_write() for O_SYNC files.
2922 * Here, there will be no transaction running. We wait for any running
2923 * transaction to commit.
2925 * - Within sys_sync(), kupdate and such.
2926 * We wait on commit, if told to.
2928 * - Within prune_icache() (PF_MEMALLOC == true)
2929 * Here we simply return. We can't afford to block kswapd on the
2930 * journal commit.
2932 * In all cases it is actually safe for us to return without doing anything,
2933 * because the inode has been copied into a raw inode buffer in
2934 * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
2935 * knfsd.
2937 * Note that we are absolutely dependent upon all inode dirtiers doing the
2938 * right thing: they *must* call mark_inode_dirty() after dirtying info in
2939 * which we are interested.
2941 * It would be a bug for them to not do this. The code:
2943 * mark_inode_dirty(inode)
2944 * stuff();
2945 * inode->i_size = expr;
2947 * is in error because a kswapd-driven write_inode() could occur while
2948 * `stuff()' is running, and the new i_size will be lost. Plus the inode
2949 * will no longer be on the superblock's dirty inode list.
2951 int ext3_write_inode(struct inode *inode, int wait)
2953 if (current->flags & PF_MEMALLOC)
2954 return 0;
2956 if (ext3_journal_current_handle()) {
2957 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
2958 dump_stack();
2959 return -EIO;
2962 if (!wait)
2963 return 0;
2965 return ext3_force_commit(inode->i_sb);
2969 * ext3_setattr()
2971 * Called from notify_change.
2973 * We want to trap VFS attempts to truncate the file as soon as
2974 * possible. In particular, we want to make sure that when the VFS
2975 * shrinks i_size, we put the inode on the orphan list and modify
2976 * i_disksize immediately, so that during the subsequent flushing of
2977 * dirty pages and freeing of disk blocks, we can guarantee that any
2978 * commit will leave the blocks being flushed in an unused state on
2979 * disk. (On recovery, the inode will get truncated and the blocks will
2980 * be freed, so we have a strong guarantee that no future commit will
2981 * leave these blocks visible to the user.)
2983 * Called with inode->sem down.
2985 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
2987 struct inode *inode = dentry->d_inode;
2988 int error, rc = 0;
2989 const unsigned int ia_valid = attr->ia_valid;
2991 error = inode_change_ok(inode, attr);
2992 if (error)
2993 return error;
2995 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
2996 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
2997 handle_t *handle;
2999 /* (user+group)*(old+new) structure, inode write (sb,
3000 * inode block, ? - but truncate inode update has it) */
3001 handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+
3002 EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3003 if (IS_ERR(handle)) {
3004 error = PTR_ERR(handle);
3005 goto err_out;
3007 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3008 if (error) {
3009 ext3_journal_stop(handle);
3010 return error;
3012 /* Update corresponding info in inode so that everything is in
3013 * one transaction */
3014 if (attr->ia_valid & ATTR_UID)
3015 inode->i_uid = attr->ia_uid;
3016 if (attr->ia_valid & ATTR_GID)
3017 inode->i_gid = attr->ia_gid;
3018 error = ext3_mark_inode_dirty(handle, inode);
3019 ext3_journal_stop(handle);
3022 if (S_ISREG(inode->i_mode) &&
3023 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3024 handle_t *handle;
3026 handle = ext3_journal_start(inode, 3);
3027 if (IS_ERR(handle)) {
3028 error = PTR_ERR(handle);
3029 goto err_out;
3032 error = ext3_orphan_add(handle, inode);
3033 EXT3_I(inode)->i_disksize = attr->ia_size;
3034 rc = ext3_mark_inode_dirty(handle, inode);
3035 if (!error)
3036 error = rc;
3037 ext3_journal_stop(handle);
3040 rc = inode_setattr(inode, attr);
3042 /* If inode_setattr's call to ext3_truncate failed to get a
3043 * transaction handle at all, we need to clean up the in-core
3044 * orphan list manually. */
3045 if (inode->i_nlink)
3046 ext3_orphan_del(NULL, inode);
3048 if (!rc && (ia_valid & ATTR_MODE))
3049 rc = ext3_acl_chmod(inode);
3051 err_out:
3052 ext3_std_error(inode->i_sb, error);
3053 if (!error)
3054 error = rc;
3055 return error;
3060 * How many blocks doth make a writepage()?
3062 * With N blocks per page, it may be:
3063 * N data blocks
3064 * 2 indirect blocks
3065 * 2 dindirect blocks
3066 * 1 tindirect block
3067 * N+5 bitmap blocks (from the above)
3068 * N+5 group descriptor summary blocks
3069 * 1 inode block
3070 * 1 superblock.
3071 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
3073 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
3075 * With ordered or writeback data it's the same, less the N data blocks.
3077 * If the inode's direct blocks can hold an integral number of pages then a
3078 * page cannot straddle two indirect blocks, and we can only touch one indirect
3079 * and dindirect block, and the "5" above becomes "3".
3081 * This still overestimates under most circumstances. If we were to pass the
3082 * start and end offsets in here as well we could do block_to_path() on each
3083 * block and work out the exact number of indirects which are touched. Pah.
3086 static int ext3_writepage_trans_blocks(struct inode *inode)
3088 int bpp = ext3_journal_blocks_per_page(inode);
3089 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
3090 int ret;
3092 if (ext3_should_journal_data(inode))
3093 ret = 3 * (bpp + indirects) + 2;
3094 else
3095 ret = 2 * (bpp + indirects) + 2;
3097 #ifdef CONFIG_QUOTA
3098 /* We know that structure was already allocated during DQUOT_INIT so
3099 * we will be updating only the data blocks + inodes */
3100 ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb);
3101 #endif
3103 return ret;
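/*
 * For example, with 4K blocks on 4K pages, bpp == 1 and
 * EXT3_NDIR_BLOCKS % bpp == 0, so indirects == 3: the estimate is
 * 3 * (1 + 3) + 2 = 14 blocks for data=journal and 2 * 4 + 2 = 10
 * otherwise, plus the quota blocks when CONFIG_QUOTA is enabled.
 */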
3107 * The caller must have previously called ext3_reserve_inode_write().
3108 * Given this, we know that the caller already has write access to iloc->bh.
3110 int ext3_mark_iloc_dirty(handle_t *handle,
3111 struct inode *inode, struct ext3_iloc *iloc)
3113 int err = 0;
3115 /* the do_update_inode consumes one bh->b_count */
3116 get_bh(iloc->bh);
3118 /* ext3_do_update_inode() does journal_dirty_metadata */
3119 err = ext3_do_update_inode(handle, inode, iloc);
3120 put_bh(iloc->bh);
3121 return err;
3125 * On success, we end up with an outstanding reference count against
3126 * iloc->bh. This _must_ be cleaned up later.
3130 ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
3131 struct ext3_iloc *iloc)
3133 int err = 0;
3134 if (handle) {
3135 err = ext3_get_inode_loc(inode, iloc);
3136 if (!err) {
3137 BUFFER_TRACE(iloc->bh, "get_write_access");
3138 err = ext3_journal_get_write_access(handle, iloc->bh);
3139 if (err) {
3140 brelse(iloc->bh);
3141 iloc->bh = NULL;
3145 ext3_std_error(inode->i_sb, err);
3146 return err;
3150 * What we do here is to mark the in-core inode as clean with respect to inode
3151 * dirtiness (it may still be data-dirty).
3152 * This means that the in-core inode may be reaped by prune_icache
3153 * without having to perform any I/O. This is a very good thing,
3154 * because *any* task may call prune_icache - even ones which
3155 * have a transaction open against a different journal.
3157 * Is this cheating? Not really. Sure, we haven't written the
3158 * inode out, but prune_icache isn't a user-visible syncing function.
3159 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3160 * we start and wait on commits.
3162 * Is this efficient/effective? Well, we're being nice to the system
3163 * by cleaning up our inodes proactively so they can be reaped
3164 * without I/O. But we are potentially leaving up to five seconds'
3165 * worth of inodes floating about which prune_icache wants us to
3166 * write out. One way to fix that would be to get prune_icache()
3167 * to do a write_super() to free up some memory. It has the desired
3168 * effect.
3170 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
3172 struct ext3_iloc iloc;
3173 int err;
3175 might_sleep();
3176 err = ext3_reserve_inode_write(handle, inode, &iloc);
3177 if (!err)
3178 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
3179 return err;
3183 * ext3_dirty_inode() is called from __mark_inode_dirty()
3185 * We're really interested in the case where a file is being extended.
3186 * i_size has been changed by generic_commit_write() and we thus need
3187 * to include the updated inode in the current transaction.
3189 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3190 * are allocated to the file.
3192 * If the inode is marked synchronous, we don't honour that here - doing
3193 * so would cause a commit on atime updates, which we don't bother doing.
3194 * We handle synchronous inodes at the highest possible level.
3196 void ext3_dirty_inode(struct inode *inode)
3198 handle_t *current_handle = ext3_journal_current_handle();
3199 handle_t *handle;
3201 handle = ext3_journal_start(inode, 2);
3202 if (IS_ERR(handle))
3203 goto out;
3204 if (current_handle &&
3205 current_handle->h_transaction != handle->h_transaction) {
3206 /* This task has a transaction open against a different fs */
3207 printk(KERN_EMERG "%s: transactions do not match!\n",
3208 __FUNCTION__);
3209 } else {
3210 jbd_debug(5, "marking dirty. outer handle=%p\n",
3211 current_handle);
3212 ext3_mark_inode_dirty(handle, inode);
3214 ext3_journal_stop(handle);
3215 out:
3216 return;
3219 #if 0
3221 * Bind an inode's backing buffer_head into this transaction, to prevent
3222 * it from being flushed to disk early. Unlike
3223 * ext3_reserve_inode_write, this leaves behind no bh reference and
3224 * returns no iloc structure, so the caller needs to repeat the iloc
3225 * lookup to mark the inode dirty later.
3227 static int ext3_pin_inode(handle_t *handle, struct inode *inode)
3229 struct ext3_iloc iloc;
3231 int err = 0;
3232 if (handle) {
3233 err = ext3_get_inode_loc(inode, &iloc);
3234 if (!err) {
3235 BUFFER_TRACE(iloc.bh, "get_write_access");
3236 err = journal_get_write_access(handle, iloc.bh);
3237 if (!err)
3238 err = ext3_journal_dirty_metadata(handle,
3239 iloc.bh);
3240 brelse(iloc.bh);
3243 ext3_std_error(inode->i_sb, err);
3244 return err;
3246 #endif
3248 int ext3_change_inode_journal_flag(struct inode *inode, int val)
3250 journal_t *journal;
3251 handle_t *handle;
3252 int err;
3255 * We have to be very careful here: changing a data block's
3256 * journaling status dynamically is dangerous. If we write a
3257 * data block to the journal, change the status and then delete
3258 * that block, we risk forgetting to revoke the old log record
3259 * from the journal and so a subsequent replay can corrupt data.
3260 * So, first we make sure that the journal is empty and that
3261 * nobody is changing anything.
3264 journal = EXT3_JOURNAL(inode);
3265 if (is_journal_aborted(journal))
3266 return -EROFS;
3268 journal_lock_updates(journal);
3269 journal_flush(journal);
3272 * OK, there are no updates running now, and all cached data is
3273 * synced to disk. We are now in a completely consistent state
3274 * which doesn't have anything in the journal, and we know that
3275 * no filesystem updates are running, so it is safe to modify
3276 * the inode's in-core data-journaling state flag now.
3279 if (val)
3280 EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
3281 else
3282 EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
3283 ext3_set_aops(inode);
3285 journal_unlock_updates(journal);
3287 /* Finally we can mark the inode as dirty. */
3289 handle = ext3_journal_start(inode, 1);
3290 if (IS_ERR(handle))
3291 return PTR_ERR(handle);
3293 err = ext3_mark_inode_dirty(handle, inode);
3294 handle->h_sync = 1;
3295 ext3_journal_stop(handle);
3296 ext3_std_error(inode->i_sb, err);
3298 return err;
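/*
 * This is reached from the EXT3_IOC_SETFLAGS ioctl when userspace
 * toggles the journal-data attribute, e.g.
 *
 *	chattr +j file
 *
 * which is why the heavyweight journal_lock_updates()/journal_flush()
 * above is acceptable: this is a rare, explicitly requested
 * administrative operation.
 */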