1 /*
2 * linux/fs/ext4/inode.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
9 * from
11 * linux/fs/minix/inode.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@redhat.com), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
22 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/ext4_jbd2.h>
29 #include <linux/jbd2.h>
30 #include <linux/highuid.h>
31 #include <linux/pagemap.h>
32 #include <linux/quotaops.h>
33 #include <linux/string.h>
34 #include <linux/buffer_head.h>
35 #include <linux/writeback.h>
36 #include <linux/mpage.h>
37 #include <linux/uio.h>
38 #include <linux/bio.h>
39 #include "xattr.h"
40 #include "acl.h"
43 * Test whether an inode is a fast symlink.
45 static int ext4_inode_is_fast_symlink(struct inode *inode)
47 int ea_blocks = EXT4_I(inode)->i_file_acl ?
48 (inode->i_sb->s_blocksize >> 9) : 0;
50 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
54 * The ext4 forget function must perform a revoke if we are freeing data
55 * which has been journaled. Metadata (eg. indirect blocks) must be
56 * revoked in all cases.
58 * "bh" may be NULL: a metadata block may have been freed from memory
59 * but there may still be a record of it in the journal, and that record
60 * still needs to be revoked.
62 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
63 struct buffer_head *bh, ext4_fsblk_t blocknr)
65 int err;
67 might_sleep();
69 BUFFER_TRACE(bh, "enter");
71 jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
72 "data mode %lx\n",
73 bh, is_metadata, inode->i_mode,
74 test_opt(inode->i_sb, DATA_FLAGS));
76 /* Never use the revoke function if we are doing full data
77 * journaling: there is no need to, and a V1 superblock won't
78 * support it. Otherwise, only skip the revoke on un-journaled
79 * data blocks. */
81 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
82 (!is_metadata && !ext4_should_journal_data(inode))) {
83 if (bh) {
84 BUFFER_TRACE(bh, "call jbd2_journal_forget");
85 return ext4_journal_forget(handle, bh);
87 return 0;
91 * data!=journal && (is_metadata || should_journal_data(inode))
93 BUFFER_TRACE(bh, "call ext4_journal_revoke");
94 err = ext4_journal_revoke(handle, blocknr, bh);
95 if (err)
96 ext4_abort(inode->i_sb, __FUNCTION__,
97 "error %d when attempting revoke", err);
98 BUFFER_TRACE(bh, "exit");
99 return err;
103 * Work out how many blocks we need to proceed with the next chunk of a
104 * truncate transaction.
106 static unsigned long blocks_for_truncate(struct inode *inode)
108 ext4_lblk_t needed;
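/* i_blocks counts 512-byte sectors; shifting by (blocksize_bits - 9)
 * converts it to filesystem blocks, i.e. roughly the size being truncated. */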
110 needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
112 /* Give ourselves just enough room to cope with inodes in which
113 * i_blocks is corrupt: we've seen disk corruptions in the past
114 * which resulted in random data in an inode which looked enough
115 * like a regular file for ext4 to try to delete it. Things
116 * will go a bit crazy if that happens, but at least we should
117 * try not to panic the whole kernel. */
118 if (needed < 2)
119 needed = 2;
121 /* But we need to bound the transaction so we don't overflow the
122 * journal. */
123 if (needed > EXT4_MAX_TRANS_DATA)
124 needed = EXT4_MAX_TRANS_DATA;
126 return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
130 * Truncate transactions can be complex and absolutely huge. So we need to
131 * be able to restart the transaction at a convenient checkpoint to make
132 * sure we don't overflow the journal.
134 * start_transaction gets us a new handle for a truncate transaction,
135 * and extend_transaction tries to extend the existing one a bit. If
136 * extend fails, we need to propagate the failure up and restart the
137 * transaction in the top-level truncate loop. --sct
139 static handle_t *start_transaction(struct inode *inode)
141 handle_t *result;
143 result = ext4_journal_start(inode, blocks_for_truncate(inode));
144 if (!IS_ERR(result))
145 return result;
147 ext4_std_error(inode->i_sb, PTR_ERR(result));
148 return result;
152 * Try to extend this transaction for the purposes of truncation.
154 * Returns 0 if we managed to create more room. If we can't create more
155 * room, the transaction must be restarted and we return 1.
157 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
159 if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
160 return 0;
161 if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
162 return 0;
163 return 1;
167 * Restart the transaction associated with *handle. This does a commit,
168 * so before we call here everything must be consistently dirtied against
169 * this transaction.
171 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
173 jbd_debug(2, "restarting handle %p\n", handle);
174 return ext4_journal_restart(handle, blocks_for_truncate(inode));
178 * Called at the last iput() if i_nlink is zero.
180 void ext4_delete_inode (struct inode * inode)
182 handle_t *handle;
184 truncate_inode_pages(&inode->i_data, 0);
186 if (is_bad_inode(inode))
187 goto no_delete;
189 handle = start_transaction(inode);
190 if (IS_ERR(handle)) {
192 * If we're going to skip the normal cleanup, we still need to
193 * make sure that the in-core orphan linked list is properly
194 * cleaned up.
196 ext4_orphan_del(NULL, inode);
197 goto no_delete;
200 if (IS_SYNC(inode))
201 handle->h_sync = 1;
202 inode->i_size = 0;
203 if (inode->i_blocks)
204 ext4_truncate(inode);
206 * Kill off the orphan record which ext4_truncate created.
207 * AKPM: I think this can be inside the above `if'.
208 * Note that ext4_orphan_del() has to be able to cope with the
209 * deletion of a non-existent orphan - this is because we don't
210 * know if ext4_truncate() actually created an orphan record.
211 * (Well, we could do this if we need to, but heck - it works)
213 ext4_orphan_del(handle, inode);
214 EXT4_I(inode)->i_dtime = get_seconds();
217 * One subtle ordering requirement: if anything has gone wrong
218 * (transaction abort, IO errors, whatever), then we can still
219 * do these next steps (the fs will already have been marked as
220 * having errors), but we can't free the inode if the mark_dirty
221 * fails.
223 if (ext4_mark_inode_dirty(handle, inode))
224 /* If that failed, just do the required in-core inode clear. */
225 clear_inode(inode);
226 else
227 ext4_free_inode(handle, inode);
228 ext4_journal_stop(handle);
229 return;
230 no_delete:
231 clear_inode(inode); /* We must guarantee clearing of inode... */
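/*
 * One step of the chain from the inode to a data block: @p points at the
 * slot holding the block number, @key caches that (little-endian) value, and
 * @bh is the buffer_head hosting the slot (NULL when the slot lives in the
 * inode's i_data array). See ext4_get_branch() below.
 */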
234 typedef struct {
235 __le32 *p;
236 __le32 key;
237 struct buffer_head *bh;
238 } Indirect;
240 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
242 p->key = *(p->p = v);
243 p->bh = bh;
247 * ext4_block_to_path - parse the block number into array of offsets
248 * @inode: inode in question (we are only interested in its superblock)
249 * @i_block: block number to be parsed
250 * @offsets: array to store the offsets in
251 * @boundary: set this non-zero if the referred-to block is likely to be
252 * followed (on disk) by an indirect block.
254 * To store the locations of file's data ext4 uses a data structure common
255 * for UNIX filesystems - tree of pointers anchored in the inode, with
256 * data blocks at leaves and indirect blocks in intermediate nodes.
257 * This function translates the block number into path in that tree -
258 * return value is the path length and @offsets[n] is the offset of
259 * pointer to (n+1)th node in the nth one. If @block is out of range
260 * (negative or too large) a warning is printed and zero is returned.
262 * Note: function doesn't find node addresses, so no IO is needed. All
263 * we need to know is the capacity of indirect blocks (taken from the
264 * inode->i_sb).
268 * Portability note: the last comparison (check that we fit into triple
269 * indirect block) is spelled differently, because otherwise on an
270 * architecture with 32-bit longs and 8Kb pages we might get into trouble
271 * if our filesystem had 8Kb blocks. We might use long long, but that would
272 * kill us on x86. Oh, well, at least the sign propagation does not matter -
273 * i_block would have to be negative in the very beginning, so we would not
274 * get there at all.
277 static int ext4_block_to_path(struct inode *inode,
278 ext4_lblk_t i_block,
279 ext4_lblk_t offsets[4], int *boundary)
281 int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
282 int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
283 const long direct_blocks = EXT4_NDIR_BLOCKS,
284 indirect_blocks = ptrs,
285 double_blocks = (1 << (ptrs_bits * 2));
286 int n = 0;
287 int final = 0;
289 if (i_block < 0) {
290 ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
291 } else if (i_block < direct_blocks) {
292 offsets[n++] = i_block;
293 final = direct_blocks;
294 } else if ( (i_block -= direct_blocks) < indirect_blocks) {
295 offsets[n++] = EXT4_IND_BLOCK;
296 offsets[n++] = i_block;
297 final = ptrs;
298 } else if ((i_block -= indirect_blocks) < double_blocks) {
299 offsets[n++] = EXT4_DIND_BLOCK;
300 offsets[n++] = i_block >> ptrs_bits;
301 offsets[n++] = i_block & (ptrs - 1);
302 final = ptrs;
303 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
304 offsets[n++] = EXT4_TIND_BLOCK;
305 offsets[n++] = i_block >> (ptrs_bits * 2);
306 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
307 offsets[n++] = i_block & (ptrs - 1);
308 final = ptrs;
309 } else {
310 ext4_warning(inode->i_sb, "ext4_block_to_path",
311 "block %lu > max",
312 i_block + direct_blocks +
313 indirect_blocks + double_blocks);
315 if (boundary)
316 *boundary = final - 1 - (i_block & (ptrs - 1));
317 return n;
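/*
 * Illustration (assuming 4KB blocks, i.e. 1024 pointers per indirect block
 * and 12 direct slots): logical block 5 yields the path {5}, block 12 yields
 * {EXT4_IND_BLOCK, 0}, and block 1036 yields {EXT4_DIND_BLOCK, 0, 0}.
 */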
321 * ext4_get_branch - read the chain of indirect blocks leading to data
322 * @inode: inode in question
323 * @depth: depth of the chain (1 - direct pointer, etc.)
324 * @offsets: offsets of pointers in inode/indirect blocks
325 * @chain: place to store the result
326 * @err: here we store the error value
328 * Function fills the array of triples <key, p, bh> and returns %NULL
329 * if everything went OK or the pointer to the last filled triple
330 * (incomplete one) otherwise. Upon the return chain[i].key contains
331 * the number of (i+1)-th block in the chain (as it is stored in memory,
332 * i.e. little-endian 32-bit), chain[i].p contains the address of that
333 * number (it points into struct inode for i==0 and into the bh->b_data
334 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
335 * block for i>0 and NULL for i==0. In other words, it holds the block
336 * numbers of the chain, addresses they were taken from (and where we can
337 * verify that chain did not change) and buffer_heads hosting these
338 * numbers.
340 * Function stops when it stumbles upon zero pointer (absent block)
341 * (pointer to last triple returned, *@err == 0)
342 * or when it gets an IO error reading an indirect block
343 * (ditto, *@err == -EIO)
344 * or when it reads all @depth-1 indirect blocks successfully and finds
345 * the whole chain, all way to the data (returns %NULL, *err == 0).
347 * Need to be called with
348 * down_read(&EXT4_I(inode)->i_data_sem)
350 static Indirect *ext4_get_branch(struct inode *inode, int depth,
351 ext4_lblk_t *offsets,
352 Indirect chain[4], int *err)
354 struct super_block *sb = inode->i_sb;
355 Indirect *p = chain;
356 struct buffer_head *bh;
358 *err = 0;
359 /* i_data is not going away, no lock needed */
360 add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets);
361 if (!p->key)
362 goto no_block;
363 while (--depth) {
364 bh = sb_bread(sb, le32_to_cpu(p->key));
365 if (!bh)
366 goto failure;
367 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
368 /* Reader: end */
369 if (!p->key)
370 goto no_block;
372 return NULL;
374 failure:
375 *err = -EIO;
376 no_block:
377 return p;
381 * ext4_find_near - find a place for allocation with sufficient locality
382 * @inode: owner
383 * @ind: descriptor of indirect block.
385 * This function returns the preferred place for block allocation.
386 * It is used when the heuristic for sequential allocation fails.
387 * Rules are:
388 * + if there is a block to the left of our position - allocate near it.
389 * + if pointer will live in indirect block - allocate near that block.
390 * + if pointer will live in inode - allocate in the same
391 * cylinder group.
393 * In the latter case we colour the starting block by the caller's PID to
394 * prevent it from clashing with concurrent allocations for a different inode
395 * in the same block group. The PID is used here so that functionally related
396 * files will be close-by on-disk.
398 * Caller must make sure that @ind is valid and will stay that way.
400 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
402 struct ext4_inode_info *ei = EXT4_I(inode);
403 __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
404 __le32 *p;
405 ext4_fsblk_t bg_start;
408 ext4_fsblk_t last_block;
410 ext4_grpblk_t colour;
412 /* Try to find previous block */
413 for (p = ind->p - 1; p >= start; p--) {
414 if (*p)
415 return le32_to_cpu(*p);
418 /* No such thing, so let's try location of indirect block */
419 if (ind->bh)
420 return ind->bh->b_blocknr;
423 * It is going to be referred to from the inode itself? OK, just put it
424 * into the same cylinder group then.
426 bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
430 last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
432 if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
433 colour = (current->pid % 16) *
435 (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
438 else
439 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
441 return bg_start + colour;
445 * ext4_find_goal - find a preferred place for allocation.
446 * @inode: owner
447 * @block: block we want
448 * @partial: pointer to the last triple within a chain
450 * Normally this function finds the preferred place for block allocation
451 * and returns it.
453 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
454 Indirect *partial)
456 struct ext4_block_alloc_info *block_i;
458 block_i = EXT4_I(inode)->i_block_alloc_info;
461 * try the heuristic for sequential allocation,
462 * failing that at least try to get decent locality.
464 if (block_i && (block == block_i->last_alloc_logical_block + 1)
465 && (block_i->last_alloc_physical_block != 0)) {
466 return block_i->last_alloc_physical_block + 1;
469 return ext4_find_near(inode, partial);
473 * ext4_blks_to_allocate: Look up the block map and count the number
474 * of direct blocks that need to be allocated for the given branch.
476 * @branch: chain of indirect blocks
477 * @k: number of blocks needed for indirect blocks
478 * @blks: number of data blocks to be mapped.
479 * @blocks_to_boundary: the offset in the indirect block
481 * return the total number of blocks to be allocated, including the
482 * direct and indirect blocks.
484 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
485 int blocks_to_boundary)
487 unsigned long count = 0;
490 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
491 * so it's clear the blocks on that path have not been allocated either.
493 if (k > 0) {
494 /* right now we don't handle cross boundary allocation */
495 if (blks < blocks_to_boundary + 1)
496 count += blks;
497 else
498 count += blocks_to_boundary + 1;
499 return count;
502 count++;
503 while (count < blks && count <= blocks_to_boundary &&
504 le32_to_cpu(*(branch[0].p + count)) == 0) {
505 count++;
507 return count;
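/*
 * Illustration: with blks == 8, blocks_to_boundary == 5 and the branch fully
 * mapped down to the direct level (k == 0), we count the first direct slot
 * plus up to 5 further slots that are still zero, so at most 6 blocks are
 * requested in this pass.
 */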
511 * ext4_alloc_blocks: multiple allocate blocks needed for a branch
512 * @indirect_blks: the number of blocks that need to be allocated for
513 * indirect blocks
515 * @new_blocks: on return it will store the new block numbers for
516 * the indirect blocks(if needed) and the first direct block,
517 * @blks: on return it will store the total number of allocated
518 * direct blocks
520 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
521 ext4_fsblk_t goal, int indirect_blks, int blks,
522 ext4_fsblk_t new_blocks[4], int *err)
524 int target, i;
525 unsigned long count = 0;
526 int index = 0;
527 ext4_fsblk_t current_block = 0;
528 int ret = 0;
531 * Here we try to allocate the requested multiple blocks at once,
532 * on a best-effort basis.
533 * To build a branch, we should allocate blocks for
534 * the indirect blocks (if not allocated yet), and at least
535 * the first direct block of this branch. That's the
536 * minimum number of blocks we need to allocate (required).
538 target = blks + indirect_blks;
540 while (1) {
541 count = target;
542 /* allocating blocks for indirect blocks and direct blocks */
543 current_block = ext4_new_blocks(handle,inode,goal,&count,err);
544 if (*err)
545 goto failed_out;
547 target -= count;
548 /* allocate blocks for indirect blocks */
549 while (index < indirect_blks && count) {
550 new_blocks[index++] = current_block++;
551 count--;
554 if (count > 0)
555 break;
558 /* save the new block number for the first direct block */
559 new_blocks[index] = current_block;
561 /* total number of blocks allocated for direct blocks */
562 ret = count;
563 *err = 0;
564 return ret;
565 failed_out:
566 for (i = 0; i <index; i++)
567 ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
568 return ret;
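/*
 * On success new_blocks[0..indirect_blks-1] hold the newly allocated
 * indirect block numbers, new_blocks[indirect_blks] holds the first direct
 * block, and the return value is the number of direct blocks obtained; on
 * failure *err is set and everything allocated so far is freed again.
 */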
572 * ext4_alloc_branch - allocate and set up a chain of blocks.
573 * @inode: owner
574 * @indirect_blks: number of allocated indirect blocks
575 * @blks: number of allocated direct blocks
576 * @offsets: offsets (in the blocks) to store the pointers to next.
577 * @branch: place to store the chain in.
579 * This function allocates blocks, zeroes out all but the last one,
580 * links them into chain and (if we are synchronous) writes them to disk.
581 * In other words, it prepares a branch that can be spliced onto the
582 * inode. It stores the information about that chain in the branch[], in
583 * the same format as ext4_get_branch() would do. We are calling it after
584 * we had read the existing part of chain and partial points to the last
585 * triple of that (one with zero ->key). Upon the exit we have the same
586 * picture as after the successful ext4_get_block(), except that in one
587 * place chain is disconnected - *branch->p is still zero (we did not
588 * set the last link), but branch->key contains the number that should
589 * be placed into *branch->p to fill that gap.
591 * If allocation fails we free all blocks we've allocated (and forget
592 * their buffer_heads) and return the error value from the failed
593 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
594 * as described above and return 0.
596 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
597 int indirect_blks, int *blks, ext4_fsblk_t goal,
598 ext4_lblk_t *offsets, Indirect *branch)
600 int blocksize = inode->i_sb->s_blocksize;
601 int i, n = 0;
602 int err = 0;
603 struct buffer_head *bh;
604 int num;
605 ext4_fsblk_t new_blocks[4];
606 ext4_fsblk_t current_block;
608 num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
609 *blks, new_blocks, &err);
610 if (err)
611 return err;
613 branch[0].key = cpu_to_le32(new_blocks[0]);
615 * metadata blocks and data blocks are allocated.
617 for (n = 1; n <= indirect_blks; n++) {
619 * Get buffer_head for parent block, zero it out
620 * and set the pointer to new one, then send
621 * parent to disk.
623 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
624 branch[n].bh = bh;
625 lock_buffer(bh);
626 BUFFER_TRACE(bh, "call get_create_access");
627 err = ext4_journal_get_create_access(handle, bh);
628 if (err) {
629 unlock_buffer(bh);
630 brelse(bh);
631 goto failed;
634 memset(bh->b_data, 0, blocksize);
635 branch[n].p = (__le32 *) bh->b_data + offsets[n];
636 branch[n].key = cpu_to_le32(new_blocks[n]);
637 *branch[n].p = branch[n].key;
638 if ( n == indirect_blks) {
639 current_block = new_blocks[n];
641 * End of chain, update the last new metablock of
642 * the chain to point to the newly allocated
643 * data block numbers
645 for (i=1; i < num; i++)
646 *(branch[n].p + i) = cpu_to_le32(++current_block);
648 BUFFER_TRACE(bh, "marking uptodate");
649 set_buffer_uptodate(bh);
650 unlock_buffer(bh);
652 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
653 err = ext4_journal_dirty_metadata(handle, bh);
654 if (err)
655 goto failed;
657 *blks = num;
658 return err;
659 failed:
660 /* Allocation failed, free what we already allocated */
661 for (i = 1; i <= n ; i++) {
662 BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
663 ext4_journal_forget(handle, branch[i].bh);
665 for (i = 0; i <indirect_blks; i++)
666 ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
668 ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
670 return err;
674 * ext4_splice_branch - splice the allocated branch onto inode.
675 * @inode: owner
676 * @block: (logical) number of block we are adding
677 * @chain: chain of indirect blocks (with a missing link - see
678 * ext4_alloc_branch)
679 * @where: location of missing link
680 * @num: number of indirect blocks we are adding
681 * @blks: number of direct blocks we are adding
683 * This function fills the missing link and does all housekeeping needed in
684 * inode (->i_blocks, etc.). In case of success we end up with the full
685 * chain to new block and return 0.
687 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
688 ext4_lblk_t block, Indirect *where, int num, int blks)
690 int i;
691 int err = 0;
692 struct ext4_block_alloc_info *block_i;
693 ext4_fsblk_t current_block;
695 block_i = EXT4_I(inode)->i_block_alloc_info;
697 * If we're splicing into a [td]indirect block (as opposed to the
698 * inode) then we need to get write access to the [td]indirect block
699 * before the splice.
701 if (where->bh) {
702 BUFFER_TRACE(where->bh, "get_write_access");
703 err = ext4_journal_get_write_access(handle, where->bh);
704 if (err)
705 goto err_out;
707 /* That's it */
709 *where->p = where->key;
712 * Update the host buffer_head or inode to point to the just allocated
713 * direct blocks
715 if (num == 0 && blks > 1) {
716 current_block = le32_to_cpu(where->key) + 1;
717 for (i = 1; i < blks; i++)
718 *(where->p + i ) = cpu_to_le32(current_block++);
722 * update the most recently allocated logical & physical block
723 * in i_block_alloc_info, to assist in finding the proper goal block for
724 * the next allocation
726 if (block_i) {
727 block_i->last_alloc_logical_block = block + blks - 1;
728 block_i->last_alloc_physical_block =
729 le32_to_cpu(where[num].key) + blks - 1;
732 /* We are done with atomic stuff, now do the rest of housekeeping */
734 inode->i_ctime = ext4_current_time(inode);
735 ext4_mark_inode_dirty(handle, inode);
737 /* had we spliced it onto indirect block? */
738 if (where->bh) {
740 * If we spliced it onto an indirect block, we haven't
741 * altered the inode. Note however that if it is being spliced
742 * onto an indirect block at the very end of the file (the
743 * file is growing) then we *will* alter the inode to reflect
744 * the new i_size. But that is not done here - it is done in
745 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
747 jbd_debug(5, "splicing indirect only\n");
748 BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
749 err = ext4_journal_dirty_metadata(handle, where->bh);
750 if (err)
751 goto err_out;
752 } else {
754 * OK, we spliced it into the inode itself on a direct block.
755 * Inode was dirtied above.
757 jbd_debug(5, "splicing direct\n");
759 return err;
761 err_out:
762 for (i = 1; i <= num; i++) {
763 BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
764 ext4_journal_forget(handle, where[i].bh);
765 ext4_free_blocks(handle, inode,
766 le32_to_cpu(where[i-1].key), 1, 0);
768 ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
770 return err;
774 * Allocation strategy is simple: if we have to allocate something, we will
775 * have to go the whole way to leaf. So let's do it before attaching anything
776 * to tree, set linkage between the newborn blocks, write them if sync is
777 * required, recheck the path, free and repeat if check fails, otherwise
778 * set the last missing link (that will protect us from any truncate-generated
779 * removals - all blocks on the path are immune now) and possibly force the
780 * write on the parent block.
781 * That has a nice additional property: no special recovery from the failed
782 * allocations is needed - we simply release blocks and do not touch anything
783 * reachable from inode.
785 * `handle' can be NULL if create == 0.
791 * return > 0, # of blocks mapped or allocated.
792 * return = 0, if plain lookup failed.
793 * return < 0, error case.
796 * Need to be called with
797 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
798 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
800 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
801 ext4_lblk_t iblock, unsigned long maxblocks,
802 struct buffer_head *bh_result,
803 int create, int extend_disksize)
805 int err = -EIO;
806 ext4_lblk_t offsets[4];
807 Indirect chain[4];
808 Indirect *partial;
809 ext4_fsblk_t goal;
810 int indirect_blks;
811 int blocks_to_boundary = 0;
812 int depth;
813 struct ext4_inode_info *ei = EXT4_I(inode);
814 int count = 0;
815 ext4_fsblk_t first_block = 0;
818 J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
819 J_ASSERT(handle != NULL || create == 0);
820 depth = ext4_block_to_path(inode, iblock, offsets,
821 &blocks_to_boundary);
823 if (depth == 0)
824 goto out;
826 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
828 /* Simplest case - block found, no allocation needed */
829 if (!partial) {
830 first_block = le32_to_cpu(chain[depth - 1].key);
831 clear_buffer_new(bh_result);
832 count++;
833 /*map more blocks*/
834 while (count < maxblocks && count <= blocks_to_boundary) {
835 ext4_fsblk_t blk;
837 blk = le32_to_cpu(*(chain[depth-1].p + count));
839 if (blk == first_block + count)
840 count++;
841 else
842 break;
844 goto got_it;
847 /* Next simple case - plain lookup or failed read of indirect block */
848 if (!create || err == -EIO)
849 goto cleanup;
852 * Okay, we need to do block allocation. Lazily initialize the block
853 * allocation info here if necessary
855 if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
856 ext4_init_block_alloc_info(inode);
858 goal = ext4_find_goal(inode, iblock, partial);
860 /* the number of blocks need to allocate for [d,t]indirect blocks */
861 indirect_blks = (chain + depth) - partial - 1;
864 * Next look up the indirect map to count the total number of
865 * direct blocks to allocate for this branch.
867 count = ext4_blks_to_allocate(partial, indirect_blks,
868 maxblocks, blocks_to_boundary);
870 * Block out ext4_truncate while we alter the tree
872 err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
873 offsets + (partial - chain), partial);
876 * The ext4_splice_branch call will free and forget any buffers
877 * on the new chain if there is a failure, but that risks using
878 * up transaction credits, especially for bitmaps where the
879 * credits cannot be returned. Can we handle this somehow? We
880 * may need to return -EAGAIN upwards in the worst case. --sct
882 if (!err)
883 err = ext4_splice_branch(handle, inode, iblock,
884 partial, indirect_blks, count);
886 * i_disksize growing is protected by i_data_sem. Don't forget to
887 * protect it if you're about to implement concurrent
888 * ext4_get_block() -bzzz
890 if (!err && extend_disksize && inode->i_size > ei->i_disksize)
891 ei->i_disksize = inode->i_size;
892 if (err)
893 goto cleanup;
895 set_buffer_new(bh_result);
896 got_it:
897 map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
898 if (count > blocks_to_boundary)
899 set_buffer_boundary(bh_result);
900 err = count;
901 /* Clean up and exit */
902 partial = chain + depth - 1; /* the whole chain */
903 cleanup:
904 while (partial > chain) {
905 BUFFER_TRACE(partial->bh, "call brelse");
906 brelse(partial->bh);
907 partial--;
909 BUFFER_TRACE(bh_result, "returned");
910 out:
911 return err;
914 /* Maximum number of blocks we map for direct IO at once. */
915 #define DIO_MAX_BLOCKS 4096
917 * Number of credits we need for writing DIO_MAX_BLOCKS:
918 * We need sb + group descriptor + bitmap + inode -> 4
919 * For B blocks with A block pointers per block we need:
920 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
921 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
923 #define DIO_CREDITS 25
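/*
 * Worked out from the comment above: 4 (sb + group descriptor + bitmap +
 * inode) + 1 (triple indirect) + (4096/256/256 + 2) + (4096/256 + 2)
 * = 4 + 1 + 2 + 18 = 25.
 */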
931 * ext4_get_blocks_wrap() - ext4 get_block() wrapper function
932 * It will do a lookup first, and return if the blocks are already mapped.
933 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
934 * stores the allocated blocks in the result buffer head and marks it
935 * mapped.
937 * If the file is extent based, it will call ext4_ext_get_blocks();
938 * otherwise it calls ext4_get_blocks_handle() to handle indirect-mapping
939 * based files.
941 * On success, it returns the number of blocks mapped or allocated.
942 * If create == 0 and the blocks are pre-allocated and uninitialized,
943 * the result buffer head is unmapped. If create == 1, it will make sure
944 * the buffer head is mapped.
946 * It returns 0 if a plain lookup failed (blocks have not been allocated);
947 * in that case, the buffer head is unmapped.
949 * It returns the error in case of allocation failure.
952 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
953 unsigned long max_blocks, struct buffer_head *bh,
954 int create, int extend_disksize)
956 int retval;
960 clear_buffer_mapped(bh);
964 * Try to see if we can get the block without requesting
965 * for new file system block.
967 down_read((&EXT4_I(inode)->i_data_sem));
968 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
969 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
970 bh, 0, 0);
971 } else {
972 retval = ext4_get_blocks_handle(handle,
973 inode, block, max_blocks, bh, 0, 0);
975 up_read((&EXT4_I(inode)->i_data_sem));
980 /* If it is only a block(s) lookup */
981 if (!create)
982 return retval;
985 * Return if the blocks have already been allocated.
987 * Note that if blocks have been preallocated,
988 * ext4_ext_get_blocks() returns with create = 0
989 * and the buffer head unmapped.
991 if (retval > 0 && buffer_mapped(bh))
993 return retval;
1000 * New block allocation and/or writing to an uninitialized extent
1001 * will possibly result in updating i_data, so we take
1002 * the write lock of i_data_sem and call get_blocks()
1003 * with the create == 1 flag.
1006 down_write((&EXT4_I(inode)->i_data_sem));
1008 * We need to re-check the extents flag here because migrate
1009 * could have changed the inode type in between
1011 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1012 retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
1013 bh, create, extend_disksize);
1014 } else {
1015 retval = ext4_get_blocks_handle(handle, inode, block,
1016 max_blocks, bh, create, extend_disksize);
1018 up_write((&EXT4_I(inode)->i_data_sem));
1019 return retval;
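/*
 * get_block_t callback used by the buffered and direct I/O paths. For a
 * direct I/O write (create set with no transaction open) it starts its own
 * handle, sized for up to DIO_MAX_BLOCKS plus quota updates, and stops it
 * again once the blocks are mapped.
 */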
1022 static int ext4_get_block(struct inode *inode, sector_t iblock,
1023 struct buffer_head *bh_result, int create)
1025 handle_t *handle = ext4_journal_current_handle();
1026 int ret = 0, started = 0;
1027 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1029 if (create && !handle) {
1030 /* Direct IO write... */
1031 if (max_blocks > DIO_MAX_BLOCKS)
1032 max_blocks = DIO_MAX_BLOCKS;
1033 handle = ext4_journal_start(inode, DIO_CREDITS +
1034 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
1035 if (IS_ERR(handle)) {
1036 ret = PTR_ERR(handle);
1037 goto out;
1039 started = 1;
1042 ret = ext4_get_blocks_wrap(handle, inode, iblock,
1043 max_blocks, bh_result, create, 0);
1044 if (ret > 0) {
1045 bh_result->b_size = (ret << inode->i_blkbits);
1046 ret = 0;
1048 if (started)
1049 ext4_journal_stop(handle);
1050 out:
1051 return ret;
1055 * `handle' can be NULL if create is zero
1057 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1058 ext4_lblk_t block, int create, int *errp)
1060 struct buffer_head dummy;
1061 int fatal = 0, err;
1063 J_ASSERT(handle != NULL || create == 0);
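/* Map the block into an on-stack dummy buffer_head first; the real
 * page-cache buffer is obtained with sb_getblk() below. */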
1065 dummy.b_state = 0;
1066 dummy.b_blocknr = -1000;
1067 buffer_trace_init(&dummy.b_history);
1068 err = ext4_get_blocks_wrap(handle, inode, block, 1,
1069 &dummy, create, 1);
1071 * ext4_get_blocks_handle() returns number of blocks
1072 * mapped. 0 in case of a HOLE.
1074 if (err > 0) {
1075 if (err > 1)
1076 WARN_ON(1);
1077 err = 0;
1079 *errp = err;
1080 if (!err && buffer_mapped(&dummy)) {
1081 struct buffer_head *bh;
1082 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1083 if (!bh) {
1084 *errp = -EIO;
1085 goto err;
1087 if (buffer_new(&dummy)) {
1088 J_ASSERT(create != 0);
1089 J_ASSERT(handle != NULL);
1092 * Now that we do not always journal data, we should
1093 * keep in mind whether this should always journal the
1094 * new buffer as metadata. For now, regular file
1095 * writes use ext4_get_block instead, so it's not a
1096 * problem.
1098 lock_buffer(bh);
1099 BUFFER_TRACE(bh, "call get_create_access");
1100 fatal = ext4_journal_get_create_access(handle, bh);
1101 if (!fatal && !buffer_uptodate(bh)) {
1102 memset(bh->b_data,0,inode->i_sb->s_blocksize);
1103 set_buffer_uptodate(bh);
1105 unlock_buffer(bh);
1106 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1107 err = ext4_journal_dirty_metadata(handle, bh);
1108 if (!fatal)
1109 fatal = err;
1110 } else {
1111 BUFFER_TRACE(bh, "not a new buffer");
1113 if (fatal) {
1114 *errp = fatal;
1115 brelse(bh);
1116 bh = NULL;
1118 return bh;
1120 err:
1121 return NULL;
1124 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1125 ext4_lblk_t block, int create, int *err)
1127 struct buffer_head * bh;
1129 bh = ext4_getblk(handle, inode, block, create, err);
1130 if (!bh)
1131 return bh;
1132 if (buffer_uptodate(bh))
1133 return bh;
1134 ll_rw_block(READ_META, 1, &bh);
1135 wait_on_buffer(bh);
1136 if (buffer_uptodate(bh))
1137 return bh;
1138 put_bh(bh);
1139 *err = -EIO;
1140 return NULL;
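/*
 * Apply @fn to every buffer of the page that overlaps the byte range
 * [@from, @to). Buffers outside that range are skipped, but if one of them
 * is not uptodate *@partial is set. The first error returned by @fn is
 * propagated back to the caller.
 */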
1143 static int walk_page_buffers( handle_t *handle,
1144 struct buffer_head *head,
1145 unsigned from,
1146 unsigned to,
1147 int *partial,
1148 int (*fn)( handle_t *handle,
1149 struct buffer_head *bh))
1151 struct buffer_head *bh;
1152 unsigned block_start, block_end;
1153 unsigned blocksize = head->b_size;
1154 int err, ret = 0;
1155 struct buffer_head *next;
1157 for ( bh = head, block_start = 0;
1158 ret == 0 && (bh != head || !block_start);
1159 block_start = block_end, bh = next)
1161 next = bh->b_this_page;
1162 block_end = block_start + blocksize;
1163 if (block_end <= from || block_start >= to) {
1164 if (partial && !buffer_uptodate(bh))
1165 *partial = 1;
1166 continue;
1168 err = (*fn)(handle, bh);
1169 if (!ret)
1170 ret = err;
1172 return ret;
1176 * To preserve ordering, it is essential that the hole instantiation and
1177 * the data write be encapsulated in a single transaction. We cannot
1178 * close off a transaction and start a new one between the ext4_get_block()
1179 * and the commit_write(). So doing the jbd2_journal_start at the start of
1180 * prepare_write() is the right place.
1182 * Also, this function can nest inside ext4_writepage() ->
1183 * block_write_full_page(). In that case, we *know* that ext4_writepage()
1184 * has generated enough buffer credits to do the whole page. So we won't
1185 * block on the journal in that case, which is good, because the caller may
1186 * be PF_MEMALLOC.
1188 * By accident, ext4 can be reentered when a transaction is open via
1189 * quota file writes. If we were to commit the transaction while thus
1190 * reentered, there can be a deadlock - we would be holding a quota
1191 * lock, and the commit would never complete if another thread had a
1192 * transaction open and was blocking on the quota lock - a ranking
1193 * violation.
1195 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1196 * will _not_ run commit under these circumstances because handle->h_ref
1197 * is elevated. We'll still have enough credits for the tiny quotafile
1198 * write.
1200 static int do_journal_get_write_access(handle_t *handle,
1201 struct buffer_head *bh)
1203 if (!buffer_mapped(bh) || buffer_freed(bh))
1204 return 0;
1205 return ext4_journal_get_write_access(handle, bh);
1208 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1209 loff_t pos, unsigned len, unsigned flags,
1210 struct page **pagep, void **fsdata)
1212 struct inode *inode = mapping->host;
1213 int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1214 handle_t *handle;
1215 int retries = 0;
1216 struct page *page;
1217 pgoff_t index;
1218 unsigned from, to;
1220 index = pos >> PAGE_CACHE_SHIFT;
1221 from = pos & (PAGE_CACHE_SIZE - 1);
1222 to = from + len;
1224 retry:
1225 page = __grab_cache_page(mapping, index);
1226 if (!page)
1227 return -ENOMEM;
1228 *pagep = page;
1230 handle = ext4_journal_start(inode, needed_blocks);
1231 if (IS_ERR(handle)) {
1232 unlock_page(page);
1233 page_cache_release(page);
1234 ret = PTR_ERR(handle);
1235 goto out;
1238 ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1239 ext4_get_block);
1241 if (!ret && ext4_should_journal_data(inode)) {
1242 ret = walk_page_buffers(handle, page_buffers(page),
1243 from, to, NULL, do_journal_get_write_access);
1246 if (ret) {
1247 ext4_journal_stop(handle);
1248 unlock_page(page);
1249 page_cache_release(page);
1252 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1253 goto retry;
1254 out:
1255 return ret;
1258 int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1260 int err = jbd2_journal_dirty_data(handle, bh);
1261 if (err)
1262 ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
1263 bh, handle, err);
1264 return err;
1267 /* For write_end() in data=journal mode */
1268 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1270 if (!buffer_mapped(bh) || buffer_freed(bh))
1271 return 0;
1272 set_buffer_uptodate(bh);
1273 return ext4_journal_dirty_metadata(handle, bh);
1277 * Generic write_end handler for ordered and writeback ext4 journal modes.
1278 * We can't use generic_write_end, because that unlocks the page and we need to
1279 * unlock the page after ext4_journal_stop, but ext4_journal_stop must run
1280 * after block_write_end.
1282 static int ext4_generic_write_end(struct file *file,
1283 struct address_space *mapping,
1284 loff_t pos, unsigned len, unsigned copied,
1285 struct page *page, void *fsdata)
1287 struct inode *inode = file->f_mapping->host;
1289 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1291 if (pos+copied > inode->i_size) {
1292 i_size_write(inode, pos+copied);
1293 mark_inode_dirty(inode);
1296 return copied;
1300 * We need to pick up the new inode size which generic_commit_write gave us
1301 * `file' can be NULL - eg, when called from page_symlink().
1303 * ext4 never places buffers on inode->i_mapping->private_list. metadata
1304 * buffers are managed internally.
1306 static int ext4_ordered_write_end(struct file *file,
1307 struct address_space *mapping,
1308 loff_t pos, unsigned len, unsigned copied,
1309 struct page *page, void *fsdata)
1311 handle_t *handle = ext4_journal_current_handle();
1312 struct inode *inode = file->f_mapping->host;
1313 unsigned from, to;
1314 int ret = 0, ret2;
1316 from = pos & (PAGE_CACHE_SIZE - 1);
1317 to = from + len;
1319 ret = walk_page_buffers(handle, page_buffers(page),
1320 from, to, NULL, ext4_journal_dirty_data);
1322 if (ret == 0) {
1324 * generic_write_end() will run mark_inode_dirty() if i_size
1325 * changes. So let's piggyback the i_disksize mark_inode_dirty
1326 * into that.
1328 loff_t new_i_size;
1330 new_i_size = pos + copied;
1331 if (new_i_size > EXT4_I(inode)->i_disksize)
1332 EXT4_I(inode)->i_disksize = new_i_size;
1333 copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1334 page, fsdata);
1335 if (copied < 0)
1336 ret = copied;
1338 ret2 = ext4_journal_stop(handle);
1339 if (!ret)
1340 ret = ret2;
1341 unlock_page(page);
1342 page_cache_release(page);
1344 return ret ? ret : copied;
1347 static int ext4_writeback_write_end(struct file *file,
1348 struct address_space *mapping,
1349 loff_t pos, unsigned len, unsigned copied,
1350 struct page *page, void *fsdata)
1352 handle_t *handle = ext4_journal_current_handle();
1353 struct inode *inode = file->f_mapping->host;
1354 int ret = 0, ret2;
1355 loff_t new_i_size;
1357 new_i_size = pos + copied;
1358 if (new_i_size > EXT4_I(inode)->i_disksize)
1359 EXT4_I(inode)->i_disksize = new_i_size;
1361 copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1362 page, fsdata);
1363 if (copied < 0)
1364 ret = copied;
1366 ret2 = ext4_journal_stop(handle);
1367 if (!ret)
1368 ret = ret2;
1369 unlock_page(page);
1370 page_cache_release(page);
1372 return ret ? ret : copied;
1375 static int ext4_journalled_write_end(struct file *file,
1376 struct address_space *mapping,
1377 loff_t pos, unsigned len, unsigned copied,
1378 struct page *page, void *fsdata)
1380 handle_t *handle = ext4_journal_current_handle();
1381 struct inode *inode = mapping->host;
1382 int ret = 0, ret2;
1383 int partial = 0;
1384 unsigned from, to;
1386 from = pos & (PAGE_CACHE_SIZE - 1);
1387 to = from + len;
1389 if (copied < len) {
1390 if (!PageUptodate(page))
1391 copied = 0;
1392 page_zero_new_buffers(page, from+copied, to);
1395 ret = walk_page_buffers(handle, page_buffers(page), from,
1396 to, &partial, write_end_fn);
1397 if (!partial)
1398 SetPageUptodate(page);
1399 if (pos+copied > inode->i_size)
1400 i_size_write(inode, pos+copied);
1401 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1402 if (inode->i_size > EXT4_I(inode)->i_disksize) {
1403 EXT4_I(inode)->i_disksize = inode->i_size;
1404 ret2 = ext4_mark_inode_dirty(handle, inode);
1405 if (!ret)
1406 ret = ret2;
1409 ret2 = ext4_journal_stop(handle);
1410 if (!ret)
1411 ret = ret2;
1412 unlock_page(page);
1413 page_cache_release(page);
1415 return ret ? ret : copied;
1419 * bmap() is special. It gets used by applications such as lilo and by
1420 * the swapper to find the on-disk block of a specific piece of data.
1422 * Naturally, this is dangerous if the block concerned is still in the
1423 * journal. If somebody makes a swapfile on an ext4 data-journaling
1424 * filesystem and enables swap, then they may get a nasty shock when the
1425 * data getting swapped to that swapfile suddenly gets overwritten by
1426 * the original zeros written out previously to the journal and
1427 * awaiting writeback in the kernel's buffer cache.
1429 * So, if we see any bmap calls here on a modified, data-journaled file,
1430 * take extra steps to flush any blocks which might be in the cache.
1432 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1434 struct inode *inode = mapping->host;
1435 journal_t *journal;
1436 int err;
1438 if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1440 * This is a REALLY heavyweight approach, but the use of
1441 * bmap on dirty files is expected to be extremely rare:
1442 * only if we run lilo or swapon on a freshly made file
1443 * do we expect this to happen.
1445 * (bmap requires CAP_SYS_RAWIO so this does not
1446 * represent an unprivileged user DOS attack --- we'd be
1447 * in trouble if mortal users could trigger this path at
1448 * will.)
1450 * NB. EXT4_STATE_JDATA is not set on files other than
1451 * regular files. If somebody wants to bmap a directory
1452 * or symlink and gets confused because the buffer
1453 * hasn't yet been flushed to disk, they deserve
1454 * everything they get.
1457 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1458 journal = EXT4_JOURNAL(inode);
1459 jbd2_journal_lock_updates(journal);
1460 err = jbd2_journal_flush(journal);
1461 jbd2_journal_unlock_updates(journal);
1463 if (err)
1464 return 0;
1467 return generic_block_bmap(mapping,block,ext4_get_block);
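/*
 * bget_one()/bput_one() are walk_page_buffers() callbacks used by
 * ext4_ordered_writepage() to hold an extra reference on each buffer of the
 * page while block_write_full_page() runs, so the buffers stay valid even if
 * the page is unlocked and truncated in the meantime.
 */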
1470 static int bget_one(handle_t *handle, struct buffer_head *bh)
1472 get_bh(bh);
1473 return 0;
1476 static int bput_one(handle_t *handle, struct buffer_head *bh)
1478 put_bh(bh);
1479 return 0;
1482 static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1484 if (buffer_mapped(bh))
1485 return ext4_journal_dirty_data(handle, bh);
1486 return 0;
1490 * Note that we always start a transaction even if we're not journalling
1491 * data. This is to preserve ordering: any hole instantiation within
1492 * __block_write_full_page -> ext4_get_block() should be journalled
1493 * along with the data so we don't crash and then get metadata which
1494 * refers to old data.
1496 * In all journalling modes block_write_full_page() will start the I/O.
1498 * Problem:
1500 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1501 * ext4_writepage()
1503 * Similar for:
1505 * ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1507 * Same applies to ext4_get_block(). We will deadlock on various things like
1508 * lock_journal and i_data_sem
1510 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1511 * allocations fail.
1513 * 16May01: If we're reentered then journal_current_handle() will be
1514 * non-zero. We simply *return*.
1516 * 1 July 2001: @@@ FIXME:
1517 * In journalled data mode, a data buffer may be metadata against the
1518 * current transaction. But the same file is part of a shared mapping
1519 * and someone does a writepage() on it.
1521 * We will move the buffer onto the async_data list, but *after* it has
1522 * been dirtied. So there's a small window where we have dirty data on
1523 * BJ_Metadata.
1525 * Note that this only applies to the last partial page in the file. The
1526 * bit which block_write_full_page() uses prepare/commit for. (That's
1527 * broken code anyway: it's wrong for msync()).
1529 * It's a rare case: affects the final partial page, for journalled data
1530 * where the file is subject to both write() and writepage() in the same
1531 * transaction. To fix it we'll need a custom block_write_full_page().
1532 * We'll probably need that anyway for journalling writepage() output.
1534 * We don't honour synchronous mounts for writepage(). That would be
1535 * disastrous. Any write() or metadata operation will sync the fs for
1536 * us.
1538 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1539 * we don't need to open a transaction here.
1541 static int ext4_ordered_writepage(struct page *page,
1542 struct writeback_control *wbc)
1544 struct inode *inode = page->mapping->host;
1545 struct buffer_head *page_bufs;
1546 handle_t *handle = NULL;
1547 int ret = 0;
1548 int err;
1550 J_ASSERT(PageLocked(page));
1553 * We give up here if we're reentered, because it might be for a
1554 * different filesystem.
1556 if (ext4_journal_current_handle())
1557 goto out_fail;
1559 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1561 if (IS_ERR(handle)) {
1562 ret = PTR_ERR(handle);
1563 goto out_fail;
1566 if (!page_has_buffers(page)) {
1567 create_empty_buffers(page, inode->i_sb->s_blocksize,
1568 (1 << BH_Dirty)|(1 << BH_Uptodate));
1570 page_bufs = page_buffers(page);
1571 walk_page_buffers(handle, page_bufs, 0,
1572 PAGE_CACHE_SIZE, NULL, bget_one);
1574 ret = block_write_full_page(page, ext4_get_block, wbc);
1577 * The page can become unlocked at any point now, and
1578 * truncate can then come in and change things. So we
1579 * can't touch *page from now on. But *page_bufs is
1580 * safe due to elevated refcount.
1584 * And attach them to the current transaction. But only if
1585 * block_write_full_page() succeeded. Otherwise they are unmapped,
1586 * and generally junk.
1588 if (ret == 0) {
1589 err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1590 NULL, jbd2_journal_dirty_data_fn);
1591 if (!ret)
1592 ret = err;
1594 walk_page_buffers(handle, page_bufs, 0,
1595 PAGE_CACHE_SIZE, NULL, bput_one);
1596 err = ext4_journal_stop(handle);
1597 if (!ret)
1598 ret = err;
1599 return ret;
1601 out_fail:
1602 redirty_page_for_writepage(wbc, page);
1603 unlock_page(page);
1604 return ret;
1607 static int ext4_writeback_writepage(struct page *page,
1608 struct writeback_control *wbc)
1610 struct inode *inode = page->mapping->host;
1611 handle_t *handle = NULL;
1612 int ret = 0;
1613 int err;
1615 if (ext4_journal_current_handle())
1616 goto out_fail;
1618 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1619 if (IS_ERR(handle)) {
1620 ret = PTR_ERR(handle);
1621 goto out_fail;
1624 if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1625 ret = nobh_writepage(page, ext4_get_block, wbc);
1626 else
1627 ret = block_write_full_page(page, ext4_get_block, wbc);
1629 err = ext4_journal_stop(handle);
1630 if (!ret)
1631 ret = err;
1632 return ret;
1634 out_fail:
1635 redirty_page_for_writepage(wbc, page);
1636 unlock_page(page);
1637 return ret;
1640 static int ext4_journalled_writepage(struct page *page,
1641 struct writeback_control *wbc)
1643 struct inode *inode = page->mapping->host;
1644 handle_t *handle = NULL;
1645 int ret = 0;
1646 int err;
1648 if (ext4_journal_current_handle())
1649 goto no_write;
1651 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1652 if (IS_ERR(handle)) {
1653 ret = PTR_ERR(handle);
1654 goto no_write;
1657 if (!page_has_buffers(page) || PageChecked(page)) {
1659 * It's mmapped pagecache. Add buffers and journal it. There
1660 * doesn't seem much point in redirtying the page here.
1662 ClearPageChecked(page);
1663 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1664 ext4_get_block);
1665 if (ret != 0) {
1666 ext4_journal_stop(handle);
1667 goto out_unlock;
1669 ret = walk_page_buffers(handle, page_buffers(page), 0,
1670 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1672 err = walk_page_buffers(handle, page_buffers(page), 0,
1673 PAGE_CACHE_SIZE, NULL, write_end_fn);
1674 if (ret == 0)
1675 ret = err;
1676 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1677 unlock_page(page);
1678 } else {
1680 * It may be a page full of checkpoint-mode buffers. We don't
1681 * really know unless we go poke around in the buffer_heads.
1682 * But block_write_full_page will do the right thing.
1684 ret = block_write_full_page(page, ext4_get_block, wbc);
1686 err = ext4_journal_stop(handle);
1687 if (!ret)
1688 ret = err;
1689 out:
1690 return ret;
1692 no_write:
1693 redirty_page_for_writepage(wbc, page);
1694 out_unlock:
1695 unlock_page(page);
1696 goto out;
1699 static int ext4_readpage(struct file *file, struct page *page)
1701 return mpage_readpage(page, ext4_get_block);
1704 static int
1705 ext4_readpages(struct file *file, struct address_space *mapping,
1706 struct list_head *pages, unsigned nr_pages)
1708 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1711 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1713 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1716 * If it's a full truncate we just forget about the pending dirtying
1718 if (offset == 0)
1719 ClearPageChecked(page);
1721 jbd2_journal_invalidatepage(journal, page, offset);
1724 static int ext4_releasepage(struct page *page, gfp_t wait)
1726 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1728 WARN_ON(PageChecked(page));
1729 if (!page_has_buffers(page))
1730 return 0;
1731 return jbd2_journal_try_to_free_buffers(journal, page, wait);
1735 * If the O_DIRECT write will extend the file then add this inode to the
1736 * orphan list. So recovery will truncate it back to the original size
1737 * if the machine crashes during the write.
1739 * If the O_DIRECT write is instantiating holes inside i_size and the machine
1740 * crashes then stale disk data _may_ be exposed inside the file. But current
1741 * VFS code falls back to the buffered path in that case so we are safe.
1743 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1744 const struct iovec *iov, loff_t offset,
1745 unsigned long nr_segs)
1747 struct file *file = iocb->ki_filp;
1748 struct inode *inode = file->f_mapping->host;
1749 struct ext4_inode_info *ei = EXT4_I(inode);
1750 handle_t *handle;
1751 ssize_t ret;
1752 int orphan = 0;
1753 size_t count = iov_length(iov, nr_segs);
1755 if (rw == WRITE) {
1756 loff_t final_size = offset + count;
1758 if (final_size > inode->i_size) {
1759 /* Credits for sb + inode write */
1760 handle = ext4_journal_start(inode, 2);
1761 if (IS_ERR(handle)) {
1762 ret = PTR_ERR(handle);
1763 goto out;
1765 ret = ext4_orphan_add(handle, inode);
1766 if (ret) {
1767 ext4_journal_stop(handle);
1768 goto out;
1770 orphan = 1;
1771 ei->i_disksize = inode->i_size;
1772 ext4_journal_stop(handle);
1776 ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1777 offset, nr_segs,
1778 ext4_get_block, NULL);
1780 if (orphan) {
1781 int err;
1783 /* Credits for sb + inode write */
1784 handle = ext4_journal_start(inode, 2);
1785 if (IS_ERR(handle)) {
1786 /* This is really bad luck. We've written the data
1787 * but cannot extend i_size. Bail out and pretend
1788 * the write failed... */
1789 ret = PTR_ERR(handle);
1790 goto out;
1792 if (inode->i_nlink)
1793 ext4_orphan_del(handle, inode);
1794 if (ret > 0) {
1795 loff_t end = offset + ret;
1796 if (end > inode->i_size) {
1797 ei->i_disksize = end;
1798 i_size_write(inode, end);
1800 * We're going to return a positive `ret'
1801 * here due to non-zero-length I/O, so there's
1802 * no way of reporting error returns from
1803 * ext4_mark_inode_dirty() to userspace. So
1804 * ignore it.
1806 ext4_mark_inode_dirty(handle, inode);
1809 err = ext4_journal_stop(handle);
1810 if (ret == 0)
1811 ret = err;
1813 out:
1814 return ret;
1818 * Pages can be marked dirty completely asynchronously from ext4's journalling
1819 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
1820 * much here because ->set_page_dirty is called under VFS locks. The page is
1821 * not necessarily locked.
1823 * We cannot just dirty the page and leave attached buffers clean, because the
1824 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
1825 * or jbddirty because all the journalling code will explode.
1827 * So what we do is to mark the page "pending dirty" and next time writepage
1828 * is called, propagate that into the buffers appropriately.
1830 static int ext4_journalled_set_page_dirty(struct page *page)
1832 SetPageChecked(page);
1833 return __set_page_dirty_nobuffers(page);
1836 static const struct address_space_operations ext4_ordered_aops = {
1837 .readpage = ext4_readpage,
1838 .readpages = ext4_readpages,
1839 .writepage = ext4_ordered_writepage,
1840 .sync_page = block_sync_page,
1841 .write_begin = ext4_write_begin,
1842 .write_end = ext4_ordered_write_end,
1843 .bmap = ext4_bmap,
1844 .invalidatepage = ext4_invalidatepage,
1845 .releasepage = ext4_releasepage,
1846 .direct_IO = ext4_direct_IO,
1847 .migratepage = buffer_migrate_page,
1850 static const struct address_space_operations ext4_writeback_aops = {
1851 .readpage = ext4_readpage,
1852 .readpages = ext4_readpages,
1853 .writepage = ext4_writeback_writepage,
1854 .sync_page = block_sync_page,
1855 .write_begin = ext4_write_begin,
1856 .write_end = ext4_writeback_write_end,
1857 .bmap = ext4_bmap,
1858 .invalidatepage = ext4_invalidatepage,
1859 .releasepage = ext4_releasepage,
1860 .direct_IO = ext4_direct_IO,
1861 .migratepage = buffer_migrate_page,
1864 static const struct address_space_operations ext4_journalled_aops = {
1865 .readpage = ext4_readpage,
1866 .readpages = ext4_readpages,
1867 .writepage = ext4_journalled_writepage,
1868 .sync_page = block_sync_page,
1869 .write_begin = ext4_write_begin,
1870 .write_end = ext4_journalled_write_end,
1871 .set_page_dirty = ext4_journalled_set_page_dirty,
1872 .bmap = ext4_bmap,
1873 .invalidatepage = ext4_invalidatepage,
1874 .releasepage = ext4_releasepage,
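/* Note: no .direct_IO here; O_DIRECT is not supported in data=journal mode. */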
1877 void ext4_set_aops(struct inode *inode)
1879 if (ext4_should_order_data(inode))
1880 inode->i_mapping->a_ops = &ext4_ordered_aops;
1881 else if (ext4_should_writeback_data(inode))
1882 inode->i_mapping->a_ops = &ext4_writeback_aops;
1883 else
1884 inode->i_mapping->a_ops = &ext4_journalled_aops;
1888 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1889 * up to the end of the block which corresponds to `from'.
1890 * This is required during truncate. We need to physically zero the tail end
1891 * of that block so it doesn't yield old data if the file is later grown.
1893 int ext4_block_truncate_page(handle_t *handle, struct page *page,
1894 struct address_space *mapping, loff_t from)
1896 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1897 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1898 unsigned blocksize, length, pos;
1899 ext4_lblk_t iblock;
1900 struct inode *inode = mapping->host;
1901 struct buffer_head *bh;
1902 int err = 0;
1904 blocksize = inode->i_sb->s_blocksize;
1905 length = blocksize - (offset & (blocksize - 1));
1906 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
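/* iblock is the file-relative (logical) block number of the first block in this page. */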
1909 * For "nobh" option, we can only work if we don't need to
1910 * read-in the page - otherwise we create buffers to do the IO.
1912 if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1913 ext4_should_writeback_data(inode) && PageUptodate(page)) {
1914 zero_user(page, offset, length);
1915 set_page_dirty(page);
1916 goto unlock;
1919 if (!page_has_buffers(page))
1920 create_empty_buffers(page, blocksize, 0);
1922 /* Find the buffer that contains "offset" */
1923 bh = page_buffers(page);
1924 pos = blocksize;
1925 while (offset >= pos) {
1926 bh = bh->b_this_page;
1927 iblock++;
1928 pos += blocksize;
1931 err = 0;
1932 if (buffer_freed(bh)) {
1933 BUFFER_TRACE(bh, "freed: skip");
1934 goto unlock;
1937 if (!buffer_mapped(bh)) {
1938 BUFFER_TRACE(bh, "unmapped");
1939 ext4_get_block(inode, iblock, bh, 0);
1940 /* unmapped? It's a hole - nothing to do */
1941 if (!buffer_mapped(bh)) {
1942 BUFFER_TRACE(bh, "still unmapped");
1943 goto unlock;
1947 /* Ok, it's mapped. Make sure it's up-to-date */
1948 if (PageUptodate(page))
1949 set_buffer_uptodate(bh);
1951 if (!buffer_uptodate(bh)) {
1952 err = -EIO;
1953 ll_rw_block(READ, 1, &bh);
1954 wait_on_buffer(bh);
1955 /* Uhhuh. Read error. Complain and punt. */
1956 if (!buffer_uptodate(bh))
1957 goto unlock;
1960 if (ext4_should_journal_data(inode)) {
1961 BUFFER_TRACE(bh, "get write access");
1962 err = ext4_journal_get_write_access(handle, bh);
1963 if (err)
1964 goto unlock;
1967 zero_user(page, offset, length);
1969 BUFFER_TRACE(bh, "zeroed end of block");
1971 err = 0;
1972 if (ext4_should_journal_data(inode)) {
1973 err = ext4_journal_dirty_metadata(handle, bh);
1974 } else {
1975 if (ext4_should_order_data(inode))
1976 err = ext4_journal_dirty_data(handle, bh);
1977 mark_buffer_dirty(bh);
1980 unlock:
1981 unlock_page(page);
1982 page_cache_release(page);
1983 return err;
1987 * Probably it should be a library function... search for first non-zero word
1988 * or memcmp with zero_page, whatever is better for particular architecture.
1989 * Linus?
1991 static inline int all_zeroes(__le32 *p, __le32 *q)
1993 while (p < q)
1994 if (*p++)
1995 return 0;
1996 return 1;
2000 * ext4_find_shared - find the indirect blocks for partial truncation.
2001 * @inode: inode in question
2002 * @depth: depth of the affected branch
2003 * @offsets: offsets of pointers in that branch (see ext4_block_to_path)
2004 * @chain: place to store the pointers to partial indirect blocks
2005 * @top: place to the (detached) top of branch
2007 * This is a helper function used by ext4_truncate().
2009 * When we do truncate() we may have to clean the ends of several
2010 * indirect blocks but leave the blocks themselves alive. Block is
2011 * partially truncated if some data below the new i_size is referenced
2012 * from it (and it is on the path to the first completely truncated
2013 * data block, indeed). We have to free the top of that path along
2014 * with everything to the right of the path. Since no allocation
2015 * past the truncation point is possible until ext4_truncate()
2016 * finishes, we may safely do the latter, but top of branch may
2017 * require special attention - pageout below the truncation point
2018 * might try to populate it.
2020 * We atomically detach the top of branch from the tree, store the
2021 * block number of its root in *@top, pointers to buffer_heads of
2022 * partially truncated blocks - in @chain[].bh and pointers to
2023 * their last elements that should not be removed - in
2024 * @chain[].p. Return value is the pointer to last filled element
2025 * of @chain.
2027 * The work left to caller to do the actual freeing of subtrees:
2028 * a) free the subtree starting from *@top
2029 * b) free the subtrees whose roots are stored in
2030 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
2031 * c) free the subtrees growing from the inode past the @chain[0].
2032 * (no partially truncated stuff there). */
2034 static Indirect *ext4_find_shared(struct inode *inode, int depth,
2035 ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
2037 Indirect *partial, *p;
2038 int k, err;
2040 *top = 0;
2041 /* Make k index the deepest non-null offset + 1 */
2042 for (k = depth; k > 1 && !offsets[k-1]; k--)
2044 partial = ext4_get_branch(inode, k, offsets, chain, &err);
2045 /* Writer: pointers */
2046 if (!partial)
2047 partial = chain + k-1;
2049 * If the branch acquired continuation since we've looked at it -
2050 * fine, it should all survive and (new) top doesn't belong to us.
2052 if (!partial->key && *partial->p)
2053 /* Writer: end */
2054 goto no_top;
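/* Walk back up the chain past indirect blocks whose pointers before p->p are all zero: such a block keeps nothing below the cut point and need not survive. */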
2055 for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
2058 * OK, we've found the last block that must survive. The rest of our
2059 * branch should be detached before unlocking. However, if that rest
2060 * of branch is all ours and does not grow immediately from the inode
2061 * it's easier to cheat and just decrement partial->p.
2063 if (p == chain + k - 1 && p > chain) {
2064 p->p--;
2065 } else {
2066 *top = *p->p;
2067 /* Nope, don't do this in ext4. Must leave the tree intact */
2068 #if 0
2069 *p->p = 0;
2070 #endif
2072 /* Writer: end */
2074 while(partial > p) {
2075 brelse(partial->bh);
2076 partial--;
2078 no_top:
2079 return partial;
2083 * Zero a number of block pointers in either an inode or an indirect block.
2084 * If we restart the transaction we must again get write access to the
2085 * indirect block for further modification.
2087 * We release `count' blocks on disk, but (last - first) may be greater
2088 * than `count' because there can be holes in there.
2090 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
2091 struct buffer_head *bh, ext4_fsblk_t block_to_free,
2092 unsigned long count, __le32 *first, __le32 *last)
2094 __le32 *p;
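/* If the handle is running low on credits and cannot be extended, flush what we have dirtied so far and restart the transaction before continuing. */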
2095 if (try_to_extend_transaction(handle, inode)) {
2096 if (bh) {
2097 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2098 ext4_journal_dirty_metadata(handle, bh);
2100 ext4_mark_inode_dirty(handle, inode);
2101 ext4_journal_test_restart(handle, inode);
2102 if (bh) {
2103 BUFFER_TRACE(bh, "retaking write access");
2104 ext4_journal_get_write_access(handle, bh);
2109 * Any buffers which are on the journal will be in memory. We find
2110 * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
2111 * on them. We've already detached each block from the file, so
2112 * bforget() in jbd2_journal_forget() should be safe.
2114 * AKPM: turn on bforget in jbd2_journal_forget()!!!
2116 for (p = first; p < last; p++) {
2117 u32 nr = le32_to_cpu(*p);
2118 if (nr) {
2119 struct buffer_head *tbh;
2121 *p = 0;
2122 tbh = sb_find_get_block(inode->i_sb, nr);
2123 ext4_forget(handle, 0, inode, tbh, nr);
2127 ext4_free_blocks(handle, inode, block_to_free, count, 0);
2131 * ext4_free_data - free a list of data blocks
2132 * @handle: handle for this transaction
2133 * @inode: inode we are dealing with
2134 * @this_bh: indirect buffer_head which contains *@first and *@last
2135 * @first: array of block numbers
2136 * @last: points immediately past the end of array
2138 * We are freeing all blocks referenced from that array (numbers are stored as
2139 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2141 * We accumulate contiguous runs of blocks to free. Conveniently, if these
2142 * blocks are contiguous then releasing them at one time will only affect one
2143 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2144 * actually use a lot of journal space.
2146 * @this_bh will be %NULL if @first and @last point into the inode's direct
2147 * block pointers.
2149 static void ext4_free_data(handle_t *handle, struct inode *inode,
2150 struct buffer_head *this_bh,
2151 __le32 *first, __le32 *last)
2153 ext4_fsblk_t block_to_free = 0; /* Starting block # of a run */
2154 unsigned long count = 0; /* Number of blocks in the run */
2155 __le32 *block_to_free_p = NULL; /* Pointer into inode/ind
2156 corresponding to
2157 block_to_free */
2158 ext4_fsblk_t nr; /* Current block # */
2159 __le32 *p; /* Pointer into inode/ind
2160 for current block */
2161 int err;
2163 if (this_bh) { /* For indirect block */
2164 BUFFER_TRACE(this_bh, "get_write_access");
2165 err = ext4_journal_get_write_access(handle, this_bh);
2166 /* Important: if we can't update the indirect pointers
2167 * to the blocks, we can't free them. */
2168 if (err)
2169 return;
2172 for (p = first; p < last; p++) {
2173 nr = le32_to_cpu(*p);
2174 if (nr) {
2175 /* accumulate blocks to free if they're contiguous */
2176 if (count == 0) {
2177 block_to_free = nr;
2178 block_to_free_p = p;
2179 count = 1;
2180 } else if (nr == block_to_free + count) {
2181 count++;
2182 } else {
2183 ext4_clear_blocks(handle, inode, this_bh,
2184 block_to_free,
2185 count, block_to_free_p, p);
2186 block_to_free = nr;
2187 block_to_free_p = p;
2188 count = 1;
2193 if (count > 0)
2194 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2195 count, block_to_free_p, p);
2197 if (this_bh) {
2198 BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2199 ext4_journal_dirty_metadata(handle, this_bh);
2204 * ext4_free_branches - free an array of branches
2205 * @handle: JBD handle for this transaction
2206 * @inode: inode we are dealing with
2207 * @parent_bh: the buffer_head which contains *@first and *@last
2208 * @first: array of block numbers
2209 * @last: pointer immediately past the end of array
2210 * @depth: depth of the branches to free
2212 * We are freeing all blocks referenced from these branches (numbers are
2213 * stored as little-endian 32-bit) and updating @inode->i_blocks
2214 * appropriately.
2216 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2217 struct buffer_head *parent_bh,
2218 __le32 *first, __le32 *last, int depth)
2220 ext4_fsblk_t nr;
2221 __le32 *p;
2223 if (is_handle_aborted(handle))
2224 return;
2226 if (depth--) {
2227 struct buffer_head *bh;
2228 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2229 p = last;
2230 while (--p >= first) {
2231 nr = le32_to_cpu(*p);
2232 if (!nr)
2233 continue; /* A hole */
2235 /* Go read the buffer for the next level down */
2236 bh = sb_bread(inode->i_sb, nr);
2239 * A read failure? Report error and clear slot
2240 * (should be rare).
2242 if (!bh) {
2243 ext4_error(inode->i_sb, "ext4_free_branches",
2244 "Read failure, inode=%lu, block=%llu",
2245 inode->i_ino, nr);
2246 continue;
2249 /* This zaps the entire block. Bottom up. */
2250 BUFFER_TRACE(bh, "free child branches");
2251 ext4_free_branches(handle, inode, bh,
2252 (__le32*)bh->b_data,
2253 (__le32*)bh->b_data + addr_per_block,
2254 depth);
2257 * We've probably journalled the indirect block several
2258 * times during the truncate. But it's no longer
2259 * needed and we now drop it from the transaction via
2260 * jbd2_journal_revoke().
2262 * That's easy if it's exclusively part of this
2263 * transaction. But if it's part of the committing
2264 * transaction then jbd2_journal_forget() will simply
2265 * brelse() it. That means that if the underlying
2266 * block is reallocated in ext4_get_block(),
2267 * unmap_underlying_metadata() will find this block
2268 * and will try to get rid of it. damn, damn.
2270 * If this block has already been committed to the
2271 * journal, a revoke record will be written. And
2272 * revoke records must be emitted *before* clearing
2273 * this block's bit in the bitmaps.
2275 ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2278 * Everything below this pointer has been
2279 * released. Now let this top-of-subtree go.
2281 * We want the freeing of this indirect block to be
2282 * atomic in the journal with the updating of the
2283 * bitmap block which owns it. So make some room in
2284 * the journal.
2286 * We zero the parent pointer *after* freeing its
2287 * pointee in the bitmaps, so if extend_transaction()
2288 * for some reason fails to put the bitmap changes and
2289 * the release into the same transaction, recovery
2290 * will merely complain about releasing a free block,
2291 * rather than leaking blocks.
2293 if (is_handle_aborted(handle))
2294 return;
2295 if (try_to_extend_transaction(handle, inode)) {
2296 ext4_mark_inode_dirty(handle, inode);
2297 ext4_journal_test_restart(handle, inode);
2300 ext4_free_blocks(handle, inode, nr, 1, 1);
2302 if (parent_bh) {
2304 * The block which we have just freed is
2305 * pointed to by an indirect block: journal it
2307 BUFFER_TRACE(parent_bh, "get_write_access");
2308 if (!ext4_journal_get_write_access(handle,
2309 parent_bh)){
2310 *p = 0;
2311 BUFFER_TRACE(parent_bh,
2312 "call ext4_journal_dirty_metadata");
2313 ext4_journal_dirty_metadata(handle,
2314 parent_bh);
2318 } else {
2319 /* We have reached the bottom of the tree. */
2320 BUFFER_TRACE(parent_bh, "free data blocks");
2321 ext4_free_data(handle, inode, parent_bh, first, last);
2326 * ext4_truncate()
2328 * We block out ext4_get_block() block instantiations across the entire
2329 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2330 * simultaneously on behalf of the same inode.
2332 * As we work through the truncate and commit bits of it to the journal there
2333 * is one core, guiding principle: the file's tree must always be consistent on
2334 * disk. We must be able to restart the truncate after a crash.
2336 * The file's tree may be transiently inconsistent in memory (although it
2337 * probably isn't), but whenever we close off and commit a journal transaction,
2338 * the contents of (the filesystem + the journal) must be consistent and
2339 * restartable. It's pretty simple, really: bottom up, right to left (although
2340 * left-to-right works OK too).
2342 * Note that at recovery time, journal replay occurs *before* the restart of
2343 * truncate against the orphan inode list.
2345 * The committed inode has the new, desired i_size (which is the same as
2346 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
2347 * that this inode's truncate did not complete and it will again call
2348 * ext4_truncate() to have another go. So there will be instantiated blocks
2349 * to the right of the truncation point in a crashed ext4 filesystem. But
2350 * that's fine - as long as they are linked from the inode, the post-crash
2351 * ext4_truncate() run will find them and release them.
2353 void ext4_truncate(struct inode *inode)
2355 handle_t *handle;
2356 struct ext4_inode_info *ei = EXT4_I(inode);
2357 __le32 *i_data = ei->i_data;
2358 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2359 struct address_space *mapping = inode->i_mapping;
2360 ext4_lblk_t offsets[4];
2361 Indirect chain[4];
2362 Indirect *partial;
2363 __le32 nr = 0;
2364 int n;
2365 ext4_lblk_t last_block;
2366 unsigned blocksize = inode->i_sb->s_blocksize;
2367 struct page *page;
2369 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2370 S_ISLNK(inode->i_mode)))
2371 return;
2372 if (ext4_inode_is_fast_symlink(inode))
2373 return;
2374 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2375 return;
2378 * We have to lock the EOF page here, because lock_page() nests
2379 * outside jbd2_journal_start().
2381 if ((inode->i_size & (blocksize - 1)) == 0) {
2382 /* Block boundary? Nothing to do */
2383 page = NULL;
2384 } else {
2385 page = grab_cache_page(mapping,
2386 inode->i_size >> PAGE_CACHE_SHIFT);
2387 if (!page)
2388 return;
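/* Extent-mapped inodes are truncated entirely by the extents code. */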
2391 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
2392 ext4_ext_truncate(inode, page);
2393 return;
2396 handle = start_transaction(inode);
2397 if (IS_ERR(handle)) {
2398 if (page) {
2399 clear_highpage(page);
2400 flush_dcache_page(page);
2401 unlock_page(page);
2402 page_cache_release(page);
2404 return; /* AKPM: return what? */
2407 last_block = (inode->i_size + blocksize-1)
2408 >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
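/* last_block is the first file block that lies entirely beyond the new i_size. */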
2410 if (page)
2411 ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2413 n = ext4_block_to_path(inode, last_block, offsets, NULL);
2414 if (n == 0)
2415 goto out_stop; /* error */
2418 * OK. This truncate is going to happen. We add the inode to the
2419 * orphan list, so that if this truncate spans multiple transactions,
2420 * and we crash, we will resume the truncate when the filesystem
2421 * recovers. It also marks the inode dirty, to catch the new size.
2423 * Implication: the file must always be in a sane, consistent
2424 * truncatable state while each transaction commits.
2426 if (ext4_orphan_add(handle, inode))
2427 goto out_stop;
2430 * The orphan list entry will now protect us from any crash which
2431 * occurs before the truncate completes, so it is now safe to propagate
2432 * the new, shorter inode size (held for now in i_size) into the
2433 * on-disk inode. We do this via i_disksize, which is the value which
2434 * ext4 *really* writes onto the disk inode.
2436 ei->i_disksize = inode->i_size;
2439 * From here we block out all ext4_get_block() callers who want to
2440 * modify the block allocation tree.
2442 down_write(&ei->i_data_sem);
2444 if (n == 1) { /* direct blocks */
2445 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2446 i_data + EXT4_NDIR_BLOCKS);
2447 goto do_indirects;
2450 partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2451 /* Kill the top of shared branch (not detached) */
2452 if (nr) {
2453 if (partial == chain) {
2454 /* Shared branch grows from the inode */
2455 ext4_free_branches(handle, inode, NULL,
2456 &nr, &nr+1, (chain+n-1) - partial);
2457 *partial->p = 0;
2459 * We mark the inode dirty prior to restart,
2460 * and prior to stop. No need for it here.
2462 } else {
2463 /* Shared branch grows from an indirect block */
2464 BUFFER_TRACE(partial->bh, "get_write_access");
2465 ext4_free_branches(handle, inode, partial->bh,
2466 partial->p,
2467 partial->p+1, (chain+n-1) - partial);
2470 /* Clear the ends of indirect blocks on the shared branch */
2471 while (partial > chain) {
2472 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2473 (__le32*)partial->bh->b_data+addr_per_block,
2474 (chain+n-1) - partial);
2475 BUFFER_TRACE(partial->bh, "call brelse");
2476 brelse (partial->bh);
2477 partial--;
2479 do_indirects:
2480 /* Kill the remaining (whole) subtrees */
2481 switch (offsets[0]) {
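/* No break statements below: each case deliberately falls through so every deeper level of indirection is freed as well. */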
2482 default:
2483 nr = i_data[EXT4_IND_BLOCK];
2484 if (nr) {
2485 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2486 i_data[EXT4_IND_BLOCK] = 0;
2488 case EXT4_IND_BLOCK:
2489 nr = i_data[EXT4_DIND_BLOCK];
2490 if (nr) {
2491 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2492 i_data[EXT4_DIND_BLOCK] = 0;
2494 case EXT4_DIND_BLOCK:
2495 nr = i_data[EXT4_TIND_BLOCK];
2496 if (nr) {
2497 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2498 i_data[EXT4_TIND_BLOCK] = 0;
2500 case EXT4_TIND_BLOCK:
2504 ext4_discard_reservation(inode);
2506 up_write(&ei->i_data_sem);
2507 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2508 ext4_mark_inode_dirty(handle, inode);
2511 * In a multi-transaction truncate, we only make the final transaction
2512 * synchronous
2514 if (IS_SYNC(inode))
2515 handle->h_sync = 1;
2516 out_stop:
2518 * If this was a simple ftruncate(), and the file will remain alive
2519 * then we need to clear up the orphan record which we created above.
2520 * However, if this was a real unlink then we were called by
2521 * ext4_delete_inode(), and we allow that function to clean up the
2522 * orphan info for us.
2524 if (inode->i_nlink)
2525 ext4_orphan_del(handle, inode);
2527 ext4_journal_stop(handle);
2530 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2531 unsigned long ino, struct ext4_iloc *iloc)
2533 unsigned long desc, group_desc;
2534 ext4_group_t block_group;
2535 unsigned long offset;
2536 ext4_fsblk_t block;
2537 struct buffer_head *bh;
2538 struct ext4_group_desc * gdp;
2540 if (!ext4_valid_inum(sb, ino)) {
2542 * This error is already checked for in namei.c unless we are
2543 * looking at an NFS filehandle, in which case no error
2544 * report is needed
2546 return 0;
2549 block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2550 if (block_group >= EXT4_SB(sb)->s_groups_count) {
2551 ext4_error(sb,"ext4_get_inode_block","group >= groups count");
2552 return 0;
2554 smp_rmb();
2555 group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2556 desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2557 bh = EXT4_SB(sb)->s_group_desc[group_desc];
2558 if (!bh) {
2559 ext4_error (sb, "ext4_get_inode_block",
2560 "Descriptor not loaded");
2561 return 0;
2564 gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
2565 desc * EXT4_DESC_SIZE(sb));
2567 * Figure out the offset within the block group inode table
2569 offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2570 EXT4_INODE_SIZE(sb);
2571 block = ext4_inode_table(sb, gdp) +
2572 (offset >> EXT4_BLOCK_SIZE_BITS(sb));
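/* e.g. with 128-byte inodes and 4K blocks, the 33rd inode of a group (inode_offset 32) has offset 32 * 128 = 4096 and so starts the second inode-table block. */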
2574 iloc->block_group = block_group;
2575 iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2576 return block;
2580 * ext4_get_inode_loc returns with an extra refcount against the inode's
2581 * underlying buffer_head on success. If 'in_mem' is true, we have all
2582 * data in memory that is needed to recreate the on-disk version of this
2583 * inode.
2585 static int __ext4_get_inode_loc(struct inode *inode,
2586 struct ext4_iloc *iloc, int in_mem)
2588 ext4_fsblk_t block;
2589 struct buffer_head *bh;
2591 block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2592 if (!block)
2593 return -EIO;
2595 bh = sb_getblk(inode->i_sb, block);
2596 if (!bh) {
2597 ext4_error (inode->i_sb, "ext4_get_inode_loc",
2598 "unable to read inode block - "
2599 "inode=%lu, block=%llu",
2600 inode->i_ino, block);
2601 return -EIO;
2603 if (!buffer_uptodate(bh)) {
2604 lock_buffer(bh);
2605 if (buffer_uptodate(bh)) {
2606 /* someone brought it uptodate while we waited */
2607 unlock_buffer(bh);
2608 goto has_buffer;
2612 * If we have all information of the inode in memory and this
2613 * is the only valid inode in the block, we need not read the
2614 * block.
2616 if (in_mem) {
2617 struct buffer_head *bitmap_bh;
2618 struct ext4_group_desc *desc;
2619 int inodes_per_buffer;
2620 int inode_offset, i;
2621 ext4_group_t block_group;
2622 int start;
2624 block_group = (inode->i_ino - 1) /
2625 EXT4_INODES_PER_GROUP(inode->i_sb);
2626 inodes_per_buffer = bh->b_size /
2627 EXT4_INODE_SIZE(inode->i_sb);
2628 inode_offset = ((inode->i_ino - 1) %
2629 EXT4_INODES_PER_GROUP(inode->i_sb));
2630 start = inode_offset & ~(inodes_per_buffer - 1);
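/* start is the index of the first inode sharing this inode-table block. */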
2632 /* Is the inode bitmap in cache? */
2633 desc = ext4_get_group_desc(inode->i_sb,
2634 block_group, NULL);
2635 if (!desc)
2636 goto make_io;
2638 bitmap_bh = sb_getblk(inode->i_sb,
2639 ext4_inode_bitmap(inode->i_sb, desc));
2640 if (!bitmap_bh)
2641 goto make_io;
2644 * If the inode bitmap isn't in cache then the
2645 * optimisation may end up performing two reads instead
2646 * of one, so skip it.
2648 if (!buffer_uptodate(bitmap_bh)) {
2649 brelse(bitmap_bh);
2650 goto make_io;
2652 for (i = start; i < start + inodes_per_buffer; i++) {
2653 if (i == inode_offset)
2654 continue;
2655 if (ext4_test_bit(i, bitmap_bh->b_data))
2656 break;
2658 brelse(bitmap_bh);
2659 if (i == start + inodes_per_buffer) {
2660 /* all other inodes are free, so skip I/O */
2661 memset(bh->b_data, 0, bh->b_size);
2662 set_buffer_uptodate(bh);
2663 unlock_buffer(bh);
2664 goto has_buffer;
2668 make_io:
2670 * There are other valid inodes in the buffer, this inode
2671 * has in-inode xattrs, or we don't have this inode in memory.
2672 * Read the block from disk.
2674 get_bh(bh);
2675 bh->b_end_io = end_buffer_read_sync;
2676 submit_bh(READ_META, bh);
2677 wait_on_buffer(bh);
2678 if (!buffer_uptodate(bh)) {
2679 ext4_error(inode->i_sb, "ext4_get_inode_loc",
2680 "unable to read inode block - "
2681 "inode=%lu, block=%llu",
2682 inode->i_ino, block);
2683 brelse(bh);
2684 return -EIO;
2687 has_buffer:
2688 iloc->bh = bh;
2689 return 0;
2692 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2694 /* We have all inode data except xattrs in memory here. */
2695 return __ext4_get_inode_loc(inode, iloc,
2696 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2699 void ext4_set_inode_flags(struct inode *inode)
2701 unsigned int flags = EXT4_I(inode)->i_flags;
2703 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2704 if (flags & EXT4_SYNC_FL)
2705 inode->i_flags |= S_SYNC;
2706 if (flags & EXT4_APPEND_FL)
2707 inode->i_flags |= S_APPEND;
2708 if (flags & EXT4_IMMUTABLE_FL)
2709 inode->i_flags |= S_IMMUTABLE;
2710 if (flags & EXT4_NOATIME_FL)
2711 inode->i_flags |= S_NOATIME;
2712 if (flags & EXT4_DIRSYNC_FL)
2713 inode->i_flags |= S_DIRSYNC;
2716 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
2717 void ext4_get_inode_flags(struct ext4_inode_info *ei)
2719 unsigned int flags = ei->vfs_inode.i_flags;
2721 ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2722 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2723 if (flags & S_SYNC)
2724 ei->i_flags |= EXT4_SYNC_FL;
2725 if (flags & S_APPEND)
2726 ei->i_flags |= EXT4_APPEND_FL;
2727 if (flags & S_IMMUTABLE)
2728 ei->i_flags |= EXT4_IMMUTABLE_FL;
2729 if (flags & S_NOATIME)
2730 ei->i_flags |= EXT4_NOATIME_FL;
2731 if (flags & S_DIRSYNC)
2732 ei->i_flags |= EXT4_DIRSYNC_FL;
2734 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
2735 struct ext4_inode_info *ei)
2737 blkcnt_t i_blocks;
2738 struct inode *inode = &(ei->vfs_inode);
2739 struct super_block *sb = inode->i_sb;
2741 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
2742 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2743 /* we are using combined 48 bit field */
2744 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
2745 le32_to_cpu(raw_inode->i_blocks_lo);
2746 if (ei->i_flags & EXT4_HUGE_FILE_FL) {
2747 /* i_blocks represent file system block size */
2748 return i_blocks << (inode->i_blkbits - 9);
2749 } else {
2750 return i_blocks;
2752 } else {
2753 return le32_to_cpu(raw_inode->i_blocks_lo);
2757 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2759 struct ext4_iloc iloc;
2760 struct ext4_inode *raw_inode;
2761 struct ext4_inode_info *ei;
2762 struct buffer_head *bh;
2763 struct inode *inode;
2764 long ret;
2765 int block;
2767 inode = iget_locked(sb, ino);
2768 if (!inode)
2769 return ERR_PTR(-ENOMEM);
2770 if (!(inode->i_state & I_NEW))
2771 return inode;
2773 ei = EXT4_I(inode);
2774 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2775 ei->i_acl = EXT4_ACL_NOT_CACHED;
2776 ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2777 #endif
2778 ei->i_block_alloc_info = NULL;
2780 ret = __ext4_get_inode_loc(inode, &iloc, 0);
2781 if (ret < 0)
2782 goto bad_inode;
2783 bh = iloc.bh;
2784 raw_inode = ext4_raw_inode(&iloc);
2785 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2786 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2787 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2788 if(!(test_opt (inode->i_sb, NO_UID32))) {
2789 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2790 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2792 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2794 ei->i_state = 0;
2795 ei->i_dir_start_lookup = 0;
2796 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2797 /* We now have enough fields to check if the inode was active or not.
2798 * This is needed because nfsd might try to access dead inodes
2799 * the test is the same one that e2fsck uses
2800 * NeilBrown 1999oct15
2802 if (inode->i_nlink == 0) {
2803 if (inode->i_mode == 0 ||
2804 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2805 /* this inode is deleted */
2806 brelse (bh);
2807 ret = -ESTALE;
2808 goto bad_inode;
2810 /* The only unlinked inodes we let through here have
2811 * valid i_mode and are being read by the orphan
2812 * recovery code: that's fine, we're about to complete
2813 * the process of deleting those. */
2815 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2816 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
2817 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
2818 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2819 cpu_to_le32(EXT4_OS_HURD)) {
2820 ei->i_file_acl |=
2821 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2823 inode->i_size = ext4_isize(raw_inode);
2824 ei->i_disksize = inode->i_size;
2825 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2826 ei->i_block_group = iloc.block_group;
2828 * NOTE! The in-memory inode i_data array is in little-endian order
2829 * even on big-endian machines: we do NOT byteswap the block numbers!
2831 for (block = 0; block < EXT4_N_BLOCKS; block++)
2832 ei->i_data[block] = raw_inode->i_block[block];
2833 INIT_LIST_HEAD(&ei->i_orphan);
2835 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2836 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2837 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2838 EXT4_INODE_SIZE(inode->i_sb)) {
2839 brelse (bh);
2840 ret = -EIO;
2841 goto bad_inode;
2843 if (ei->i_extra_isize == 0) {
2844 /* The extra space is currently unused. Use it. */
2845 ei->i_extra_isize = sizeof(struct ext4_inode) -
2846 EXT4_GOOD_OLD_INODE_SIZE;
2847 } else {
2848 __le32 *magic = (void *)raw_inode +
2849 EXT4_GOOD_OLD_INODE_SIZE +
2850 ei->i_extra_isize;
2851 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2852 ei->i_state |= EXT4_STATE_XATTR;
2854 } else
2855 ei->i_extra_isize = 0;
2857 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
2858 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
2859 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
2860 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
2862 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
2863 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2864 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
2865 inode->i_version |=
2866 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
2869 if (S_ISREG(inode->i_mode)) {
2870 inode->i_op = &ext4_file_inode_operations;
2871 inode->i_fop = &ext4_file_operations;
2872 ext4_set_aops(inode);
2873 } else if (S_ISDIR(inode->i_mode)) {
2874 inode->i_op = &ext4_dir_inode_operations;
2875 inode->i_fop = &ext4_dir_operations;
2876 } else if (S_ISLNK(inode->i_mode)) {
2877 if (ext4_inode_is_fast_symlink(inode))
2878 inode->i_op = &ext4_fast_symlink_inode_operations;
2879 else {
2880 inode->i_op = &ext4_symlink_inode_operations;
2881 ext4_set_aops(inode);
2883 } else {
2884 inode->i_op = &ext4_special_inode_operations;
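/* Old-style (16-bit) device numbers live in i_block[0]; new-style ones in i_block[1]. */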
2885 if (raw_inode->i_block[0])
2886 init_special_inode(inode, inode->i_mode,
2887 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2888 else
2889 init_special_inode(inode, inode->i_mode,
2890 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2892 brelse (iloc.bh);
2893 ext4_set_inode_flags(inode);
2894 unlock_new_inode(inode);
2895 return inode;
2897 bad_inode:
2898 iget_failed(inode);
2899 return ERR_PTR(ret);
2902 static int ext4_inode_blocks_set(handle_t *handle,
2903 struct ext4_inode *raw_inode,
2904 struct ext4_inode_info *ei)
2906 struct inode *inode = &(ei->vfs_inode);
2907 u64 i_blocks = inode->i_blocks;
2908 struct super_block *sb = inode->i_sb;
2909 int err = 0;
2911 if (i_blocks <= ~0U) {
2913 * i_blocks can be represented in a 32 bit variable
2914 * as multiple of 512 bytes
2916 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
2917 raw_inode->i_blocks_high = 0;
2918 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2919 } else if (i_blocks <= 0xffffffffffffULL) {
2921 * i_blocks can be represented in a 48 bit variable
2922 * as multiple of 512 bytes
2924 err = ext4_update_rocompat_feature(handle, sb,
2925 EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2926 if (err)
2927 goto err_out;
2928 /* i_block is stored in the split 48 bit fields */
2929 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
2930 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2931 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2932 } else {
2934 * i_blocks should be represented in a 48 bit variable
2935 * as multiple of file system block size
2937 err = ext4_update_rocompat_feature(handle, sb,
2938 EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2939 if (err)
2940 goto err_out;
2941 ei->i_flags |= EXT4_HUGE_FILE_FL;
2942 /* i_block is stored in file system block size */
2943 i_blocks = i_blocks >> (inode->i_blkbits - 9);
2944 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
2945 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2947 err_out:
2948 return err;
2952 * Post the struct inode info into an on-disk inode location in the
2953 * buffer-cache. This gobbles the caller's reference to the
2954 * buffer_head in the inode location struct.
2956 * The caller must have write access to iloc->bh.
2958 static int ext4_do_update_inode(handle_t *handle,
2959 struct inode *inode,
2960 struct ext4_iloc *iloc)
2962 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2963 struct ext4_inode_info *ei = EXT4_I(inode);
2964 struct buffer_head *bh = iloc->bh;
2965 int err = 0, rc, block;
2967 /* For fields not tracked in the in-memory inode,
2968 * initialise them to zero for new inodes. */
2969 if (ei->i_state & EXT4_STATE_NEW)
2970 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2972 ext4_get_inode_flags(ei);
2973 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2974 if(!(test_opt(inode->i_sb, NO_UID32))) {
2975 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2976 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2978 * Fix up interoperability with old kernels. Otherwise, old inodes get
2979 * re-used with the upper 16 bits of the uid/gid intact
2981 if(!ei->i_dtime) {
2982 raw_inode->i_uid_high =
2983 cpu_to_le16(high_16_bits(inode->i_uid));
2984 raw_inode->i_gid_high =
2985 cpu_to_le16(high_16_bits(inode->i_gid));
2986 } else {
2987 raw_inode->i_uid_high = 0;
2988 raw_inode->i_gid_high = 0;
2990 } else {
2991 raw_inode->i_uid_low =
2992 cpu_to_le16(fs_high2lowuid(inode->i_uid));
2993 raw_inode->i_gid_low =
2994 cpu_to_le16(fs_high2lowgid(inode->i_gid));
2995 raw_inode->i_uid_high = 0;
2996 raw_inode->i_gid_high = 0;
2998 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
3000 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
3001 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
3002 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
3003 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
3005 if (ext4_inode_blocks_set(handle, raw_inode, ei))
3006 goto out_brelse;
3007 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
3008 raw_inode->i_flags = cpu_to_le32(ei->i_flags);
3009 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
3010 cpu_to_le32(EXT4_OS_HURD))
3011 raw_inode->i_file_acl_high =
3012 cpu_to_le16(ei->i_file_acl >> 32);
3013 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
3014 ext4_isize_set(raw_inode, ei->i_disksize);
3015 if (ei->i_disksize > 0x7fffffffULL) {
3016 struct super_block *sb = inode->i_sb;
3017 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
3018 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
3019 EXT4_SB(sb)->s_es->s_rev_level ==
3020 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
3021 /* If this is the first large file
3022 * created, add a flag to the superblock.
3024 err = ext4_journal_get_write_access(handle,
3025 EXT4_SB(sb)->s_sbh);
3026 if (err)
3027 goto out_brelse;
3028 ext4_update_dynamic_rev(sb);
3029 EXT4_SET_RO_COMPAT_FEATURE(sb,
3030 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
3031 sb->s_dirt = 1;
3032 handle->h_sync = 1;
3033 err = ext4_journal_dirty_metadata(handle,
3034 EXT4_SB(sb)->s_sbh);
3037 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
3038 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
3039 if (old_valid_dev(inode->i_rdev)) {
3040 raw_inode->i_block[0] =
3041 cpu_to_le32(old_encode_dev(inode->i_rdev));
3042 raw_inode->i_block[1] = 0;
3043 } else {
3044 raw_inode->i_block[0] = 0;
3045 raw_inode->i_block[1] =
3046 cpu_to_le32(new_encode_dev(inode->i_rdev));
3047 raw_inode->i_block[2] = 0;
3049 } else for (block = 0; block < EXT4_N_BLOCKS; block++)
3050 raw_inode->i_block[block] = ei->i_data[block];
3052 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
3053 if (ei->i_extra_isize) {
3054 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3055 raw_inode->i_version_hi =
3056 cpu_to_le32(inode->i_version >> 32);
3057 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3061 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
3062 rc = ext4_journal_dirty_metadata(handle, bh);
3063 if (!err)
3064 err = rc;
3065 ei->i_state &= ~EXT4_STATE_NEW;
3067 out_brelse:
3068 brelse (bh);
3069 ext4_std_error(inode->i_sb, err);
3070 return err;
3074 * ext4_write_inode()
3076 * We are called from a few places:
3078 * - Within generic_file_write() for O_SYNC files.
3079 * Here, there will be no transaction running. We wait for any running
3080 * transaction to commit.
3082 * - Within sys_sync(), kupdate and such.
3083 * We wait on commit, if told to.
3085 * - Within prune_icache() (PF_MEMALLOC == true)
3086 * Here we simply return. We can't afford to block kswapd on the
3087 * journal commit.
3089 * In all cases it is actually safe for us to return without doing anything,
3090 * because the inode has been copied into a raw inode buffer in
3091 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
3092 * knfsd.
3094 * Note that we are absolutely dependent upon all inode dirtiers doing the
3095 * right thing: they *must* call mark_inode_dirty() after dirtying info in
3096 * which we are interested.
3098 * It would be a bug for them to not do this. The code:
3100 * mark_inode_dirty(inode)
3101 * stuff();
3102 * inode->i_size = expr;
3104 * is in error because a kswapd-driven write_inode() could occur while
3105 * `stuff()' is running, and the new i_size will be lost. Plus the inode
3106 * will no longer be on the superblock's dirty inode list.
3108 int ext4_write_inode(struct inode *inode, int wait)
3110 if (current->flags & PF_MEMALLOC)
3111 return 0;
3113 if (ext4_journal_current_handle()) {
3114 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3115 dump_stack();
3116 return -EIO;
3119 if (!wait)
3120 return 0;
3122 return ext4_force_commit(inode->i_sb);
3126 * ext4_setattr()
3128 * Called from notify_change.
3130 * We want to trap VFS attempts to truncate the file as soon as
3131 * possible. In particular, we want to make sure that when the VFS
3132 * shrinks i_size, we put the inode on the orphan list and modify
3133 * i_disksize immediately, so that during the subsequent flushing of
3134 * dirty pages and freeing of disk blocks, we can guarantee that any
3135 * commit will leave the blocks being flushed in an unused state on
3136 * disk. (On recovery, the inode will get truncated and the blocks will
3137 * be freed, so we have a strong guarantee that no future commit will
3138 * leave these blocks visible to the user.)
3140 * Called with inode->sem down.
3142 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3144 struct inode *inode = dentry->d_inode;
3145 int error, rc = 0;
3146 const unsigned int ia_valid = attr->ia_valid;
3148 error = inode_change_ok(inode, attr);
3149 if (error)
3150 return error;
3152 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3153 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3154 handle_t *handle;
3156 /* (user+group)*(old+new) structure, inode write (sb,
3157 * inode block, ? - but truncate inode update has it) */
3158 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
3159 EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3160 if (IS_ERR(handle)) {
3161 error = PTR_ERR(handle);
3162 goto err_out;
3164 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3165 if (error) {
3166 ext4_journal_stop(handle);
3167 return error;
3169 /* Update corresponding info in inode so that everything is in
3170 * one transaction */
3171 if (attr->ia_valid & ATTR_UID)
3172 inode->i_uid = attr->ia_uid;
3173 if (attr->ia_valid & ATTR_GID)
3174 inode->i_gid = attr->ia_gid;
3175 error = ext4_mark_inode_dirty(handle, inode);
3176 ext4_journal_stop(handle);
3179 if (attr->ia_valid & ATTR_SIZE) {
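/* Indirect-mapped (non-extent) files cannot grow beyond s_bitmap_maxbytes. */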
3180 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
3181 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3183 if (attr->ia_size > sbi->s_bitmap_maxbytes) {
3184 error = -EFBIG;
3185 goto err_out;
3190 if (S_ISREG(inode->i_mode) &&
3191 attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3192 handle_t *handle;
3194 handle = ext4_journal_start(inode, 3);
3195 if (IS_ERR(handle)) {
3196 error = PTR_ERR(handle);
3197 goto err_out;
3200 error = ext4_orphan_add(handle, inode);
3201 EXT4_I(inode)->i_disksize = attr->ia_size;
3202 rc = ext4_mark_inode_dirty(handle, inode);
3203 if (!error)
3204 error = rc;
3205 ext4_journal_stop(handle);
3208 rc = inode_setattr(inode, attr);
3210 /* If inode_setattr's call to ext4_truncate failed to get a
3211 * transaction handle at all, we need to clean up the in-core
3212 * orphan list manually. */
3213 if (inode->i_nlink)
3214 ext4_orphan_del(NULL, inode);
3216 if (!rc && (ia_valid & ATTR_MODE))
3217 rc = ext4_acl_chmod(inode);
3219 err_out:
3220 ext4_std_error(inode->i_sb, error);
3221 if (!error)
3222 error = rc;
3223 return error;
3228 * How many blocks doth make a writepage()?
3230 * With N blocks per page, it may be:
3231 * N data blocks
3232 * 2 indirect block
3233 * 2 dindirect
3234 * 1 tindirect
3235 * N+5 bitmap blocks (from the above)
3236 * N+5 group descriptor summary blocks
3237 * 1 inode block
3238 * 1 superblock.
3239 * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3241 * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3243 * With ordered or writeback data it's the same, less the N data blocks.
3245 * If the inode's direct blocks can hold an integral number of pages then a
3246 * page cannot straddle two indirect blocks, and we can only touch one indirect
3247 * and dindirect block, and the "5" above becomes "3".
3249 * This still overestimates under most circumstances. If we were to pass the
3250 * start and end offsets in here as well we could do block_to_path() on each
3251 * block and work out the exact number of indirects which are touched. Pah.
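* For example, with 4KB blocks and 4KB pages, bpp == 1 and 12 % 1 == 0, so
* indirects == 3 and data journalling needs 3 * (1 + 3) + 2 = 14 credits
* before the quota blocks are added.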
3254 int ext4_writepage_trans_blocks(struct inode *inode)
3256 int bpp = ext4_journal_blocks_per_page(inode);
3257 int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3258 int ret;
3260 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3261 return ext4_ext_writepage_trans_blocks(inode, bpp);
3263 if (ext4_should_journal_data(inode))
3264 ret = 3 * (bpp + indirects) + 2;
3265 else
3266 ret = 2 * (bpp + indirects) + 2;
3268 #ifdef CONFIG_QUOTA
3269 /* We know that structure was already allocated during DQUOT_INIT so
3270 * we will be updating only the data blocks + inodes */
3271 ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3272 #endif
3274 return ret;
3278 * The caller must have previously called ext4_reserve_inode_write().
3279 * Given this, we know that the caller already has write access to iloc->bh.
3281 int ext4_mark_iloc_dirty(handle_t *handle,
3282 struct inode *inode, struct ext4_iloc *iloc)
3284 int err = 0;
3286 if (test_opt(inode->i_sb, I_VERSION))
3287 inode_inc_iversion(inode);
3289 /* the do_update_inode consumes one bh->b_count */
3290 get_bh(iloc->bh);
3292 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3293 err = ext4_do_update_inode(handle, inode, iloc);
3294 put_bh(iloc->bh);
3295 return err;
3299 * On success, we end up with an outstanding reference count against
3300 * iloc->bh. This _must_ be cleaned up later.
3304 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3305 struct ext4_iloc *iloc)
3307 int err = 0;
3308 if (handle) {
3309 err = ext4_get_inode_loc(inode, iloc);
3310 if (!err) {
3311 BUFFER_TRACE(iloc->bh, "get_write_access");
3312 err = ext4_journal_get_write_access(handle, iloc->bh);
3313 if (err) {
3314 brelse(iloc->bh);
3315 iloc->bh = NULL;
3319 ext4_std_error(inode->i_sb, err);
3320 return err;
3324 * Expand an inode by new_extra_isize bytes.
3325 * Returns 0 on success or negative error number on failure.
3327 static int ext4_expand_extra_isize(struct inode *inode,
3328 unsigned int new_extra_isize,
3329 struct ext4_iloc iloc,
3330 handle_t *handle)
3332 struct ext4_inode *raw_inode;
3333 struct ext4_xattr_ibody_header *header;
3334 struct ext4_xattr_entry *entry;
3336 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
3337 return 0;
3339 raw_inode = ext4_raw_inode(&iloc);
3341 header = IHDR(inode, raw_inode);
3342 entry = IFIRST(header);
3344 /* No extended attributes present */
3345 if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
3346 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3347 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3348 new_extra_isize);
3349 EXT4_I(inode)->i_extra_isize = new_extra_isize;
3350 return 0;
3353 /* try to expand with EAs present */
3354 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
3355 raw_inode, handle);
3359 * What we do here is to mark the in-core inode as clean with respect to inode
3360 * dirtiness (it may still be data-dirty).
3361 * This means that the in-core inode may be reaped by prune_icache
3362 * without having to perform any I/O. This is a very good thing,
3363 * because *any* task may call prune_icache - even ones which
3364 * have a transaction open against a different journal.
3366 * Is this cheating? Not really. Sure, we haven't written the
3367 * inode out, but prune_icache isn't a user-visible syncing function.
3368 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3369 * we start and wait on commits.
3371 * Is this efficient/effective? Well, we're being nice to the system
3372 * by cleaning up our inodes proactively so they can be reaped
3373 * without I/O. But we are potentially leaving up to five seconds'
3374 * worth of inodes floating about which prune_icache wants us to
3375 * write out. One way to fix that would be to get prune_icache()
3376 * to do a write_super() to free up some memory. It has the desired
3377 * effect.
3379 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3381 struct ext4_iloc iloc;
3382 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3383 static unsigned int mnt_count;
3384 int err, ret;
3386 might_sleep();
3387 err = ext4_reserve_inode_write(handle, inode, &iloc);
3388 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3389 !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3391 * We need extra buffer credits since we may write into EA block
3392 * with this same handle. If journal_extend fails, then it will
3393 * only result in a minor loss of functionality for that inode.
3394 * If this is felt to be critical, then e2fsck should be run to
3395 * force a large enough s_min_extra_isize.
3397 if ((jbd2_journal_extend(handle,
3398 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3399 ret = ext4_expand_extra_isize(inode,
3400 sbi->s_want_extra_isize,
3401 iloc, handle);
3402 if (ret) {
3403 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3404 if (mnt_count !=
3405 le16_to_cpu(sbi->s_es->s_mnt_count)) {
3406 ext4_warning(inode->i_sb, __FUNCTION__,
3407 "Unable to expand inode %lu. Delete"
3408 " some EAs or run e2fsck.",
3409 inode->i_ino);
3410 mnt_count =
3411 le16_to_cpu(sbi->s_es->s_mnt_count);
3416 if (!err)
3417 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3418 return err;
3422 * ext4_dirty_inode() is called from __mark_inode_dirty()
3424 * We're really interested in the case where a file is being extended.
3425 * i_size has been changed by generic_commit_write() and we thus need
3426 * to include the updated inode in the current transaction.
3428 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3429 * are allocated to the file.
3431 * If the inode is marked synchronous, we don't honour that here - doing
3432 * so would cause a commit on atime updates, which we don't bother doing.
3433 * We handle synchronous inodes at the highest possible level.
3435 void ext4_dirty_inode(struct inode *inode)
3437 handle_t *current_handle = ext4_journal_current_handle();
3438 handle_t *handle;
3440 handle = ext4_journal_start(inode, 2);
3441 if (IS_ERR(handle))
3442 goto out;
3443 if (current_handle &&
3444 current_handle->h_transaction != handle->h_transaction) {
3445 /* This task has a transaction open against a different fs */
3446 printk(KERN_EMERG "%s: transactions do not match!\n",
3447 __FUNCTION__);
3448 } else {
3449 jbd_debug(5, "marking dirty. outer handle=%p\n",
3450 current_handle);
3451 ext4_mark_inode_dirty(handle, inode);
3453 ext4_journal_stop(handle);
3454 out:
3455 return;
3458 #if 0
3460 * Bind an inode's backing buffer_head into this transaction, to prevent
3461 * it from being flushed to disk early. Unlike
3462 * ext4_reserve_inode_write, this leaves behind no bh reference and
3463 * returns no iloc structure, so the caller needs to repeat the iloc
3464 * lookup to mark the inode dirty later.
3466 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3468 struct ext4_iloc iloc;
3470 int err = 0;
3471 if (handle) {
3472 err = ext4_get_inode_loc(inode, &iloc);
3473 if (!err) {
3474 BUFFER_TRACE(iloc.bh, "get_write_access");
3475 err = jbd2_journal_get_write_access(handle, iloc.bh);
3476 if (!err)
3477 err = ext4_journal_dirty_metadata(handle,
3478 iloc.bh);
3479 brelse(iloc.bh);
3482 ext4_std_error(inode->i_sb, err);
3483 return err;
3485 #endif
3487 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3489 journal_t *journal;
3490 handle_t *handle;
3491 int err;
3494 * We have to be very careful here: changing a data block's
3495 * journaling status dynamically is dangerous. If we write a
3496 * data block to the journal, change the status and then delete
3497 * that block, we risk forgetting to revoke the old log record
3498 * from the journal and so a subsequent replay can corrupt data.
3499 * So, first we make sure that the journal is empty and that
3500 * nobody is changing anything.
3503 journal = EXT4_JOURNAL(inode);
3504 if (is_journal_aborted(journal))
3505 return -EROFS;
3507 jbd2_journal_lock_updates(journal);
3508 jbd2_journal_flush(journal);
3511 * OK, there are no updates running now, and all cached data is
3512 * synced to disk. We are now in a completely consistent state
3513 * which doesn't have anything in the journal, and we know that
3514 * no filesystem updates are running, so it is safe to modify
3515 * the inode's in-core data-journaling state flag now.
3518 if (val)
3519 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3520 else
3521 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3522 ext4_set_aops(inode);
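/* Switching the flag also switches the address_space operations so that subsequent writeback uses the matching writepage path. */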
3524 jbd2_journal_unlock_updates(journal);
3526 /* Finally we can mark the inode as dirty. */
3528 handle = ext4_journal_start(inode, 1);
3529 if (IS_ERR(handle))
3530 return PTR_ERR(handle);
3532 err = ext4_mark_inode_dirty(handle, inode);
3533 handle->h_sync = 1;
3534 ext4_journal_stop(handle);
3535 ext4_std_error(inode->i_sb, err);
3537 return err;