/*
 *  linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@dcs.ed.ac.uk), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
 */
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/namei.h>
#include <linux/uio.h>
static int __ext2_write_inode(struct inode *inode, int do_sync);

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT2_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);
static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ext2_truncate_blocks(inode, inode->i_size);
	}
}
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext2_evict_inode(struct inode * inode)
	struct ext2_block_alloc_info *rsv;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		dquot_initialize(inode);

	truncate_inode_pages_final(&inode->i_data);

	sb_start_intwrite(inode->i_sb);
	EXT2_I(inode)->i_dtime = get_seconds();
	mark_inode_dirty(inode);
	__ext2_write_inode(inode, inode_needs_sync(inode));

	ext2_truncate_blocks(inode, 0);
	ext2_xattr_delete_inode(inode);

	invalidate_inode_buffers(inode);

	ext2_discard_reservation(inode);
	rsv = EXT2_I(inode)->i_block_alloc_info;
	EXT2_I(inode)->i_block_alloc_info = NULL;

	ext2_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
typedef struct {
	__le32	*p;
	__le32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
	p->key = *(p->p = v);
}

static inline int verify_chain(Indirect *from, Indirect *to)
	while (from <= to && from->key == *from->p)
/**
 *	ext2_block_to_path - parse the block number into array of offsets
 *	@inode: inode in question (we are only interested in its superblock)
 *	@i_block: block number to be parsed
 *	@offsets: array to store the offsets in
 *	@boundary: set this non-zero if the referred-to block is likely to be
 *	       followed (on disk) by an indirect block.
 *
 *	To store the locations of file's data ext2 uses a data structure common
 *	for UNIX filesystems - tree of pointers anchored in the inode, with
 *	data blocks at leaves and indirect blocks in intermediate nodes.
 *	This function translates the block number into path in that tree -
 *	return value is the path length and @offsets[n] is the offset of
 *	pointer to (n+1)th node in the nth one. If @block is out of range
 *	(negative or too large) warning is printed and zero returned.
 *
 *	Note: function doesn't find node addresses, so no IO is needed. All
 *	we need to know is the capacity of indirect blocks (taken from the
 *	inode->i_sb).
 *
 *	Portability note: the last comparison (check that we fit into triple
 *	indirect block) is spelled differently, because otherwise on an
 *	architecture with 32-bit longs and 8Kb pages we might get into trouble
 *	if our filesystem had 8Kb blocks. We might use long long, but that would
 *	kill us on x86. Oh, well, at least the sign propagation does not matter -
 *	i_block would have to be negative in the very beginning, so we would not
 *	get there at all.
 */
static int ext2_block_to_path(struct inode *inode,
			long i_block, int offsets[4], int *boundary)
	int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT2_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));

		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block < 0", __func__);
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
		final = direct_blocks;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT2_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT2_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT2_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
		ext2_msg(inode->i_sb, KERN_WARNING,
			"warning: %s: block is too big", __func__);
	*boundary = final - 1 - (i_block & (ptrs - 1));
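/*
 * Worked example (illustrative only, assuming a 1KiB block size, so
 * EXT2_ADDR_PER_BLOCK == 256, ptrs_bits == 8, EXT2_NDIR_BLOCKS == 12):
 *
 *   i_block = 10  -> offsets = { 10 },                   depth 1 (direct)
 *   i_block = 100 -> 100 - 12 = 88 < 256
 *                 -> offsets = { EXT2_IND_BLOCK, 88 },   depth 2
 *   i_block = 300 -> 300 - 12 - 256 = 32 < 256 * 256
 *                 -> offsets = { EXT2_DIND_BLOCK, 32 >> 8, 32 & 255 }
 *                            = { EXT2_DIND_BLOCK, 0, 32 }, depth 3
 */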
/**
 *	ext2_get_branch - read the chain of indirect blocks leading to data
 *	@inode: inode in question
 *	@depth: depth of the chain (1 - direct pointer, etc.)
 *	@offsets: offsets of pointers in inode/indirect blocks
 *	@chain: place to store the result
 *	@err: here we store the error value
 *
 *	Function fills the array of triples <key, p, bh> and returns %NULL
 *	if everything went OK or the pointer to the last filled triple
 *	(incomplete one) otherwise. Upon the return chain[i].key contains
 *	the number of (i+1)-th block in the chain (as it is stored in memory,
 *	i.e. little-endian 32-bit), chain[i].p contains the address of that
 *	number (it points into struct inode for i==0 and into the bh->b_data
 *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *	block for i>0 and NULL for i==0. In other words, it holds the block
 *	numbers of the chain, addresses they were taken from (and where we can
 *	verify that chain did not change) and buffer_heads hosting these
 *	numbers.
 *
 *	Function stops when it stumbles upon zero pointer (absent block)
 *		(pointer to last triple returned, *@err == 0)
 *	or when it gets an IO error reading an indirect block
 *		(ditto, *@err == -EIO)
 *	or when it notices that chain had been changed while it was reading
 *		(ditto, *@err == -EAGAIN)
 *	or when it reads all @depth-1 indirect blocks successfully and finds
 *	the whole chain, all way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext2_get_branch(struct inode *inode,
				 int depth,
				 int *offsets,
				 Indirect chain[4],
				 int *err)
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;

	/* i_data is not going away, no lock needed */
	add_chain(chain, NULL, EXT2_I(inode)->i_data + *offsets);

		bh = sb_bread(sb, le32_to_cpu(p->key));

		read_lock(&EXT2_I(inode)->i_meta_lock);
		if (!verify_chain(chain, p))
		add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
		read_unlock(&EXT2_I(inode)->i_meta_lock);

	read_unlock(&EXT2_I(inode)->i_meta_lock);
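/*
 * Illustrative sketch (bh1/bh2 are hypothetical buffer_heads, continuing the
 * depth-3 example above with offsets = { EXT2_DIND_BLOCK, 0, 32 }): on full
 * success ext2_get_branch() leaves
 *
 *   chain[0] = { .p = &EXT2_I(inode)->i_data[EXT2_DIND_BLOCK], .key = *p, .bh = NULL }
 *   chain[1] = { .p = (__le32 *)bh1->b_data + 0,  .key = *p, .bh = bh1 }
 *   chain[2] = { .p = (__le32 *)bh2->b_data + 32, .key = *p, .bh = bh2 }
 *
 * and returns NULL; hitting a zero key part-way down instead returns the
 * address of that (incomplete) triple with *err == 0.
 */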
/**
 *	ext2_find_near - find a place for allocation with sufficient locality
 *	@ind: descriptor of indirect block.
 *
 *	This function returns the preferred place for block allocation.
 *	It is used when heuristic for sequential allocation fails.
 *	  + if there is a block to the left of our position - allocate near it.
 *	  + if pointer will live in indirect block - allocate near that block.
 *	  + if pointer will live in inode - allocate in the same cylinder group.
 *
 *	In the latter case we colour the starting block by the caller's PID to
 *	prevent it from clashing with concurrent allocations for a different inode
 *	in the same block group. The PID is used here so that functionally related
 *	files will be close-by on-disk.
 *
 *	Caller must make sure that @ind is valid and will stay that way.
 */
static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
	struct ext2_inode_info *ei = EXT2_I(inode);
	__le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
	ext2_fsblk_t bg_start;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
		return ind->bh->b_blocknr;

	/*
	 * It is going to be referred from inode itself? OK, just put it into
	 * the same cylinder group then.
	 */
	bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
	colour = (current->pid % 16) *
			(EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour;
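/*
 * Illustrative example of the colouring above (numbers assumed, not taken
 * from a real filesystem): with 8192 blocks per group, a group starting at
 * block 24577 and a caller whose pid % 16 == 5, the goal becomes
 *
 *	bg_start + 5 * (8192 / 16) = 24577 + 2560 = 27137
 *
 * so unrelated processes start their allocations in distinct 512-block
 * "colour" slots within the same group.
 */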
/**
 *	ext2_find_goal - find a preferred place for allocation.
 *	@block: block we want
 *	@partial: pointer to the last triple within a chain
 *
 *	Returns preferred place for a block (the goal).
 */
static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
					  Indirect *partial)
	struct ext2_block_alloc_info *block_i;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/*
	 * try the heuristic for sequential allocation,
	 * failing that at least try to get decent locality.
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
		&& (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext2_find_near(inode, partial);
/**
 *	ext2_blks_to_allocate: Look up the block map and count the number
 *	of direct blocks that need to be allocated for the given branch.
 *
 *	@branch: chain of indirect blocks
 *	@k: number of blocks needed for indirect blocks
 *	@blks: number of data blocks to be mapped.
 *	@blocks_to_boundary: the offset in the indirect block
 *
 *	return the total number of blocks to be allocated, including the
 *	direct and indirect blocks.
 */
ext2_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
		int blocks_to_boundary)
	unsigned long count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) have not been allocated yet,
	 * so it is clear that the blocks on that path have not been allocated
	 * either.
	 */
		/* right now we don't handle cross-boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;

	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
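/*
 * Illustrative example (assumed numbers): a request for blks == 8 data
 * blocks with blocks_to_boundary == 3 is clamped to the boundary, i.e.
 * min(8, 3 + 1) == 4 blocks, when indirect blocks still have to be
 * allocated; when the indirect path already exists, the count instead grows
 * one-by-one through consecutive zero pointers in branch[0], again never
 * past blks or the boundary.
 */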
/**
 *	ext2_alloc_blocks: allocate the multiple blocks needed for a branch
 *	@indirect_blks: the number of blocks that need to be allocated for the
 *		indirect blocks
 *	@new_blocks: on return it will store the new block numbers for
 *	the indirect blocks (if needed) and the first direct block,
 *	@blks:	on return it will store the total number of allocated
 *		direct blocks
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
	unsigned long count = 0;
	ext2_fsblk_t current_block = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks (if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks that need to be allocated (required).
	 */
	target = blks + indirect_blks;

		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode, goal, &count, err);

	/* allocate blocks for indirect blocks */
	while (index < indirect_blks && count) {
		new_blocks[index++] = current_block++;

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */

	for (i = 0; i < index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	mark_inode_dirty(inode);
/**
 *	ext2_alloc_branch - allocate and set up a chain of blocks.
 *	@num: depth of the chain (number of blocks to allocate)
 *	@offsets: offsets (in the blocks) to store the pointers to next.
 *	@branch: place to store the chain in.
 *
 *	This function allocates @num blocks, zeroes out all but the last one,
 *	links them into chain and (if we are synchronous) writes them to disk.
 *	In other words, it prepares a branch that can be spliced onto the
 *	inode. It stores the information about that chain in the branch[], in
 *	the same format as ext2_get_branch() would do. We are calling it after
 *	we had read the existing part of chain and partial points to the last
 *	triple of that (one with zero ->key). Upon the exit we have the same
 *	picture as after the successful ext2_get_block(), except that in one
 *	place chain is disconnected - *branch->p is still zero (we did not
 *	set the last link), but branch->key contains the number that should
 *	be placed into *branch->p to fill that gap.
 *
 *	If allocation fails we free all blocks we've allocated (and forget
 *	their buffer_heads) and return the error value from the failed
 *	ext2_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *	as described above and return 0.
 */
static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
	int blocksize = inode->i_sb->s_blocksize;
	struct buffer_head *bh;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);

		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the new allocated
			 * data blocks numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);

		set_buffer_uptodate(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* We used to sync bh here if IS_SYNC(inode).
		 * But we now rely upon generic_write_sync()
		 * and b_inode_buffers.  But not for directories.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);

	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	ext2_free_blocks(inode, new_blocks[i], num);
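/*
 * Illustrative sketch of the result (hypothetical block numbers): after a
 * successful call with indirect_blks == 2 and new_blocks[] = {101, 102, 103},
 * the freshly built branch is
 *
 *	branch[0].key = 101	(*branch[0].p is still 0 - not spliced yet)
 *	branch[1].key = 102	*branch[1].p == 102, stored inside block 101
 *	branch[2].key = 103	*branch[2].p == 103, stored inside block 102
 *
 * so only the topmost link is left for ext2_splice_branch() to fill.
 */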
/**
 *	ext2_splice_branch - splice the allocated branch onto inode.
 *	@block: (logical) number of block we are adding
 *	@where: location of missing link
 *	@num:   number of indirect blocks we are adding
 *	@blks:  number of direct blocks we are adding
 *
 *	This function fills the missing link and does all housekeeping needed in
 *	inode (->i_blocks, etc.). In case of success we end up with the full
 *	chain to new block and return 0.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* XXX LOCKING probably should have i_meta_lock ? */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation
	 */
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* had we spliced it onto indirect block? */
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   struct buffer_head *bh_result,
			   int create)
	int blocks_to_boundary = 0;
	struct ext2_inode_info *ei = EXT2_I(inode);
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
		first_block = le32_to_cpu(chain[depth - 1].key);
		clear_buffer_new(bh_result); /* What's this do? */

		while (count < maxblocks && count <= blocks_to_boundary) {

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain (ext2_get_branch() returns -EAGAIN err), or
	 * if the chain has been changed after we grab the semaphore
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch), re-grab the chain to see if
	 * the requested block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {

		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
			mutex_unlock(&ei->truncate_mutex);

			clear_buffer_new(bh_result);

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks that need to be allocated for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * XXX ???? Block out ext2_truncate while we alter the tree
	 */
	err = ext2_alloc_branch(inode, indirect_blks, &count, goal,
				offsets + (partial - chain), partial);

		mutex_unlock(&ei->truncate_mutex);

		/*
		 * block must be initialised before we put it in the tree
		 * so that it's not found by another thread before it's
		 * initialised
		 */
		err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key),
				1 << inode->i_blkbits);
			mutex_unlock(&ei->truncate_mutex);

	ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
	mutex_unlock(&ei->truncate_mutex);
	set_buffer_new(bh_result);

	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
	if (count > blocks_to_boundary)
		set_buffer_boundary(bh_result);

	/* Clean up and exit */
	partial = chain + depth - 1;	/* the whole chain */
	while (partial > chain) {
int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
	int ret = ext2_get_blocks(inode, iblock, max_blocks,
			bh_result, create);

		bh_result->b_size = (ret << inode->i_blkbits);
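/*
 * A minimal usage sketch (hypothetical caller): the desired mapping length is
 * passed in via bh_result->b_size and the mapped length comes back the same
 * way, following the usual get_block convention of returning 0 on success:
 *
 *	struct buffer_head bh = { .b_size = 4 << inode->i_blkbits };
 *
 *	if (ext2_get_block(inode, iblock, &bh, 0) == 0 && buffer_mapped(&bh))
 *		pr_debug("lblk %llu -> pblk %llu, %zu bytes\n",
 *			 (u64)iblock, (u64)bh.b_blocknr, bh.b_size);
 *
 * This is how the mpage/fiemap helpers below probe for contiguous extents.
 */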
int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
	return generic_block_fiemap(inode, fieinfo, start, len,
				    ext2_get_block);
static int ext2_writepage(struct page *page, struct writeback_control *wbc)
	return block_write_full_page(page, ext2_get_block, wbc);

static int ext2_readpage(struct file *file, struct page *page)
	return mpage_readpage(page, ext2_get_block);

ext2_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
ext2_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ext2_get_block);
		ext2_write_failed(mapping, pos + len);
static int ext2_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
		ext2_write_failed(mapping, pos + len);
ext2_nobh_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
	ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata,
			       ext2_get_block);
		ext2_write_failed(mapping, pos + len);

static int ext2_nobh_writepage(struct page *page,
			struct writeback_control *wbc)
	return nobh_writepage(page, ext2_get_block, wbc);
static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
	return generic_block_bmap(mapping, block, ext2_get_block);
ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);

		ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block,
				NULL, DIO_LOCKING);
		ret = blockdev_direct_IO(iocb, inode, iter, offset,
				ext2_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		ext2_write_failed(mapping, offset + count);
*mapping
, struct writeback_control
*wbc
)
877 return mpage_writepages(mapping
, wbc
, ext2_get_block
);
880 const struct address_space_operations ext2_aops
= {
881 .readpage
= ext2_readpage
,
882 .readpages
= ext2_readpages
,
883 .writepage
= ext2_writepage
,
884 .write_begin
= ext2_write_begin
,
885 .write_end
= ext2_write_end
,
887 .direct_IO
= ext2_direct_IO
,
888 .writepages
= ext2_writepages
,
889 .migratepage
= buffer_migrate_page
,
890 .is_partially_uptodate
= block_is_partially_uptodate
,
891 .error_remove_page
= generic_error_remove_page
,
894 const struct address_space_operations ext2_nobh_aops
= {
895 .readpage
= ext2_readpage
,
896 .readpages
= ext2_readpages
,
897 .writepage
= ext2_nobh_writepage
,
898 .write_begin
= ext2_nobh_write_begin
,
899 .write_end
= nobh_write_end
,
901 .direct_IO
= ext2_direct_IO
,
902 .writepages
= ext2_writepages
,
903 .migratepage
= buffer_migrate_page
,
904 .error_remove_page
= generic_error_remove_page
,
/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 */
static inline int all_zeroes(__le32 *p, __le32 *q)
/**
 *	ext2_find_shared - find the indirect blocks for partial truncation.
 *	@inode: inode in question
 *	@depth: depth of the affected branch
 *	@offsets: offsets of pointers in that branch (see ext2_block_to_path)
 *	@chain: place to store the pointers to partial indirect blocks
 *	@top: place to the (detached) top of branch
 *
 *	This is a helper function used by ext2_truncate().
 *
 *	When we do truncate() we may have to clean the ends of several indirect
 *	blocks but leave the blocks themselves alive. Block is partially
 *	truncated if some data below the new i_size is referred from it (and
 *	it is on the path to the first completely truncated data block, indeed).
 *	We have to free the top of that path along with everything to the right
 *	of the path. Since no allocation past the truncation point is possible
 *	until ext2_truncate() finishes, we may safely do the latter, but top
 *	of branch may require special attention - pageout below the truncation
 *	point might try to populate it.
 *
 *	We atomically detach the top of branch from the tree, store the block
 *	number of its root in *@top, pointers to buffer_heads of partially
 *	truncated blocks - in @chain[].bh and pointers to their last elements
 *	that should not be removed - in @chain[].p. Return value is the pointer
 *	to last filled element of @chain.
 *
 *	The work left to the caller is to do the actual freeing of subtrees:
 *		a) free the subtree starting from *@top
 *		b) free the subtrees whose roots are stored in
 *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *		c) free the subtrees growing from the inode past the @chain[0].p
 *			(no partially truncated stuff there).
 */
static Indirect *ext2_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				__le32 *top)
	Indirect *partial, *p;

	for (k = depth; k > 1 && !offsets[k-1]; k--)

	partial = ext2_get_branch(inode, k, offsets, chain, &err);
		partial = chain + k - 1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	write_lock(&EXT2_I(inode)->i_meta_lock);
	if (!partial->key && *partial->p) {
		write_unlock(&EXT2_I(inode)->i_meta_lock);

	for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
	/*
	 * OK, we've found the last block that must survive. The rest of our
	 * branch should be detached before unlocking. However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {

	write_unlock(&EXT2_I(inode)->i_meta_lock);
/**
 *	ext2_free_data - free a list of data blocks
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	points immediately past the end of array
 *
 *	We are freeing all blocks referred from that array (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
	unsigned long block_to_free = 0, count = 0;

	for ( ; p < q; p++) {
		nr = le32_to_cpu(*p);

			/* accumulate blocks to free if they're contiguous */
			else if (block_to_free == nr - count)
				ext2_free_blocks(inode, block_to_free, count);
				mark_inode_dirty(inode);

		ext2_free_blocks(inode, block_to_free, count);
		mark_inode_dirty(inode);
/**
 *	ext2_free_branches - free an array of branches
 *	@inode:	inode we are dealing with
 *	@p:	array of block numbers
 *	@q:	pointer immediately past the end of array
 *	@depth:	depth of the branches to free
 *
 *	We are freeing all blocks referred from these branches (numbers are
 *	stored as little-endian 32-bit) and updating @inode->i_blocks
 *	appropriately.
 */
static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
	struct buffer_head * bh;

		int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
		for ( ; p < q; p++) {
			nr = le32_to_cpu(*p);

			bh = sb_bread(inode->i_sb, nr);
			/*
			 * A read failure? Report error and clear slot
			 * (should be rare).
			 */
				ext2_error(inode->i_sb, "ext2_free_branches",
					"Read failure, inode=%ld, block=%ld",
					inode->i_ino, nr);

			ext2_free_branches(inode,
					   (__le32 *) bh->b_data,
					   (__le32 *) bh->b_data + addr_per_block,
					   depth);
			ext2_free_blocks(inode, nr, 1);
			mark_inode_dirty(inode);

		ext2_free_data(inode, p, q);
/* dax_sem must be held when calling this function */
static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
	__le32 *i_data = EXT2_I(inode)->i_data;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);

	blocksize = inode->i_sb->s_blocksize;
	iblock = (offset + blocksize - 1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb);
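	/*
	 * Illustrative example (assuming 1KiB blocks): truncating to offset
	 * 5000 gives iblock = (5000 + 1023) >> 10 = 5, i.e. blocks 0-4 keep
	 * their data (block 4 holds the last byte at offset 4999) and freeing
	 * starts with logical block 5.
	 */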
#ifdef CONFIG_FS_DAX
	WARN_ON(!rwsem_is_locked(&ei->dax_sem));
#endif

	n = ext2_block_to_path(inode, iblock, offsets, NULL);
	/*
	 * From here we block out all ext2_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	mutex_lock(&ei->truncate_mutex);

		ext2_free_data(inode, i_data+offsets[0],
					i_data + EXT2_NDIR_BLOCKS);

	partial = ext2_find_shared(inode, n, offsets, chain, &nr);

	/* Kill the top of shared branch (already detached) */
		if (partial == chain)
			mark_inode_dirty(inode);
			mark_buffer_dirty_inode(partial->bh, inode);
		ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial);

	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext2_free_branches(inode,
				   partial->p + 1,
				   (__le32 *)partial->bh->b_data + addr_per_block,
				   (chain+n-1) - partial);
		mark_buffer_dirty_inode(partial->bh, inode);
		brelse(partial->bh);

	/* Kill the remaining (whole) subtrees */
	switch (offsets[0]) {
			nr = i_data[EXT2_IND_BLOCK];
				i_data[EXT2_IND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 1);
		case EXT2_IND_BLOCK:
			nr = i_data[EXT2_DIND_BLOCK];
				i_data[EXT2_DIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 2);
		case EXT2_DIND_BLOCK:
			nr = i_data[EXT2_TIND_BLOCK];
				i_data[EXT2_TIND_BLOCK] = 0;
				mark_inode_dirty(inode);
				ext2_free_branches(inode, &nr, &nr+1, 3);
		case EXT2_TIND_BLOCK:

	ext2_discard_reservation(inode);

	mutex_unlock(&ei->truncate_mutex);
static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return;
	if (ext2_inode_is_fast_symlink(inode))
		return;

	dax_sem_down_write(EXT2_I(inode));
	__ext2_truncate_blocks(inode, offset);
	dax_sem_up_write(EXT2_I(inode));
static int ext2_setsize(struct inode *inode, loff_t newsize)
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (ext2_inode_is_fast_symlink(inode))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	inode_dio_wait(inode);

		error = dax_truncate_page(inode, newsize, ext2_get_block);
	else if (test_opt(inode->i_sb, NOBH))
		error = nobh_truncate_page(inode->i_mapping,
					   newsize, ext2_get_block);
		error = block_truncate_page(inode->i_mapping,
					    newsize, ext2_get_block);

	dax_sem_down_write(EXT2_I(inode));
	truncate_setsize(inode, newsize);
	__ext2_truncate_blocks(inode, newsize);
	dax_sem_up_write(EXT2_I(inode));

	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	if (inode_needs_sync(inode)) {
		sync_mapping_buffers(inode->i_mapping);
		sync_inode_metadata(inode, 1);
	} else {
		mark_inode_dirty(inode);
	}
static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino,
					struct buffer_head **p)
	struct buffer_head * bh;
	unsigned long block_group;
	unsigned long block;
	unsigned long offset;
	struct ext2_group_desc * gdp;

	if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) ||
	    ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))

	block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb);
	gdp = ext2_get_group_desc(sb, block_group, NULL);

	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb);
	block = le32_to_cpu(gdp->bg_inode_table) +
		(offset >> EXT2_BLOCK_SIZE_BITS(sb));
	if (!(bh = sb_bread(sb, block)))

	offset &= (EXT2_BLOCK_SIZE(sb) - 1);
	return (struct ext2_inode *) (bh->b_data + offset);
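	/*
	 * Worked example (hypothetical numbers): on a 1KiB-block filesystem
	 * with 128-byte inodes and 1832 inodes per group, ino 2000 lives at
	 *
	 *	block_group = (2000 - 1) / 1832 = 1
	 *	offset      = ((2000 - 1) % 1832) * 128 = 167 * 128 = 21376
	 *	block       = bg_inode_table + (21376 >> 10) = bg_inode_table + 20
	 *
	 * and the struct ext2_inode sits 21376 & 1023 = 896 bytes into that block.
	 */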
	ext2_error(sb, "ext2_get_inode", "bad inode number: %lu",
		   (unsigned long) ino);
	return ERR_PTR(-EINVAL);

	ext2_error(sb, "ext2_get_inode",
		   "unable to read inode block - inode=%lu, block=%lu",
		   (unsigned long) ino, block);
	return ERR_PTR(-EIO);
void ext2_set_inode_flags(struct inode *inode)
	unsigned int flags = EXT2_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
				S_DIRSYNC | S_DAX);
	if (flags & EXT2_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT2_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT2_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT2_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT2_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	if (test_opt(inode->i_sb, DAX))
		inode->i_flags |= S_DAX;
/* Propagate flags from i_flags to EXT2_I(inode)->i_flags */
void ext2_get_inode_flags(struct ext2_inode_info *ei)
	unsigned int flags = ei->vfs_inode.i_flags;

	ei->i_flags &= ~(EXT2_SYNC_FL|EXT2_APPEND_FL|
			EXT2_IMMUTABLE_FL|EXT2_NOATIME_FL|EXT2_DIRSYNC_FL);
	if (flags & S_SYNC)
		ei->i_flags |= EXT2_SYNC_FL;
	if (flags & S_APPEND)
		ei->i_flags |= EXT2_APPEND_FL;
	if (flags & S_IMMUTABLE)
		ei->i_flags |= EXT2_IMMUTABLE_FL;
	if (flags & S_NOATIME)
		ei->i_flags |= EXT2_NOATIME_FL;
	if (flags & S_DIRSYNC)
		ei->i_flags |= EXT2_DIRSYNC_FL;
struct inode *ext2_iget(struct super_block *sb, unsigned long ino)
	struct ext2_inode_info *ei;
	struct buffer_head * bh;
	struct ext2_inode *raw_inode;
	struct inode *inode;

	inode = iget_locked(sb, ino);
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei->i_block_alloc_info = NULL;

	raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
	if (IS_ERR(raw_inode)) {
		ret = PTR_ERR(raw_inode);

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes
	 * the test is the same one that e2fsck uses
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
		/* this inode is deleted */
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
	ei->i_frag_no = raw_inode->i_frag;
	ei->i_frag_size = raw_inode->i_fsize;
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);

	if (S_ISREG(inode->i_mode))
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);

	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb);
	ei->i_dir_start_lookup = 0;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext2_file_inode_operations;
		if (test_opt(inode->i_sb, NOBH)) {
			inode->i_mapping->a_ops = &ext2_nobh_aops;
			inode->i_fop = &ext2_file_operations;
		} else {
			inode->i_mapping->a_ops = &ext2_aops;
			inode->i_fop = &ext2_file_operations;
		}
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}

	ext2_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

	return ERR_PTR(ret);
static int __ext2_write_inode(struct inode *inode, int do_sync)
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head * bh;
	struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);

	if (IS_ERR(raw_inode))
		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	ext2_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise, old inodes get
		 * re-used with the upper 16 bits of the uid/gid intact
		 */
			raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid));
			raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid));
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->i_size);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);

	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
	raw_inode->i_frag = ei->i_frag_no;
	raw_inode->i_fsize = ei->i_frag_size;
	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
	if (!S_ISREG(inode->i_mode))
		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
	else {
		raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32);
		if (inode->i_size > 0x7fffffffULL) {
			if (!EXT2_HAS_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT2_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT2_GOOD_OLD_REV)) {
				/* If this is the first large file
				 * created, add a flag to the superblock.
				 */
				spin_lock(&EXT2_SB(sb)->s_lock);
				ext2_update_dynamic_rev(sb);
				EXT2_SET_RO_COMPAT_FEATURE(sb,
					EXT2_FEATURE_RO_COMPAT_LARGE_FILE);
				spin_unlock(&EXT2_SB(sb)->s_lock);
				ext2_write_super(sb);
			}
		}
	}

	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else for (n = 0; n < EXT2_N_BLOCKS; n++)
		raw_inode->i_block[n] = ei->i_data[n];
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing ext2 inode [%s:%08lx]\n",
				sb->s_id, (unsigned long) ino);
		}
	}
	ei->i_state &= ~EXT2_STATE_NEW;
int ext2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}
int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
	struct inode *inode = d_inode(dentry);

	error = inode_change_ok(inode, iattr);

	if (is_quota_modification(inode, iattr)) {
		error = dquot_initialize(inode);

	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
		error = dquot_transfer(inode, iattr);

	if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) {
		error = ext2_setsize(inode, iattr->ia_size);

	setattr_copy(inode, iattr);
	if (iattr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	mark_inode_dirty(inode);