/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01
static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
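
/*
 * Illustrative sketch (compiled out, not part of the original file): how
 * the 32-bit inode checksum is split across the two on-disk 16-bit fields
 * and reassembled, mirroring ext4_inode_csum_verify() above.  The helper
 * name and has_hi parameter are hypothetical.
 */
#if 0
static __u32 example_csum_reassemble(__le16 lo, __le16 hi, int has_hi)
{
	__u32 csum = le16_to_cpu(lo);	/* low 16 bits are always stored */

	if (has_hi)			/* large inodes also store the rest */
		csum |= ((__u32)le16_to_cpu(hi)) << 16;
	return csum;	/* compare against ext4_inode_csum() (masked to
			 * 16 bits when the hi field does not fit) */
}
#endif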
static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}
static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}
/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped, and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	/*
	 * Protect us against freezing - the iput() caller didn't have to have
	 * any protection against it.
	 */
	sb_start_intwrite(inode->i_sb);
	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			sb_end_intwrite(inode->i_sb);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	sb_end_intwrite(inode->i_sb);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}
#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}
/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
				  int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t	index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}
/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				  struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}
/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create==0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create==1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
		up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create = 0
	 * with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation;
	 * let the underlying get_block() function know to
	 * avoid double accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non extent files.  So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/* If we have successfully mapped the delayed allocated blocks,
		 * set the BH_Da_Mapped bit on them.  It's important to do this
		 * under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
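
/*
 * Illustrative sketch (compiled out, not part of the original file): a
 * typical pure-lookup call to ext4_map_blocks().  With
 * EXT4_GET_BLOCKS_CREATE clear and no handle, a return of 0 means a hole
 * and >0 is the number of blocks mapped into map.m_pblk.  The helper name
 * is hypothetical.
 */
#if 0
static int example_map_lookup(struct inode *inode, ext4_lblk_t lblk,
			      unsigned int len)
{
	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = len };
	int ret = ext4_map_blocks(NULL, inode, &map, 0);

	if (ret > 0)
		ext_debug("lblk %u -> pblk %llu (%d blocks)\n",
			  lblk, (unsigned long long) map.m_pblk, ret);
	return ret;
}
#endif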
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
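
/*
 * Illustrative sketch (compiled out, not part of the original file): a
 * minimal ext4_get_block() lookup through an on-stack buffer_head, whose
 * b_size bounds how many blocks may be mapped.  The helper name and the
 * -ENOENT convention for holes are hypothetical.
 */
#if 0
static int example_lookup_block(struct inode *inode, sector_t lblk)
{
	struct buffer_head bh = { .b_size = inode->i_sb->s_blocksize };
	int ret = ext4_get_block(inode, lblk, &bh, 0 /* no allocation */);

	if (ret == 0 && buffer_mapped(&bh))
		/* bh.b_blocknr now holds the physical block number */
		return 0;
	return ret ? ret : -ENOENT;	/* lookup failure, or a hole */
}
#endif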
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
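
/*
 * Illustrative sketch (compiled out, not part of the original file):
 * walk_page_buffers() applies a callback to each buffer_head overlapping
 * [from, to) in a page, exactly as ext4_write_begin() does for
 * data=journal mode below.  The wrapper name is hypothetical.
 */
#if 0
static int example_journal_page_range(handle_t *handle, struct page *page,
				      unsigned from, unsigned to)
{
	/* partial == NULL: we don't care whether the buffers outside the
	 * range are uptodate */
	return walk_page_buffers(handle, page_buffers(page), from, to,
				 NULL, do_journal_get_write_access);
}
#endif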
static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to the orphan list in case
	 * we allocate blocks but the write fails for some reason.
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add the inode to the orphan list in case we crash before
		 * truncate finishes.
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}
/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}
/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* If we have allocated more blocks than we copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}
/*
 * Reserve a single cluster located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;
	ext4_lblk_t save_last_lblock;
	int save_len;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks;
	 * the worst case is one extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/*
	 * ext4_calc_metadata_amount() has side effects, which we have
	 * to be prepared to undo if we fail to claim space.
	 */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);

	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
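
/*
 * Illustrative sketch (compiled out, not part of the original file): with
 * bigalloc, the reservation counters above are kept in cluster units while
 * quota is charged in block units, hence the EXT4_C2B()/EXT4_NUM_B2C()
 * conversions.  The helper name and the numbers in the comments are
 * hypothetical.
 */
#if 0
static void example_cluster_units(struct ext4_sb_info *sbi)
{
	/* one cluster covers 1 << s_cluster_bits blocks */
	ext4_fsblk_t blocks = EXT4_C2B(sbi, 1);
	/* blocks round *up* to clusters: e.g. 64 blocks with 16 blocks
	 * per cluster (s_cluster_bits == 4) gives 4 clusters */
	ext4_fsblk_t clusters = EXT4_NUM_B2C(sbi, 64);
}
#endif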
static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}
static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
			clear_buffer_da_mapped(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		ext4_fsblk_t lblk;
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk, 1))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}
/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with the writepage() callback.
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated; this may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it.
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we looked at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin.  If this fails,
			 * skip the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
						noalloc_get_block_write)) {
				skip_page:
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto skip_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_da_mapped(bh))
						clear_buffer_da_mapped(bh);
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/*
				 * skip the page if block allocation was undone
				 * and the block is dirty
				 */
				if (ext4_bh_delay_or_unwritten(NULL, bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page)
				goto skip_page;

			if (commit_write)
				/* mark the buffer_heads as dirty & uptodate */
				block_commit_write(page, 0, len);

			clear_page_dirty_for_io(page);
			/*
			 * Delalloc doesn't support data journalling,
			 * but eventually maybe we'll lift this
			 * restriction.
			 */
			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
				err = ext4_bio_write_page(&io_submit, page,
							  len, mpd->wbc);
			else if (buffer_uninit(page_bufs)) {
				ext4_set_bh_endio(page_bufs, inode);
				err = block_write_full_page_endio(page,
					noalloc_get_block_write,
					mpd->wbc, ext4_end_io_buffer_write);
			} else
				err = block_write_full_page(page,
					noalloc_get_block_write, mpd->wbc);

			if (!err)
				mpd->pages_written++;
			/*
			 * In the error case we have to continue because
			 * the remaining pages are still locked.
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}
static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}
static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(inode->i_sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
			percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
			percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 EXT4_I(inode)->i_reserved_data_blocks);
	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
		 EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}
/*
 * mpage_da_map_and_submit - go through given space, map it
 *	if necessary, and then submit it for I/O
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 *
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appear to be free blocks, we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}

		/*
		 * A get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress.  The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);

		if (ext4_should_order_data(mpd->inode)) {
			err = ext4_jbd2_file_inode(handle, mpd->inode);
			if (err) {
				/* Only if the journal is aborted */
				mpd->retval = err;
				goto submit_io;
			}
		}
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}
#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		  (1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * The function is used to collect contiguous blocks in the same state.
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/*
	 * XXX Don't go larger than mballoc is willing to allocate
	 * This is a stopgap solution.  We eventually need to fold
	 * mpage_da_submit_io() into this function and then call
	 * ext4_map_blocks() multiple times in a loop
	 */
	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
		goto flush_it;

	/* check if the reserved journal credits might overflow */
	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available.  Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks.  So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
				EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credit
			 * reserved.  So limit the new bh->b_size
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block into our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block into our extent, so we
	 * need to flush the current extent and start a new one.
	 */
	mpage_da_map_and_submit(mpd);
	return;
}
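
/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * merge test used above.  A new block extends the pending extent only if
 * it is logically contiguous and its buffer state bits (within BH_FLAGS)
 * match the accumulated state.  The helper name is hypothetical.
 */
#if 0
static int example_can_merge(struct mpage_da_data *mpd, sector_t logical,
			     unsigned long b_state)
{
	sector_t next = mpd->b_blocknr +
			(mpd->b_size >> mpd->inode->i_blkbits);

	return logical == next && (b_state & BH_FLAGS) == mpd->b_state;
}
#endif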
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}
/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from delayed write
 * time.  This function looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

	if (retval == 0) {
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/* If the block was allocated from a previously allocated
		 * cluster, then we don't need to reserve it again. */
		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
			retval = ext4_da_reserve_space(inode, iblock);
			if (retval)
				/* not enough space to reserve */
				goto out_unlock;
		}

		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
		 * and it should not appear on the bh->b_state.
		 */
		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}
/*
 * This is a special get_blocks_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = the physical block mapping the unwritten extent
 * and b_bdev initialized properly.
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;

	if (buffer_unwritten(bh)) {
		/* A delayed write to an unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset, and new ensures that we do proper zero out
		 * for partial writes.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}
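
/*
 * Illustrative sketch (compiled out, not part of the original file): the
 * buffer state a delayed write ends up with, per the comment above
 * ext4_da_get_block_prep() - BH_Mapped (to the invalid-block sentinel),
 * BH_New and BH_Delay all set.  The helper name is hypothetical.
 */
#if 0
static void example_check_delayed_bh(struct buffer_head *bh)
{
	if (buffer_delay(bh))
		/* reserved but not yet allocated */
		BUG_ON(!buffer_new(bh) || !buffer_mapped(bh));
}
#endif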
/*
 * This function is used as a standard get_block_t callback function
 * when there is no desire to allocate any blocks.  It is used as a
 * callback function for block_write_begin() and block_write_full_page().
 * These functions should only try to map a single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned by this
 * function are either all already mapped or marked for delayed
 * allocation before calling block_write_full_page().  Otherwise,
 * b_blocknr could be left uninitialized, and the page write functions
 * will be taken by surprise.
 */
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
	return _ext4_get_block(inode, iblock, bh_result, 0);
}
1901 static int bget_one(handle_t *handle, struct buffer_head *bh)
1903 get_bh(bh);
1904 return 0;
1907 static int bput_one(handle_t *handle, struct buffer_head *bh)
1909 put_bh(bh);
1910 return 0;
1913 static int __ext4_journalled_writepage(struct page *page,
1914 unsigned int len)
1916 struct address_space *mapping = page->mapping;
1917 struct inode *inode = mapping->host;
1918 struct buffer_head *page_bufs;
1919 handle_t *handle = NULL;
1920 int ret = 0;
1921 int err;
1923 ClearPageChecked(page);
1924 page_bufs = page_buffers(page);
1925 BUG_ON(!page_bufs);
1926 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
1927 /* As soon as we unlock the page, it can go away, but we have
1928 * references to buffers so we are safe */
1929 unlock_page(page);
1931 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1932 if (IS_ERR(handle)) {
1933 ret = PTR_ERR(handle);
1934 goto out;
1937 BUG_ON(!ext4_handle_valid(handle));
1939 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1940 do_journal_get_write_access);
1942 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1943 write_end_fn);
1944 if (ret == 0)
1945 ret = err;
1946 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1947 err = ext4_journal_stop(handle);
1948 if (!ret)
1949 ret = err;
1951 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
1952 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1953 out:
1954 return ret;
1957 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1958 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1960 /*
1961 * Note that we don't need to start a transaction unless we're journaling data
1962 * because we should have holes filled from ext4_page_mkwrite(). We don't even
1963 * need to add the inode to the transaction's list in ordered mode, because if
1964 * we are writing back data added by write(), the inode is already there, and if
1965 * we are writing back data modified via mmap(), no one guarantees in which
1966 * transaction the data will hit the disk. In case we are journaling data, we
1967 * cannot start a transaction directly, because transaction start ranks above
1968 * page lock, so we have to do some magic.
1969 *
1970 * This function can get called via...
1971 * - ext4_da_writepages after taking page lock (have journal handle)
1972 * - journal_submit_inode_data_buffers (no journal handle)
1973 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1974 * - grab_page_cache when doing write_begin (have journal handle)
1975 *
1976 * We don't do any block allocation in this function. If we have a page with
1977 * multiple blocks, we need to write those buffer_heads that are mapped. This
1978 * is important for mmap-based writes. So if, with a 1K block size, we do
1979 * truncate(f, 1024);
1980 * a = mmap(f, 0, 4096);
1981 * a[0] = 'a';
1982 * truncate(f, 4096);
1983 * we have the first buffer_head in the page mapped via the page_mkwrite
1984 * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
1985 * do_wp_page). So writepage should write the first block. If we modify
1986 * the mmap area beyond 1024, we will again get a page fault, and the
1987 * page_mkwrite callback will do the block allocation and mark the
1988 * buffer_heads mapped.
1989 *
1990 * We redirty the page if we have any buffer_heads that are either delayed or
1991 * unwritten in the page.
1992 *
1993 * We can get recursively called as shown below.
1994 *
1995 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1996 * ext4_writepage()
1997 *
1998 * But since we don't do any block allocation we should not deadlock.
1999 * The page also has its dirty flag cleared, so we don't get a recursive page_lock.
2000 */
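/*
 * Illustrative userspace sketch (not part of the original source) of the
 * truncate/mmap sequence described above; it assumes "fd" is an open
 * descriptor on an ext4 file with a 1K block size. Error checks omitted.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>

static void partial_page_example(int fd)
{
	char *a;

	ftruncate(fd, 1024);	/* one block; page_mkwrite can map it */
	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	a[0] = 'a';		/* fault: first buffer_head becomes mapped */
	ftruncate(fd, 4096);	/* remaining buffer_heads: unmapped but dirty */
	munmap(a, 4096);
}
#endif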
2001 static int ext4_writepage(struct page *page,
2002 struct writeback_control *wbc)
2004 int ret = 0, commit_write = 0;
2005 loff_t size;
2006 unsigned int len;
2007 struct buffer_head *page_bufs = NULL;
2008 struct inode *inode = page->mapping->host;
2010 trace_ext4_writepage(page);
2011 size = i_size_read(inode);
2012 if (page->index == size >> PAGE_CACHE_SHIFT)
2013 len = size & ~PAGE_CACHE_MASK;
2014 else
2015 len = PAGE_CACHE_SIZE;
2018 * If the page does not have buffers (for whatever reason),
2019 * try to create them using __block_write_begin. If this
2020 * fails, redirty the page and move on.
2022 if (!page_has_buffers(page)) {
2023 if (__block_write_begin(page, 0, len,
2024 noalloc_get_block_write)) {
2025 redirty_page:
2026 redirty_page_for_writepage(wbc, page);
2027 unlock_page(page);
2028 return 0;
2030 commit_write = 1;
2032 page_bufs = page_buffers(page);
2033 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2034 ext4_bh_delay_or_unwritten)) {
2035 /*
2036 * We don't want to do block allocation, so redirty
2037 * the page and return. We may reach here when we do
2038 * a journal commit via journal_submit_inode_data_buffers.
2039 * We can also reach here via shrink_page_list, but it
2040 * should never be for direct reclaim, so warn if that
2041 * happens.
2042 */
2043 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2044 PF_MEMALLOC);
2045 goto redirty_page;
2047 if (commit_write)
2048 /* now mark the buffer_heads as dirty and uptodate */
2049 block_commit_write(page, 0, len);
2051 if (PageChecked(page) && ext4_should_journal_data(inode))
2052 /*
2053 * It's mmapped pagecache. Add buffers and journal it. There
2054 * doesn't seem much point in redirtying the page here.
2055 */
2056 return __ext4_journalled_writepage(page, len);
2058 if (buffer_uninit(page_bufs)) {
2059 ext4_set_bh_endio(page_bufs, inode);
2060 ret = block_write_full_page_endio(page, noalloc_get_block_write,
2061 wbc, ext4_end_io_buffer_write);
2062 } else
2063 ret = block_write_full_page(page, noalloc_get_block_write,
2064 wbc);
2066 return ret;
2069 /*
2070 * This is called via ext4_da_writepages() to
2071 * calculate the total number of credits to reserve to fit
2072 * a single extent allocation into a single transaction.
2073 * ext4_da_writepages() will loop calling this before
2074 * the block allocation.
2075 */
2077 static int ext4_da_writepages_trans_blocks(struct inode *inode)
2079 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2081 /*
2082 * With the non-extent format, the journal credits needed to
2083 * insert nrblocks contiguous blocks depend on the
2084 * number of contiguous blocks. So we will limit the
2085 * number of contiguous blocks to a sane value.
2086 */
2087 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2088 (max_blocks > EXT4_MAX_TRANS_DATA))
2089 max_blocks = EXT4_MAX_TRANS_DATA;
2091 return ext4_chunk_trans_blocks(inode, max_blocks);
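/*
 * Worked example (illustrative numbers): a non-extent inode with
 * 100000 reserved data blocks is clamped to EXT4_MAX_TRANS_DATA
 * before asking ext4_chunk_trans_blocks() for the credits, while an
 * extent-based inode passes the full 100000 through.
 */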
2094 /*
2095 * write_cache_pages_da - walk the list of dirty pages of the given
2096 * address space and accumulate pages that need writing, and call
2097 * mpage_da_map_and_submit to map a single contiguous memory region
2098 * and then write them.
2099 */
2100 static int write_cache_pages_da(struct address_space *mapping,
2101 struct writeback_control *wbc,
2102 struct mpage_da_data *mpd,
2103 pgoff_t *done_index)
2105 struct buffer_head *bh, *head;
2106 struct inode *inode = mapping->host;
2107 struct pagevec pvec;
2108 unsigned int nr_pages;
2109 sector_t logical;
2110 pgoff_t index, end;
2111 long nr_to_write = wbc->nr_to_write;
2112 int i, tag, ret = 0;
2114 memset(mpd, 0, sizeof(struct mpage_da_data));
2115 mpd->wbc = wbc;
2116 mpd->inode = inode;
2117 pagevec_init(&pvec, 0);
2118 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2119 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2121 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2122 tag = PAGECACHE_TAG_TOWRITE;
2123 else
2124 tag = PAGECACHE_TAG_DIRTY;
2126 *done_index = index;
2127 while (index <= end) {
2128 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2129 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2130 if (nr_pages == 0)
2131 return 0;
2133 for (i = 0; i < nr_pages; i++) {
2134 struct page *page = pvec.pages[i];
2137 * At this point, the page may be truncated or
2138 * invalidated (changing page->mapping to NULL), or
2139 * even swizzled back from swapper_space to tmpfs file
2140 * mapping. However, page->index will not change
2141 * because we have a reference on the page.
2143 if (page->index > end)
2144 goto out;
2146 *done_index = page->index + 1;
2148 /*
2149 * If we can't merge this page, and we have
2150 * accumulated a contiguous region, write it out.
2151 */
2152 if ((mpd->next_page != page->index) &&
2153 (mpd->next_page != mpd->first_page)) {
2154 mpage_da_map_and_submit(mpd);
2155 goto ret_extent_tail;
2158 lock_page(page);
2160 /*
2161 * If the page is no longer dirty, or its
2162 * mapping no longer corresponds to the inode we
2163 * are writing (which means it has been
2164 * truncated or invalidated), or the page is
2165 * already under writeback and we are not
2166 * doing a data integrity writeback, skip the page.
2167 */
2168 if (!PageDirty(page) ||
2169 (PageWriteback(page) &&
2170 (wbc->sync_mode == WB_SYNC_NONE)) ||
2171 unlikely(page->mapping != mapping)) {
2172 unlock_page(page);
2173 continue;
2176 wait_on_page_writeback(page);
2177 BUG_ON(PageWriteback(page));
2179 if (mpd->next_page != page->index)
2180 mpd->first_page = page->index;
2181 mpd->next_page = page->index + 1;
2182 logical = (sector_t) page->index <<
2183 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2185 if (!page_has_buffers(page)) {
2186 mpage_add_bh_to_extent(mpd, logical,
2187 PAGE_CACHE_SIZE,
2188 (1 << BH_Dirty) | (1 << BH_Uptodate));
2189 if (mpd->io_done)
2190 goto ret_extent_tail;
2191 } else {
2193 * Page with regular buffer heads,
2194 * just add all dirty ones
2196 head = page_buffers(page);
2197 bh = head;
2198 do {
2199 BUG_ON(buffer_locked(bh));
2200 /*
2201 * We need to try to allocate
2202 * unmapped blocks in the same page.
2203 * Otherwise we won't make progress
2204 * with the page in ext4_writepage.
2205 */
2206 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2207 mpage_add_bh_to_extent(mpd, logical,
2208 bh->b_size,
2209 bh->b_state);
2210 if (mpd->io_done)
2211 goto ret_extent_tail;
2212 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2214 * mapped dirty buffer. We need
2215 * to update the b_state
2216 * because we look at b_state
2217 * in mpage_da_map_blocks. We
2218 * don't update b_size because
2219 * if we find an unmapped
2220 * buffer_head later we need to
2221 * use the b_state flag of that
2222 * buffer_head.
2224 if (mpd->b_size == 0)
2225 mpd->b_state = bh->b_state & BH_FLAGS;
2227 logical++;
2228 } while ((bh = bh->b_this_page) != head);
2231 if (nr_to_write > 0) {
2232 nr_to_write--;
2233 if (nr_to_write == 0 &&
2234 wbc->sync_mode == WB_SYNC_NONE)
2236 * We stop writing back only if we are
2237 * not doing integrity sync. In case of
2238 * integrity sync we have to keep going
2239 * because someone may be concurrently
2240 * dirtying pages, and we might have
2241 * synced a lot of newly appeared dirty
2242 * pages, but have not synced all of the
2243 * old dirty pages.
2245 goto out;
2248 pagevec_release(&pvec);
2249 cond_resched();
2251 return 0;
2252 ret_extent_tail:
2253 ret = MPAGE_DA_EXTENT_TAIL;
2254 out:
2255 pagevec_release(&pvec);
2256 cond_resched();
2257 return ret;
2261 static int ext4_da_writepages(struct address_space *mapping,
2262 struct writeback_control *wbc)
2264 pgoff_t index;
2265 int range_whole = 0;
2266 handle_t *handle = NULL;
2267 struct mpage_da_data mpd;
2268 struct inode *inode = mapping->host;
2269 int pages_written = 0;
2270 unsigned int max_pages;
2271 int range_cyclic, cycled = 1, io_done = 0;
2272 int needed_blocks, ret = 0;
2273 long desired_nr_to_write, nr_to_writebump = 0;
2274 loff_t range_start = wbc->range_start;
2275 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2276 pgoff_t done_index = 0;
2277 pgoff_t end;
2278 struct blk_plug plug;
2280 trace_ext4_da_writepages(inode, wbc);
2282 /*
2283 * No pages to write? This is mainly a kludge to avoid starting
2284 * a transaction for special inodes like the journal inode on last iput(),
2285 * because that could violate lock ordering on umount.
2286 */
2287 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2288 return 0;
2290 /*
2291 * If the filesystem has aborted, it is read-only, so return
2292 * right away instead of dumping stack traces later on that
2293 * will obscure the real source of the problem. We test
2294 * EXT4_MF_FS_ABORTED instead of sb->s_flags' MS_RDONLY because
2295 * the latter could be true if the filesystem is mounted
2296 * read-only, and in that case, ext4_da_writepages should
2297 * *never* be called, so if that ever happens, we would want
2298 * the stack trace.
2299 */
2300 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2301 return -EROFS;
2303 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2304 range_whole = 1;
2306 range_cyclic = wbc->range_cyclic;
2307 if (wbc->range_cyclic) {
2308 index = mapping->writeback_index;
2309 if (index)
2310 cycled = 0;
2311 wbc->range_start = index << PAGE_CACHE_SHIFT;
2312 wbc->range_end = LLONG_MAX;
2313 wbc->range_cyclic = 0;
2314 end = -1;
2315 } else {
2316 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2317 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2320 /*
2321 * This works around two forms of stupidity. The first is in
2322 * the writeback code, which caps the maximum number of pages
2323 * written to be 1024 pages. This is wrong on multiple
2324 * levels; different architectures have a different page size,
2325 * which changes the maximum amount of data which gets
2326 * written. Secondly, 4 megabytes is way too small. XFS
2327 * forces this value to be 16 megabytes by multiplying the
2328 * nr_to_write parameter by four, and then relies on its
2329 * allocator to allocate larger extents to make them
2330 * contiguous. Unfortunately this brings us to the second
2331 * stupidity, which is that ext4's mballoc code only allocates
2332 * at most 2048 blocks. So we force contiguous writes up to
2333 * the number of dirty blocks in the inode, or
2334 * sbi->s_max_writeback_mb_bump, whichever is smaller.
2335 */
2336 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2337 if (!range_cyclic && range_whole) {
2338 if (wbc->nr_to_write == LONG_MAX)
2339 desired_nr_to_write = wbc->nr_to_write;
2340 else
2341 desired_nr_to_write = wbc->nr_to_write * 8;
2342 } else
2343 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2344 max_pages);
2345 if (desired_nr_to_write > max_pages)
2346 desired_nr_to_write = max_pages;
2348 if (wbc->nr_to_write < desired_nr_to_write) {
2349 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2350 wbc->nr_to_write = desired_nr_to_write;
2353 retry:
2354 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2355 tag_pages_for_writeback(mapping, index, end);
2357 blk_start_plug(&plug);
2358 while (!ret && wbc->nr_to_write > 0) {
2360 /*
2361 * We insert one extent at a time, so we need
2362 * the credits for a single extent allocation.
2363 * Journalled mode is currently not supported
2364 * by delalloc.
2365 */
2366 BUG_ON(ext4_should_journal_data(inode));
2367 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2369 /* start a new transaction*/
2370 handle = ext4_journal_start(inode, needed_blocks);
2371 if (IS_ERR(handle)) {
2372 ret = PTR_ERR(handle);
2373 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2374 "%ld pages, ino %lu; err %d", __func__,
2375 wbc->nr_to_write, inode->i_ino, ret);
2376 blk_finish_plug(&plug);
2377 goto out_writepages;
2381 * Now call write_cache_pages_da() to find the next
2382 * contiguous region of logical blocks that need
2383 * blocks to be allocated by ext4 and submit them.
2385 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2387 * If we have a contiguous extent of pages and we
2388 * haven't done the I/O yet, map the blocks and submit
2389 * them for I/O.
2391 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2392 mpage_da_map_and_submit(&mpd);
2393 ret = MPAGE_DA_EXTENT_TAIL;
2395 trace_ext4_da_write_pages(inode, &mpd);
2396 wbc->nr_to_write -= mpd.pages_written;
2398 ext4_journal_stop(handle);
2400 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2401 /* Commit the transaction, which would
2402 * free the blocks released in the transaction,
2403 * and try again.
2404 */
2405 jbd2_journal_force_commit_nested(sbi->s_journal);
2406 ret = 0;
2407 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2409 * Got one extent now try with rest of the pages.
2410 * If mpd.retval is set -EIO, journal is aborted.
2411 * So we don't need to write any more.
2413 pages_written += mpd.pages_written;
2414 ret = mpd.retval;
2415 io_done = 1;
2416 } else if (wbc->nr_to_write)
2417 /*
2418 * There is no more writeout needed,
2419 * or we requested a non-blocking writeout
2420 * and found the device congested.
2421 */
2422 break;
2424 blk_finish_plug(&plug);
2425 if (!io_done && !cycled) {
2426 cycled = 1;
2427 index = 0;
2428 wbc->range_start = index << PAGE_CACHE_SHIFT;
2429 wbc->range_end = mapping->writeback_index - 1;
2430 goto retry;
2433 /* Update index */
2434 wbc->range_cyclic = range_cyclic;
2435 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2436 /*
2437 * Set the writeback_index so that range_cyclic
2438 * mode will write it back later.
2439 */
2440 mapping->writeback_index = done_index;
2442 out_writepages:
2443 wbc->nr_to_write -= nr_to_writebump;
2444 wbc->range_start = range_start;
2445 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2446 return ret;
2449 #define FALL_BACK_TO_NONDELALLOC 1
2450 static int ext4_nonda_switch(struct super_block *sb)
2452 s64 free_blocks, dirty_blocks;
2453 struct ext4_sb_info *sbi = EXT4_SB(sb);
2455 /*
2456 * Switch to non-delalloc mode if we are running low
2457 * on free blocks. The free-block accounting via percpu
2458 * counters can get slightly wrong, with up to percpu_counter_batch
2459 * updates accumulated on each CPU without updating the global counters.
2460 * Delalloc needs accurate free-block accounting, so switch
2461 * to non-delalloc when we are near the error range.
2462 */
2463 free_blocks = EXT4_C2B(sbi,
2464 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
2465 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2467 * Start pushing delalloc when 1/2 of free blocks are dirty.
2469 if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
2470 !writeback_in_progress(sb->s_bdi) &&
2471 down_read_trylock(&sb->s_umount)) {
2472 writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2473 up_read(&sb->s_umount);
2476 if (2 * free_blocks < 3 * dirty_blocks ||
2477 free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
2478 /*
2479 * The free block count is less than 150% of the dirty blocks,
2480 * or the free blocks are below the watermark.
2481 */
2482 return 1;
2484 return 0;
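/*
 * Worked example (illustrative numbers): with free_blocks = 100 and
 * dirty_blocks = 80, the first test above is 2 * 100 < 3 * 80, i.e.
 * 200 < 240, so we return 1 and fall back to non-delalloc mode. With
 * dirty_blocks = 60 that test (200 < 180) fails and, assuming the
 * watermark test also passes, delalloc stays enabled.
 */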
2487 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2488 loff_t pos, unsigned len, unsigned flags,
2489 struct page **pagep, void **fsdata)
2491 int ret, retries = 0;
2492 struct page *page;
2493 pgoff_t index;
2494 struct inode *inode = mapping->host;
2495 handle_t *handle;
2497 index = pos >> PAGE_CACHE_SHIFT;
2499 if (ext4_nonda_switch(inode->i_sb)) {
2500 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2501 return ext4_write_begin(file, mapping, pos,
2502 len, flags, pagep, fsdata);
2504 *fsdata = (void *)0;
2505 trace_ext4_da_write_begin(inode, pos, len, flags);
2506 retry:
2507 /*
2508 * With delayed allocation, we don't log the i_disksize update
2509 * if there is delayed block allocation. But we still need
2510 * to journal the i_disksize update if the write extends the
2511 * end of file into an already mapped buffer.
2512 */
2513 handle = ext4_journal_start(inode, 1);
2514 if (IS_ERR(handle)) {
2515 ret = PTR_ERR(handle);
2516 goto out;
2518 /* We cannot recurse into the filesystem as the transaction is already
2519 * started */
2520 flags |= AOP_FLAG_NOFS;
2522 page = grab_cache_page_write_begin(mapping, index, flags);
2523 if (!page) {
2524 ext4_journal_stop(handle);
2525 ret = -ENOMEM;
2526 goto out;
2528 *pagep = page;
2530 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2531 if (ret < 0) {
2532 unlock_page(page);
2533 ext4_journal_stop(handle);
2534 page_cache_release(page);
2535 /*
2536 * block_write_begin may have instantiated a few blocks
2537 * outside i_size. Trim these off again. We don't need
2538 * i_size_read because we hold i_mutex.
2539 */
2540 if (pos + len > inode->i_size)
2541 ext4_truncate_failed_write(inode);
2544 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2545 goto retry;
2546 out:
2547 return ret;
2550 /*
2551 * Check whether we should update i_disksize
2552 * when a write to the end of file does not require block allocation.
2553 */
2554 static int ext4_da_should_update_i_disksize(struct page *page,
2555 unsigned long offset)
2557 struct buffer_head *bh;
2558 struct inode *inode = page->mapping->host;
2559 unsigned int idx;
2560 int i;
2562 bh = page_buffers(page);
2563 idx = offset >> inode->i_blkbits;
2565 for (i = 0; i < idx; i++)
2566 bh = bh->b_this_page;
2568 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2569 return 0;
2570 return 1;
2573 static int ext4_da_write_end(struct file *file,
2574 struct address_space *mapping,
2575 loff_t pos, unsigned len, unsigned copied,
2576 struct page *page, void *fsdata)
2578 struct inode *inode = mapping->host;
2579 int ret = 0, ret2;
2580 handle_t *handle = ext4_journal_current_handle();
2581 loff_t new_i_size;
2582 unsigned long start, end;
2583 int write_mode = (int)(unsigned long)fsdata;
2585 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2586 switch (ext4_inode_journal_mode(inode)) {
2587 case EXT4_INODE_ORDERED_DATA_MODE:
2588 return ext4_ordered_write_end(file, mapping, pos,
2589 len, copied, page, fsdata);
2590 case EXT4_INODE_WRITEBACK_DATA_MODE:
2591 return ext4_writeback_write_end(file, mapping, pos,
2592 len, copied, page, fsdata);
2593 default:
2594 BUG();
2598 trace_ext4_da_write_end(inode, pos, len, copied);
2599 start = pos & (PAGE_CACHE_SIZE - 1);
2600 end = start + copied - 1;
2603 * generic_write_end() will run mark_inode_dirty() if i_size
2604 * changes. So let's piggyback the i_disksize mark_inode_dirty
2605 * into that.
2608 new_i_size = pos + copied;
2609 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2610 if (ext4_da_should_update_i_disksize(page, end)) {
2611 down_write(&EXT4_I(inode)->i_data_sem);
2612 if (new_i_size > EXT4_I(inode)->i_disksize) {
2614 * Updating i_disksize when extending file
2615 * without needing block allocation
2617 if (ext4_should_order_data(inode))
2618 ret = ext4_jbd2_file_inode(handle,
2619 inode);
2621 EXT4_I(inode)->i_disksize = new_i_size;
2623 up_write(&EXT4_I(inode)->i_data_sem);
2624 /* We need to mark the inode dirty even if
2625 * new_i_size is less than inode->i_size
2626 * but greater than i_disksize (hint: delalloc).
2627 */
2628 ext4_mark_inode_dirty(handle, inode);
2631 ret2 = generic_write_end(file, mapping, pos, len, copied,
2632 page, fsdata);
2633 copied = ret2;
2634 if (ret2 < 0)
2635 ret = ret2;
2636 ret2 = ext4_journal_stop(handle);
2637 if (!ret)
2638 ret = ret2;
2640 return ret ? ret : copied;
2643 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2646 * Drop reserved blocks
2648 BUG_ON(!PageLocked(page));
2649 if (!page_has_buffers(page))
2650 goto out;
2652 ext4_da_page_release_reservation(page, offset);
2654 out:
2655 ext4_invalidatepage(page, offset);
2657 return;
2661 * Force all delayed allocation blocks to be allocated for a given inode.
2663 int ext4_alloc_da_blocks(struct inode *inode)
2665 trace_ext4_alloc_da_blocks(inode);
2667 if (!EXT4_I(inode)->i_reserved_data_blocks &&
2668 !EXT4_I(inode)->i_reserved_meta_blocks)
2669 return 0;
2672 * We do something simple for now. The filemap_flush() will
2673 * also start triggering a write of the data blocks, which is
2674 * not strictly speaking necessary (and for users of
2675 * laptop_mode, not even desirable). However, to do otherwise
2676 * would require replicating code paths in:
2678 * ext4_da_writepages() ->
2679 * write_cache_pages() ---> (via passed in callback function)
2680 * __mpage_da_writepage() -->
2681 * mpage_add_bh_to_extent()
2682 * mpage_da_map_blocks()
2684 * The problem is that write_cache_pages(), located in
2685 * mm/page-writeback.c, marks pages clean in preparation for
2686 * doing I/O, which is not desirable if we're not planning on
2687 * doing I/O at all.
2689 * We could call write_cache_pages(), and then redirty all of
2690 * the pages by calling redirty_page_for_writepage() but that
2691 * would be ugly in the extreme. So instead we would need to
2692 * replicate parts of the code in the above functions,
2693 * simplifying them because we wouldn't actually intend to
2694 * write out the pages, but rather only collect contiguous
2695 * logical block extents, call the multi-block allocator, and
2696 * then update the buffer heads with the block allocations.
2698 * For now, though, we'll cheat by calling filemap_flush(),
2699 * which will map the blocks, and start the I/O, but not
2700 * actually wait for the I/O to complete.
2702 return filemap_flush(inode->i_mapping);
2706 * bmap() is special. It gets used by applications such as lilo and by
2707 * the swapper to find the on-disk block of a specific piece of data.
2709 * Naturally, this is dangerous if the block concerned is still in the
2710 * journal. If somebody makes a swapfile on an ext4 data-journaling
2711 * filesystem and enables swap, then they may get a nasty shock when the
2712 * data getting swapped to that swapfile suddenly gets overwritten by
2713 * the original zeros written out previously to the journal and
2714 * awaiting writeback in the kernel's buffer cache.
2716 * So, if we see any bmap calls here on a modified, data-journaled file,
2717 * take extra steps to flush any blocks which might be in the cache.
2719 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2721 struct inode *inode = mapping->host;
2722 journal_t *journal;
2723 int err;
2725 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2726 test_opt(inode->i_sb, DELALLOC)) {
2727 /*
2728 * With delalloc we want to sync the file
2729 * so that we can make sure we allocate
2730 * blocks for the file.
2731 */
2732 filemap_write_and_wait(mapping);
2735 if (EXT4_JOURNAL(inode) &&
2736 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2738 * This is a REALLY heavyweight approach, but the use of
2739 * bmap on dirty files is expected to be extremely rare:
2740 * only if we run lilo or swapon on a freshly made file
2741 * do we expect this to happen.
2743 * (bmap requires CAP_SYS_RAWIO so this does not
2744 * represent an unprivileged user DOS attack --- we'd be
2745 * in trouble if mortal users could trigger this path at
2746 * will.)
2748 * NB. EXT4_STATE_JDATA is not set on files other than
2749 * regular files. If somebody wants to bmap a directory
2750 * or symlink and gets confused because the buffer
2751 * hasn't yet been flushed to disk, they deserve
2752 * everything they get.
2755 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2756 journal = EXT4_JOURNAL(inode);
2757 jbd2_journal_lock_updates(journal);
2758 err = jbd2_journal_flush(journal);
2759 jbd2_journal_unlock_updates(journal);
2761 if (err)
2762 return 0;
2765 return generic_block_bmap(mapping, block, ext4_get_block);
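/*
 * Illustrative userspace sketch (not part of the original source): the
 * FIBMAP ioctl is how tools such as lilo end up in ext4_bmap(); it
 * requires CAP_SYS_RAWIO, as noted above.
 */
#if 0
#include <linux/fs.h>
#include <sys/ioctl.h>

static long first_disk_block(int fd)
{
	int blk = 0;			/* in: logical block 0 */

	if (ioctl(fd, FIBMAP, &blk))	/* out: physical block number */
		return -1;
	return blk;			/* 0 means a hole */
}
#endif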
2768 static int ext4_readpage(struct file *file, struct page *page)
2770 trace_ext4_readpage(page);
2771 return mpage_readpage(page, ext4_get_block);
2774 static int
2775 ext4_readpages(struct file *file, struct address_space *mapping,
2776 struct list_head *pages, unsigned nr_pages)
2778 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2781 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2783 struct buffer_head *head, *bh;
2784 unsigned int curr_off = 0;
2786 if (!page_has_buffers(page))
2787 return;
2788 head = bh = page_buffers(page);
2789 do {
2790 if (offset <= curr_off && test_clear_buffer_uninit(bh)
2791 && bh->b_private) {
2792 ext4_free_io_end(bh->b_private);
2793 bh->b_private = NULL;
2794 bh->b_end_io = NULL;
2796 curr_off = curr_off + bh->b_size;
2797 bh = bh->b_this_page;
2798 } while (bh != head);
2801 static void ext4_invalidatepage(struct page *page, unsigned long offset)
2803 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2805 trace_ext4_invalidatepage(page, offset);
2808 * free any io_end structure allocated for buffers to be discarded
2810 if (ext4_should_dioread_nolock(page->mapping->host))
2811 ext4_invalidatepage_free_endio(page, offset);
2813 * If it's a full truncate we just forget about the pending dirtying
2815 if (offset == 0)
2816 ClearPageChecked(page);
2818 if (journal)
2819 jbd2_journal_invalidatepage(journal, page, offset);
2820 else
2821 block_invalidatepage(page, offset);
2824 static int ext4_releasepage(struct page *page, gfp_t wait)
2826 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2828 trace_ext4_releasepage(page);
2830 WARN_ON(PageChecked(page));
2831 if (!page_has_buffers(page))
2832 return 0;
2833 if (journal)
2834 return jbd2_journal_try_to_free_buffers(journal, page, wait);
2835 else
2836 return try_to_free_buffers(page);
2839 /*
2840 * ext4_get_block used when preparing for a DIO write or buffer write.
2841 * We allocate an uninitialized extent if blocks haven't been allocated.
2842 * The extent will be converted to initialized after the I/O is complete.
2843 */
2844 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
2845 struct buffer_head *bh_result, int create)
2847 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
2848 inode->i_ino, create);
2849 return _ext4_get_block(inode, iblock, bh_result,
2850 EXT4_GET_BLOCKS_IO_CREATE_EXT);
2853 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
2854 struct buffer_head *bh_result, int flags)
2856 handle_t *handle = ext4_journal_current_handle();
2857 struct ext4_map_blocks map;
2858 int ret = 0;
2860 ext4_debug("ext4_get_block_write_nolock: inode %lu, flag %d\n",
2861 inode->i_ino, flags);
2863 flags = EXT4_GET_BLOCKS_NO_LOCK;
2865 map.m_lblk = iblock;
2866 map.m_len = bh_result->b_size >> inode->i_blkbits;
2868 ret = ext4_map_blocks(handle, inode, &map, flags);
2869 if (ret > 0) {
2870 map_bh(bh_result, inode->i_sb, map.m_pblk);
2871 bh_result->b_state = (bh_result->b_state & ~EXT4_MAP_FLAGS) |
2872 map.m_flags;
2873 bh_result->b_size = inode->i_sb->s_blocksize * map.m_len;
2874 ret = 0;
2876 return ret;
2879 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2880 ssize_t size, void *private, int ret,
2881 bool is_async)
2883 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2884 ext4_io_end_t *io_end = iocb->private;
2885 struct workqueue_struct *wq;
2886 unsigned long flags;
2887 struct ext4_inode_info *ei;
2889 /* if not async direct I/O or a DIO with a 0-byte write, just return */
2890 if (!io_end || !size)
2891 goto out;
2893 ext_debug("ext4_end_io_dio(): io_end 0x%p "
2894 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
2895 iocb->private, io_end->inode->i_ino, iocb, offset,
2896 size);
2898 iocb->private = NULL;
2900 /* if not aio dio with unwritten extents, just free io and return */
2901 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2902 ext4_free_io_end(io_end);
2903 out:
2904 if (is_async)
2905 aio_complete(iocb, ret, 0);
2906 inode_dio_done(inode);
2907 return;
2910 io_end->offset = offset;
2911 io_end->size = size;
2912 if (is_async) {
2913 io_end->iocb = iocb;
2914 io_end->result = ret;
2916 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
2918 /* Add the io_end to per-inode completed aio dio list*/
2919 ei = EXT4_I(io_end->inode);
2920 spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2921 list_add_tail(&io_end->list, &ei->i_completed_io_list);
2922 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2924 /* queue the work to convert unwritten extents to written */
2925 queue_work(wq, &io_end->work);
2928 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2930 ext4_io_end_t *io_end = bh->b_private;
2931 struct workqueue_struct *wq;
2932 struct inode *inode;
2933 unsigned long flags;
2935 if (!test_clear_buffer_uninit(bh) || !io_end)
2936 goto out;
2938 if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
2939 ext4_msg(io_end->inode->i_sb, KERN_INFO,
2940 "sb umounted, discard end_io request for inode %lu",
2941 io_end->inode->i_ino);
2942 ext4_free_io_end(io_end);
2943 goto out;
2946 /*
2947 * It may be over-defensive to check EXT4_IO_END_UNWRITTEN here,
2948 * but being more careful is always safe for future changes.
2949 */
2950 inode = io_end->inode;
2951 ext4_set_io_unwritten_flag(inode, io_end);
2953 /* Add the io_end to per-inode completed io list*/
2954 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2955 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2956 spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2958 wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2959 /* queue the work to convert unwritten extents to written */
2960 queue_work(wq, &io_end->work);
2961 out:
2962 bh->b_private = NULL;
2963 bh->b_end_io = NULL;
2964 clear_buffer_uninit(bh);
2965 end_buffer_async_write(bh, uptodate);
2968 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2970 ext4_io_end_t *io_end;
2971 struct page *page = bh->b_page;
2972 loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2973 size_t size = bh->b_size;
2975 retry:
2976 io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2977 if (!io_end) {
2978 pr_warn_ratelimited("%s: allocation fail\n", __func__);
2979 schedule();
2980 goto retry;
2982 io_end->offset = offset;
2983 io_end->size = size;
2984 /*
2985 * We need to hold a reference to the page to make sure it
2986 * doesn't get evicted before ext4_end_io_work() has a chance
2987 * to convert the extent from unwritten to written.
2988 */
2989 io_end->page = page;
2990 get_page(io_end->page);
2992 bh->b_private = io_end;
2993 bh->b_end_io = ext4_end_io_buffer_write;
2994 return 0;
2997 /*
2998 * For ext4 extent files, ext4 will do direct-I/O writes to holes,
2999 * preallocated extents, and writes that extend the file, with no need to
3000 * fall back to buffered I/O.
3001 *
3002 * For holes, we allocate those blocks and mark them as uninitialized.
3003 * If those blocks were preallocated, we make sure they are split as
3004 * needed, but still keep the range to write as uninitialized.
3005 *
3006 * The unwritten extents will be converted to written when DIO is completed.
3007 * For async direct I/O, since the I/O may still be pending on return, we
3008 * set up an end_io callback function, which will do the conversion
3009 * when the async direct I/O is completed.
3010 *
3011 * If the O_DIRECT write will extend the file then add this inode to the
3012 * orphan list. So recovery will truncate it back to the original size
3013 * if the machine crashes during the write.
3014 */
3016 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3017 const struct iovec *iov, loff_t offset,
3018 unsigned long nr_segs)
3020 struct file *file = iocb->ki_filp;
3021 struct inode *inode = file->f_mapping->host;
3022 ssize_t ret;
3023 size_t count = iov_length(iov, nr_segs);
3025 loff_t final_size = offset + count;
3026 if (rw == WRITE && final_size <= inode->i_size) {
3027 int overwrite = 0;
3029 BUG_ON(iocb->private == NULL);
3031 /* If we do an overwrite DIO, i_mutex locking can be released */
3032 overwrite = *((int *)iocb->private);
3034 if (overwrite) {
3035 down_read(&EXT4_I(inode)->i_data_sem);
3036 mutex_unlock(&inode->i_mutex);
3039 /*
3040 * We can do direct writes to holes and fallocated extents.
3041 *
3042 * Allocated blocks to fill the hole are marked as uninitialized
3043 * to prevent a parallel buffered read from exposing the stale data
3044 * before DIO completes the data I/O.
3045 *
3046 * As to previously fallocated extents, ext4's get_block
3047 * will simply mark the buffer mapped but still
3048 * keep the extents uninitialized.
3049 *
3050 * For the non-AIO case, we convert those unwritten extents
3051 * to written after returning from blockdev_direct_IO.
3052 *
3053 * For async DIO, the conversion needs to be deferred until
3054 * the I/O is completed. The ext4 end_io callback function
3055 * will be called to take care of the conversion work.
3056 * Here, for the async case, we allocate an io_end structure to
3057 * hook to the iocb.
3058 */
3059 iocb->private = NULL;
3060 EXT4_I(inode)->cur_aio_dio = NULL;
3061 if (!is_sync_kiocb(iocb)) {
3062 ext4_io_end_t *io_end =
3063 ext4_init_io_end(inode, GFP_NOFS);
3064 if (!io_end) {
3065 ret = -ENOMEM;
3066 goto retake_lock;
3068 io_end->flag |= EXT4_IO_END_DIRECT;
3069 iocb->private = io_end;
3070 /*
3071 * We save the I/O structure for the current async
3072 * direct I/O, so that ext4_map_blocks() can later
3073 * flag in the I/O structure whether there are
3074 * unwritten extents that need to be converted
3075 * when the I/O is completed.
3076 */
3077 EXT4_I(inode)->cur_aio_dio = iocb->private;
3080 if (overwrite)
3081 ret = __blockdev_direct_IO(rw, iocb, inode,
3082 inode->i_sb->s_bdev, iov,
3083 offset, nr_segs,
3084 ext4_get_block_write_nolock,
3085 ext4_end_io_dio,
3086 NULL,
3088 else
3089 ret = __blockdev_direct_IO(rw, iocb, inode,
3090 inode->i_sb->s_bdev, iov,
3091 offset, nr_segs,
3092 ext4_get_block_write,
3093 ext4_end_io_dio,
3094 NULL,
3095 DIO_LOCKING);
3096 if (iocb->private)
3097 EXT4_I(inode)->cur_aio_dio = NULL;
3098 /*
3099 * The io_end structure takes a reference to the inode;
3100 * that structure needs to be destroyed and the
3101 * reference to the inode needs to be dropped when the I/O is
3102 * complete, even for a 0-byte or failed write.
3103 *
3104 * In the successful AIO DIO case, the io_end structure will be
3105 * destroyed and the reference to the inode will be dropped
3106 * after the end_io callback function is called.
3107 *
3108 * In the 0-byte write or error case, since the
3109 * VFS direct I/O won't invoke the end_io callback function,
3110 * we need to free the end_io structure here.
3111 */
3112 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3113 ext4_free_io_end(iocb->private);
3114 iocb->private = NULL;
3115 } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3116 EXT4_STATE_DIO_UNWRITTEN)) {
3117 int err;
3118 /*
3119 * In the non-AIO case, since the I/O has already
3120 * completed, we can do the conversion right here.
3121 */
3122 err = ext4_convert_unwritten_extents(inode,
3123 offset, ret);
3124 if (err < 0)
3125 ret = err;
3126 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3129 retake_lock:
3130 /* take i_mutex again if we did an overwrite DIO */
3131 if (overwrite) {
3132 up_read(&EXT4_I(inode)->i_data_sem);
3133 mutex_lock(&inode->i_mutex);
3136 return ret;
3139 /* for writes extending the end of file, we fall back to the old way */
3140 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3143 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3144 const struct iovec *iov, loff_t offset,
3145 unsigned long nr_segs)
3147 struct file *file = iocb->ki_filp;
3148 struct inode *inode = file->f_mapping->host;
3149 ssize_t ret;
3152 * If we are doing data journalling we don't support O_DIRECT
3154 if (ext4_should_journal_data(inode))
3155 return 0;
3157 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3158 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3159 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3160 else
3161 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3162 trace_ext4_direct_IO_exit(inode, offset,
3163 iov_length(iov, nr_segs), rw, ret);
3164 return ret;
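/*
 * Illustrative userspace sketch (not part of the original source): a
 * minimal O_DIRECT write that reaches ext4_direct_IO(); the 4096-byte
 * alignment here is an assumption that covers common block sizes.
 */
#if 0
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static ssize_t dio_write_page(const char *path)
{
	void *buf;
	ssize_t ret = -1;
	int fd = open(path, O_WRONLY | O_DIRECT | O_CREAT, 0644);

	if (fd < 0)
		return -1;
	/* O_DIRECT requires an aligned buffer, offset and length */
	if (posix_memalign(&buf, 4096, 4096) == 0) {
		memset(buf, 'x', 4096);
		ret = write(fd, buf, 4096);
		free(buf);
	}
	close(fd);
	return ret;
}
#endif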
3167 /*
3168 * Pages can be marked dirty completely asynchronously from ext4's journalling
3169 * activity, by filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3170 * much here because ->set_page_dirty is called under VFS locks. The page is
3171 * not necessarily locked.
3172 *
3173 * We cannot just dirty the page and leave attached buffers clean, because the
3174 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3175 * or jbddirty because all the journalling code will explode.
3176 *
3177 * So what we do is to mark the page "pending dirty" and next time writepage
3178 * is called, propagate that into the buffers appropriately.
3179 */
3180 static int ext4_journalled_set_page_dirty(struct page *page)
3182 SetPageChecked(page);
3183 return __set_page_dirty_nobuffers(page);
3186 static const struct address_space_operations ext4_ordered_aops = {
3187 .readpage = ext4_readpage,
3188 .readpages = ext4_readpages,
3189 .writepage = ext4_writepage,
3190 .write_begin = ext4_write_begin,
3191 .write_end = ext4_ordered_write_end,
3192 .bmap = ext4_bmap,
3193 .invalidatepage = ext4_invalidatepage,
3194 .releasepage = ext4_releasepage,
3195 .direct_IO = ext4_direct_IO,
3196 .migratepage = buffer_migrate_page,
3197 .is_partially_uptodate = block_is_partially_uptodate,
3198 .error_remove_page = generic_error_remove_page,
3201 static const struct address_space_operations ext4_writeback_aops = {
3202 .readpage = ext4_readpage,
3203 .readpages = ext4_readpages,
3204 .writepage = ext4_writepage,
3205 .write_begin = ext4_write_begin,
3206 .write_end = ext4_writeback_write_end,
3207 .bmap = ext4_bmap,
3208 .invalidatepage = ext4_invalidatepage,
3209 .releasepage = ext4_releasepage,
3210 .direct_IO = ext4_direct_IO,
3211 .migratepage = buffer_migrate_page,
3212 .is_partially_uptodate = block_is_partially_uptodate,
3213 .error_remove_page = generic_error_remove_page,
3216 static const struct address_space_operations ext4_journalled_aops = {
3217 .readpage = ext4_readpage,
3218 .readpages = ext4_readpages,
3219 .writepage = ext4_writepage,
3220 .write_begin = ext4_write_begin,
3221 .write_end = ext4_journalled_write_end,
3222 .set_page_dirty = ext4_journalled_set_page_dirty,
3223 .bmap = ext4_bmap,
3224 .invalidatepage = ext4_invalidatepage,
3225 .releasepage = ext4_releasepage,
3226 .direct_IO = ext4_direct_IO,
3227 .is_partially_uptodate = block_is_partially_uptodate,
3228 .error_remove_page = generic_error_remove_page,
3231 static const struct address_space_operations ext4_da_aops = {
3232 .readpage = ext4_readpage,
3233 .readpages = ext4_readpages,
3234 .writepage = ext4_writepage,
3235 .writepages = ext4_da_writepages,
3236 .write_begin = ext4_da_write_begin,
3237 .write_end = ext4_da_write_end,
3238 .bmap = ext4_bmap,
3239 .invalidatepage = ext4_da_invalidatepage,
3240 .releasepage = ext4_releasepage,
3241 .direct_IO = ext4_direct_IO,
3242 .migratepage = buffer_migrate_page,
3243 .is_partially_uptodate = block_is_partially_uptodate,
3244 .error_remove_page = generic_error_remove_page,
3247 void ext4_set_aops(struct inode *inode)
3249 switch (ext4_inode_journal_mode(inode)) {
3250 case EXT4_INODE_ORDERED_DATA_MODE:
3251 if (test_opt(inode->i_sb, DELALLOC))
3252 inode->i_mapping->a_ops = &ext4_da_aops;
3253 else
3254 inode->i_mapping->a_ops = &ext4_ordered_aops;
3255 break;
3256 case EXT4_INODE_WRITEBACK_DATA_MODE:
3257 if (test_opt(inode->i_sb, DELALLOC))
3258 inode->i_mapping->a_ops = &ext4_da_aops;
3259 else
3260 inode->i_mapping->a_ops = &ext4_writeback_aops;
3261 break;
3262 case EXT4_INODE_JOURNAL_DATA_MODE:
3263 inode->i_mapping->a_ops = &ext4_journalled_aops;
3264 break;
3265 default:
3266 BUG();
3272 * ext4_discard_partial_page_buffers()
3273 * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
3274 * This function finds and locks the page containing the offset
3275 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
3276 * Calling functions that already have the page locked should call
3277 * ext4_discard_partial_page_buffers_no_lock directly.
3279 int ext4_discard_partial_page_buffers(handle_t *handle,
3280 struct address_space *mapping, loff_t from,
3281 loff_t length, int flags)
3283 struct inode *inode = mapping->host;
3284 struct page *page;
3285 int err = 0;
3287 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3288 mapping_gfp_mask(mapping) & ~__GFP_FS);
3289 if (!page)
3290 return -ENOMEM;
3292 err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
3293 from, length, flags);
3295 unlock_page(page);
3296 page_cache_release(page);
3297 return err;
3300 /*
3301 * ext4_discard_partial_page_buffers_no_lock()
3302 * Zeros a page range of length 'length' starting from offset 'from'.
3303 * Buffer heads that correspond to the block-aligned regions of the
3304 * zeroed range will be unmapped. Non-block-aligned regions
3305 * will have the corresponding buffer head mapped if needed so that
3306 * that region of the page can be updated with the partial zero out.
3307 *
3308 * This function assumes that the page has already been locked.
3309 * The range to be discarded must be contained within the given page.
3310 * If the specified range exceeds the end of the page it will be shortened
3311 * to the end of the page that corresponds to 'from'. This function is
3312 * appropriate for updating a page and its buffer heads to be unmapped and
3313 * zeroed for blocks that have been either released, or are going to be
3314 * released.
3315 *
3316 * handle: The journal handle
3317 * inode: The file's inode
3318 * page: A locked page that contains the offset "from"
3319 * from: The starting byte offset (from the beginning of the file)
3320 * to begin discarding
3321 * len: The length of bytes to discard
3322 * flags: Optional flags that may be used:
3323 *
3324 * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
3325 * Only zero the regions of the page whose buffer heads
3326 * have already been unmapped. This flag is appropriate
3327 * for updating the contents of a page whose blocks may
3328 * have already been released, and we only want to zero
3329 * out the regions that correspond to those released blocks.
3330 *
3331 * Returns zero on success or negative on failure.
3332 */
3333 static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3334 struct inode *inode, struct page *page, loff_t from,
3335 loff_t length, int flags)
3337 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3338 unsigned int offset = from & (PAGE_CACHE_SIZE-1);
3339 unsigned int blocksize, max, pos;
3340 ext4_lblk_t iblock;
3341 struct buffer_head *bh;
3342 int err = 0;
3344 blocksize = inode->i_sb->s_blocksize;
3345 max = PAGE_CACHE_SIZE - offset;
3347 if (index != page->index)
3348 return -EINVAL;
3350 /*
3351 * Correct the length if it does not fall between
3352 * 'from' and the end of the page.
3353 */
3354 if (length > max || length < 0)
3355 length = max;
3357 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3359 if (!page_has_buffers(page))
3360 create_empty_buffers(page, blocksize, 0);
3362 /* Find the buffer that contains "offset" */
3363 bh = page_buffers(page);
3364 pos = blocksize;
3365 while (offset >= pos) {
3366 bh = bh->b_this_page;
3367 iblock++;
3368 pos += blocksize;
3371 pos = offset;
3372 while (pos < offset + length) {
3373 unsigned int end_of_block, range_to_discard;
3375 err = 0;
3377 /* The length of space left to zero and unmap */
3378 range_to_discard = offset + length - pos;
3380 /* The length of space until the end of the block */
3381 end_of_block = blocksize - (pos & (blocksize-1));
3384 * Do not unmap or zero past end of block
3385 * for this buffer head
3387 if (range_to_discard > end_of_block)
3388 range_to_discard = end_of_block;
3391 /*
3392 * Skip this buffer head if we are only zeroing unmapped
3393 * regions of the page.
3394 */
3395 if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
3396 buffer_mapped(bh))
3397 goto next;
3399 /* If the range is block aligned, unmap */
3400 if (range_to_discard == blocksize) {
3401 clear_buffer_dirty(bh);
3402 bh->b_bdev = NULL;
3403 clear_buffer_mapped(bh);
3404 clear_buffer_req(bh);
3405 clear_buffer_new(bh);
3406 clear_buffer_delay(bh);
3407 clear_buffer_unwritten(bh);
3408 clear_buffer_uptodate(bh);
3409 zero_user(page, pos, range_to_discard);
3410 BUFFER_TRACE(bh, "Buffer discarded");
3411 goto next;
3414 /*
3415 * If this block is not completely contained in the range
3416 * to be discarded, then it is not going to be released. Because
3417 * we need to keep this block, we need to make sure this part
3418 * of the page is uptodate before we modify it by writing
3419 * partial zeros on it.
3420 */
3421 if (!buffer_mapped(bh)) {
3423 * Buffer head must be mapped before we can read
3424 * from the block
3426 BUFFER_TRACE(bh, "unmapped");
3427 ext4_get_block(inode, iblock, bh, 0);
3428 /* unmapped? It's a hole - nothing to do */
3429 if (!buffer_mapped(bh)) {
3430 BUFFER_TRACE(bh, "still unmapped");
3431 goto next;
3435 /* Ok, it's mapped. Make sure it's up-to-date */
3436 if (PageUptodate(page))
3437 set_buffer_uptodate(bh);
3439 if (!buffer_uptodate(bh)) {
3440 err = -EIO;
3441 ll_rw_block(READ, 1, &bh);
3442 wait_on_buffer(bh);
3443 /* Uhhuh. Read error. Complain and punt.*/
3444 if (!buffer_uptodate(bh))
3445 goto next;
3448 if (ext4_should_journal_data(inode)) {
3449 BUFFER_TRACE(bh, "get write access");
3450 err = ext4_journal_get_write_access(handle, bh);
3451 if (err)
3452 goto next;
3455 zero_user(page, pos, range_to_discard);
3457 err = 0;
3458 if (ext4_should_journal_data(inode)) {
3459 err = ext4_handle_dirty_metadata(handle, inode, bh);
3460 } else
3461 mark_buffer_dirty(bh);
3463 BUFFER_TRACE(bh, "Partial buffer zeroed");
3464 next:
3465 bh = bh->b_this_page;
3466 iblock++;
3467 pos += range_to_discard;
3470 return err;
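/*
 * Illustrative sketch (hypothetical caller, not from the original
 * source): zeroing the partial tail of the page that contains a new,
 * smaller i_size, the way a truncate-style path might use the locked
 * wrapper above.
 */
#if 0
static int zero_partial_tail(handle_t *handle, struct inode *inode,
			     loff_t new_size)
{
	loff_t page_len = new_size & (PAGE_CACHE_SIZE - 1);

	if (!page_len)
		return 0;	/* new_size is page aligned, nothing to zero */
	return ext4_discard_partial_page_buffers(handle, inode->i_mapping,
			new_size, PAGE_CACHE_SIZE - page_len, 0);
}
#endif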
3473 int ext4_can_truncate(struct inode *inode)
3475 if (S_ISREG(inode->i_mode))
3476 return 1;
3477 if (S_ISDIR(inode->i_mode))
3478 return 1;
3479 if (S_ISLNK(inode->i_mode))
3480 return !ext4_inode_is_fast_symlink(inode);
3481 return 0;
3484 /*
3485 * ext4_punch_hole: punches a hole in a file by releasing the blocks
3486 * associated with the given offset and length
3487 *
3488 * @inode: File inode
3489 * @offset: The offset where the hole will begin
3490 * @len: The length of the hole
3491 *
3492 * Returns: 0 on success or negative on failure
3493 */
3495 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3497 struct inode *inode = file->f_path.dentry->d_inode;
3498 if (!S_ISREG(inode->i_mode))
3499 return -EOPNOTSUPP;
3501 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3502 /* TODO: Add support for non extent hole punching */
3503 return -EOPNOTSUPP;
3506 if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3507 /* TODO: Add support for bigalloc file systems */
3508 return -EOPNOTSUPP;
3511 return ext4_ext_punch_hole(file, offset, length);
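/*
 * Illustrative userspace sketch (not part of the original source): the
 * usual way into ext4_punch_hole() is fallocate(2); FALLOC_FL_PUNCH_HOLE
 * must be combined with FALLOC_FL_KEEP_SIZE.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>

static int punch(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			 offset, len);
}
#endif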
3515 * ext4_truncate()
3517 * We block out ext4_get_block() block instantiations across the entire
3518 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3519 * simultaneously on behalf of the same inode.
3521 * As we work through the truncate and commit bits of it to the journal there
3522 * is one core, guiding principle: the file's tree must always be consistent on
3523 * disk. We must be able to restart the truncate after a crash.
3525 * The file's tree may be transiently inconsistent in memory (although it
3526 * probably isn't), but whenever we close off and commit a journal transaction,
3527 * the contents of (the filesystem + the journal) must be consistent and
3528 * restartable. It's pretty simple, really: bottom up, right to left (although
3529 * left-to-right works OK too).
3531 * Note that at recovery time, journal replay occurs *before* the restart of
3532 * truncate against the orphan inode list.
3534 * The committed inode has the new, desired i_size (which is the same as
3535 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
3536 * that this inode's truncate did not complete and it will again call
3537 * ext4_truncate() to have another go. So there will be instantiated blocks
3538 * to the right of the truncation point in a crashed ext4 filesystem. But
3539 * that's fine - as long as they are linked from the inode, the post-crash
3540 * ext4_truncate() run will find them and release them.
3542 void ext4_truncate(struct inode *inode)
3544 trace_ext4_truncate_enter(inode);
3546 if (!ext4_can_truncate(inode))
3547 return;
3549 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3551 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3552 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3554 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3555 ext4_ext_truncate(inode);
3556 else
3557 ext4_ind_truncate(inode);
3559 trace_ext4_truncate_exit(inode);
3563 * ext4_get_inode_loc returns with an extra refcount against the inode's
3564 * underlying buffer_head on success. If 'in_mem' is true, we have all
3565 * data in memory that is needed to recreate the on-disk version of this
3566 * inode.
3568 static int __ext4_get_inode_loc(struct inode *inode,
3569 struct ext4_iloc *iloc, int in_mem)
3571 struct ext4_group_desc *gdp;
3572 struct buffer_head *bh;
3573 struct super_block *sb = inode->i_sb;
3574 ext4_fsblk_t block;
3575 int inodes_per_block, inode_offset;
3577 iloc->bh = NULL;
3578 if (!ext4_valid_inum(sb, inode->i_ino))
3579 return -EIO;
3581 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3582 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3583 if (!gdp)
3584 return -EIO;
3587 * Figure out the offset within the block group inode table
3589 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3590 inode_offset = ((inode->i_ino - 1) %
3591 EXT4_INODES_PER_GROUP(sb));
3592 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3593 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
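/*
 * Worked example (illustrative numbers): with 8192 inodes per group,
 * 256-byte inodes and a 4K block size (16 inodes per block), ino 12
 * gives inode_offset = 11, block = ext4_inode_table() + 11 / 16 =
 * table + 0, and iloc->offset = (11 % 16) * 256 = 2816.
 */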
3595 bh = sb_getblk(sb, block);
3596 if (!bh) {
3597 EXT4_ERROR_INODE_BLOCK(inode, block,
3598 "unable to read itable block");
3599 return -EIO;
3601 if (!buffer_uptodate(bh)) {
3602 lock_buffer(bh);
3604 /*
3605 * If the buffer has the write error flag, we have failed
3606 * to write out another inode in the same block. In this
3607 * case, we don't have to read the block, since a re-read
3608 * would only fetch the old, stale inode data from disk.
3609 */
3610 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3611 set_buffer_uptodate(bh);
3613 if (buffer_uptodate(bh)) {
3614 /* someone brought it uptodate while we waited */
3615 unlock_buffer(bh);
3616 goto has_buffer;
3620 * If we have all information of the inode in memory and this
3621 * is the only valid inode in the block, we need not read the
3622 * block.
3624 if (in_mem) {
3625 struct buffer_head *bitmap_bh;
3626 int i, start;
3628 start = inode_offset & ~(inodes_per_block - 1);
3630 /* Is the inode bitmap in cache? */
3631 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3632 if (!bitmap_bh)
3633 goto make_io;
3636 * If the inode bitmap isn't in cache then the
3637 * optimisation may end up performing two reads instead
3638 * of one, so skip it.
3640 if (!buffer_uptodate(bitmap_bh)) {
3641 brelse(bitmap_bh);
3642 goto make_io;
3644 for (i = start; i < start + inodes_per_block; i++) {
3645 if (i == inode_offset)
3646 continue;
3647 if (ext4_test_bit(i, bitmap_bh->b_data))
3648 break;
3650 brelse(bitmap_bh);
3651 if (i == start + inodes_per_block) {
3652 /* all other inodes are free, so skip I/O */
3653 memset(bh->b_data, 0, bh->b_size);
3654 set_buffer_uptodate(bh);
3655 unlock_buffer(bh);
3656 goto has_buffer;
3660 make_io:
3662 * If we need to do any I/O, try to pre-readahead extra
3663 * blocks from the inode table.
3665 if (EXT4_SB(sb)->s_inode_readahead_blks) {
3666 ext4_fsblk_t b, end, table;
3667 unsigned num;
3669 table = ext4_inode_table(sb, gdp);
3670 /* s_inode_readahead_blks is always a power of 2 */
3671 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3672 if (table > b)
3673 b = table;
3674 end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3675 num = EXT4_INODES_PER_GROUP(sb);
3676 if (ext4_has_group_desc_csum(sb))
3677 num -= ext4_itable_unused_count(sb, gdp);
3678 table += num / inodes_per_block;
3679 if (end > table)
3680 end = table;
3681 while (b <= end)
3682 sb_breadahead(sb, b++);
3686 * There are other valid inodes in the buffer, this inode
3687 * has in-inode xattrs, or we don't have this inode in memory.
3688 * Read the block from disk.
3690 trace_ext4_load_inode(inode);
3691 get_bh(bh);
3692 bh->b_end_io = end_buffer_read_sync;
3693 submit_bh(READ | REQ_META | REQ_PRIO, bh);
3694 wait_on_buffer(bh);
3695 if (!buffer_uptodate(bh)) {
3696 EXT4_ERROR_INODE_BLOCK(inode, block,
3697 "unable to read itable block");
3698 brelse(bh);
3699 return -EIO;
3702 has_buffer:
3703 iloc->bh = bh;
3704 return 0;
3707 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3709 /* We have all inode data except xattrs in memory here. */
3710 return __ext4_get_inode_loc(inode, iloc,
3711 !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3714 void ext4_set_inode_flags(struct inode *inode)
3716 unsigned int flags = EXT4_I(inode)->i_flags;
3718 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3719 if (flags & EXT4_SYNC_FL)
3720 inode->i_flags |= S_SYNC;
3721 if (flags & EXT4_APPEND_FL)
3722 inode->i_flags |= S_APPEND;
3723 if (flags & EXT4_IMMUTABLE_FL)
3724 inode->i_flags |= S_IMMUTABLE;
3725 if (flags & EXT4_NOATIME_FL)
3726 inode->i_flags |= S_NOATIME;
3727 if (flags & EXT4_DIRSYNC_FL)
3728 inode->i_flags |= S_DIRSYNC;
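/*
 * Illustrative note (not from the original source): the on-disk flags
 * tested above are typically set from userspace via chattr(1), e.g.
 * "chattr +ai file" sets EXT4_APPEND_FL and EXT4_IMMUTABLE_FL through
 * the FS_IOC_SETFLAGS ioctl, which this function then reflects into
 * S_APPEND and S_IMMUTABLE on the VFS inode.
 */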
3731 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3732 void ext4_get_inode_flags(struct ext4_inode_info *ei)
3734 unsigned int vfs_fl;
3735 unsigned long old_fl, new_fl;
3737 do {
3738 vfs_fl = ei->vfs_inode.i_flags;
3739 old_fl = ei->i_flags;
3740 new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3741 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3742 EXT4_DIRSYNC_FL);
3743 if (vfs_fl & S_SYNC)
3744 new_fl |= EXT4_SYNC_FL;
3745 if (vfs_fl & S_APPEND)
3746 new_fl |= EXT4_APPEND_FL;
3747 if (vfs_fl & S_IMMUTABLE)
3748 new_fl |= EXT4_IMMUTABLE_FL;
3749 if (vfs_fl & S_NOATIME)
3750 new_fl |= EXT4_NOATIME_FL;
3751 if (vfs_fl & S_DIRSYNC)
3752 new_fl |= EXT4_DIRSYNC_FL;
3753 } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
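/*
 * Illustration (not part of the original file): the do/while-cmpxchg
 * loop above is a classic lock-free read-modify-write.  Below is a
 * userspace analogue of the same pattern using C11 atomics instead of
 * the kernel's cmpxchg(); FL_A/FL_B are stand-in flag bits, not real
 * ext4 flags.
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define FL_A 0x1UL
#define FL_B 0x2UL

static _Atomic unsigned long i_flags;

static void sync_flags(unsigned long vfs_fl)
{
	unsigned long old_fl, new_fl;

	old_fl = atomic_load(&i_flags);
	do {
		/* recompute from the freshly observed value */
		new_fl = old_fl & ~(FL_A | FL_B);
		if (vfs_fl & FL_A)
			new_fl |= FL_A;
		if (vfs_fl & FL_B)
			new_fl |= FL_B;
		/* retry if another thread changed i_flags meanwhile;
		 * on failure old_fl is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(&i_flags, &old_fl, new_fl));
}

int main(void)
{
	atomic_store(&i_flags, FL_B);
	sync_flags(FL_A);
	printf("flags: %#lx\n", (unsigned long)atomic_load(&i_flags));
	return 0;
}
#endif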
3756 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3757 struct ext4_inode_info *ei)
3759 blkcnt_t i_blocks ;
3760 struct inode *inode = &(ei->vfs_inode);
3761 struct super_block *sb = inode->i_sb;
3763 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3764 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3765 /* we are using combined 48 bit field */
3766 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3767 le32_to_cpu(raw_inode->i_blocks_lo);
3768 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3769 /* i_blocks is in units of the file system block size */
3770 return i_blocks << (inode->i_blkbits - 9);
3771 } else {
3772 return i_blocks;
3774 } else {
3775 return le32_to_cpu(raw_inode->i_blocks_lo);
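/*
 * Illustration (not part of the original file): decoding the split
 * 48-bit block count handled above.  With the huge_file feature plus
 * the HUGE_FILE inode flag, i_blocks counts filesystem blocks rather
 * than 512-byte sectors, so it is scaled up by (i_blkbits - 9).  The
 * example values are invented.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t i_blocks_high = 0x0001;
	uint32_t i_blocks_lo   = 0x00000010;
	unsigned int blkbits   = 12;        /* 4 KiB filesystem blocks */

	/* 48-bit count: 0x1_00000010 = 4294967312 */
	uint64_t blocks = ((uint64_t)i_blocks_high << 32) | i_blocks_lo;

	/* HUGE_FILE inode flag: units are fs blocks, convert to sectors */
	uint64_t sectors = blocks << (blkbits - 9);    /* x8 for 4 KiB */

	printf("%llu fs blocks = %llu 512-byte sectors\n",
	       (unsigned long long)blocks, (unsigned long long)sectors);
	return 0;
}
#endif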
3779 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3781 struct ext4_iloc iloc;
3782 struct ext4_inode *raw_inode;
3783 struct ext4_inode_info *ei;
3784 struct inode *inode;
3785 journal_t *journal = EXT4_SB(sb)->s_journal;
3786 long ret;
3787 int block;
3788 uid_t i_uid;
3789 gid_t i_gid;
3791 inode = iget_locked(sb, ino);
3792 if (!inode)
3793 return ERR_PTR(-ENOMEM);
3794 if (!(inode->i_state & I_NEW))
3795 return inode;
3797 ei = EXT4_I(inode);
3798 iloc.bh = NULL;
3800 ret = __ext4_get_inode_loc(inode, &iloc, 0);
3801 if (ret < 0)
3802 goto bad_inode;
3803 raw_inode = ext4_raw_inode(&iloc);
3805 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3806 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3807 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3808 EXT4_INODE_SIZE(inode->i_sb)) {
3809 EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
3810 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
3811 EXT4_INODE_SIZE(inode->i_sb));
3812 ret = -EIO;
3813 goto bad_inode;
3815 } else
3816 ei->i_extra_isize = 0;
3818 /* Precompute checksum seed for inode metadata */
3819 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3820 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3821 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3822 __u32 csum;
3823 __le32 inum = cpu_to_le32(inode->i_ino);
3824 __le32 gen = raw_inode->i_generation;
3825 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
3826 sizeof(inum));
3827 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
3828 sizeof(gen));
3831 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
3832 EXT4_ERROR_INODE(inode, "checksum invalid");
3833 ret = -EIO;
3834 goto bad_inode;
3837 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3838 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3839 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3840 if (!(test_opt(inode->i_sb, NO_UID32))) {
3841 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
3842 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3844 i_uid_write(inode, i_uid);
3845 i_gid_write(inode, i_gid);
3846 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
3848 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
3849 ei->i_dir_start_lookup = 0;
3850 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3851 /* We now have enough fields to check if the inode was active or not.
3852 * This is needed because nfsd might try to access dead inodes;
3853 * the test is the same one that e2fsck uses.
3854 * NeilBrown 1999oct15
3856 if (inode->i_nlink == 0) {
3857 if (inode->i_mode == 0 ||
3858 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3859 /* this inode is deleted */
3860 ret = -ESTALE;
3861 goto bad_inode;
3863 /* The only unlinked inodes we let through here have
3864 * valid i_mode and are being read by the orphan
3865 * recovery code: that's fine, we're about to complete
3866 * the process of deleting those. */
3868 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
3869 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
3870 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3871 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3872 ei->i_file_acl |=
3873 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3874 inode->i_size = ext4_isize(raw_inode);
3875 ei->i_disksize = inode->i_size;
3876 #ifdef CONFIG_QUOTA
3877 ei->i_reserved_quota = 0;
3878 #endif
3879 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3880 ei->i_block_group = iloc.block_group;
3881 ei->i_last_alloc_group = ~0;
3883 * NOTE! The in-memory inode i_data array is in little-endian order
3884 * even on big-endian machines: we do NOT byteswap the block numbers!
3886 for (block = 0; block < EXT4_N_BLOCKS; block++)
3887 ei->i_data[block] = raw_inode->i_block[block];
3888 INIT_LIST_HEAD(&ei->i_orphan);
3891 * Set transaction id's of transactions that have to be committed
3892 * to finish f[data]sync. We set them to the currently running transaction
3893 * as we cannot be sure that the inode or some of its metadata isn't
3894 * part of the transaction - the inode could have been reclaimed and
3895 * now it is reread from disk.
3897 if (journal) {
3898 transaction_t *transaction;
3899 tid_t tid;
3901 read_lock(&journal->j_state_lock);
3902 if (journal->j_running_transaction)
3903 transaction = journal->j_running_transaction;
3904 else
3905 transaction = journal->j_committing_transaction;
3906 if (transaction)
3907 tid = transaction->t_tid;
3908 else
3909 tid = journal->j_commit_sequence;
3910 read_unlock(&journal->j_state_lock);
3911 ei->i_sync_tid = tid;
3912 ei->i_datasync_tid = tid;
3915 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3916 if (ei->i_extra_isize == 0) {
3917 /* The extra space is currently unused. Use it. */
3918 ei->i_extra_isize = sizeof(struct ext4_inode) -
3919 EXT4_GOOD_OLD_INODE_SIZE;
3920 } else {
3921 __le32 *magic = (void *)raw_inode +
3922 EXT4_GOOD_OLD_INODE_SIZE +
3923 ei->i_extra_isize;
3924 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3925 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3929 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3930 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3931 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3932 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3934 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
3935 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3936 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3937 inode->i_version |=
3938 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
3941 ret = 0;
3942 if (ei->i_file_acl &&
3943 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
3944 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
3945 ei->i_file_acl);
3946 ret = -EIO;
3947 goto bad_inode;
3948 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3949 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3950 (S_ISLNK(inode->i_mode) &&
3951 !ext4_inode_is_fast_symlink(inode)))
3952 /* Validate extent which is part of inode */
3953 ret = ext4_ext_check_inode(inode);
3954 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3955 (S_ISLNK(inode->i_mode) &&
3956 !ext4_inode_is_fast_symlink(inode))) {
3957 /* Validate block references which are part of inode */
3958 ret = ext4_ind_check_inode(inode);
3960 if (ret)
3961 goto bad_inode;
3963 if (S_ISREG(inode->i_mode)) {
3964 inode->i_op = &ext4_file_inode_operations;
3965 inode->i_fop = &ext4_file_operations;
3966 ext4_set_aops(inode);
3967 } else if (S_ISDIR(inode->i_mode)) {
3968 inode->i_op = &ext4_dir_inode_operations;
3969 inode->i_fop = &ext4_dir_operations;
3970 } else if (S_ISLNK(inode->i_mode)) {
3971 if (ext4_inode_is_fast_symlink(inode)) {
3972 inode->i_op = &ext4_fast_symlink_inode_operations;
3973 nd_terminate_link(ei->i_data, inode->i_size,
3974 sizeof(ei->i_data) - 1);
3975 } else {
3976 inode->i_op = &ext4_symlink_inode_operations;
3977 ext4_set_aops(inode);
3979 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3980 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3981 inode->i_op = &ext4_special_inode_operations;
3982 if (raw_inode->i_block[0])
3983 init_special_inode(inode, inode->i_mode,
3984 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3985 else
3986 init_special_inode(inode, inode->i_mode,
3987 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3988 } else {
3989 ret = -EIO;
3990 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3991 goto bad_inode;
3993 brelse(iloc.bh);
3994 ext4_set_inode_flags(inode);
3995 unlock_new_inode(inode);
3996 return inode;
3998 bad_inode:
3999 brelse(iloc.bh);
4000 iget_failed(inode);
4001 return ERR_PTR(ret);
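/*
 * Illustration (not part of the original file): the per-inode checksum
 * seed computed in ext4_iget() above is built by chaining two crc32c
 * passes, first over the inode number and then over the generation,
 * starting from the filesystem seed.  The bitwise crc32c sketch below
 * shows that chaining structure; it does not claim to reproduce the
 * kernel crc32c driver's exact bit conventions, and it assumes a
 * little-endian host so the in-memory uint32_t bytes match __le32.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)   /* reflected polynomial */
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint32_t fs_seed = 0xdeadbeef;    /* stand-in for s_csum_seed */
	uint32_t inum = 12;               /* inode number             */
	uint32_t gen  = 1;                /* i_generation             */

	uint32_t csum = crc32c(fs_seed, &inum, sizeof(inum));
	uint32_t seed = crc32c(csum, &gen, sizeof(gen));

	printf("i_csum_seed = %#x\n", seed);
	return 0;
}
#endif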
4004 static int ext4_inode_blocks_set(handle_t *handle,
4005 struct ext4_inode *raw_inode,
4006 struct ext4_inode_info *ei)
4008 struct inode *inode = &(ei->vfs_inode);
4009 u64 i_blocks = inode->i_blocks;
4010 struct super_block *sb = inode->i_sb;
4012 if (i_blocks <= ~0U) {
4014 * i_blocks can be represented in a 32 bit variable
4015 * as a multiple of 512 bytes
4017 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4018 raw_inode->i_blocks_high = 0;
4019 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4020 return 0;
4022 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4023 return -EFBIG;
4025 if (i_blocks <= 0xffffffffffffULL) {
4027 * i_blocks can be represented in a 48 bit variable
4028 * as a multiple of 512 bytes
4030 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4031 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4032 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4033 } else {
4034 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4035 /* i_blocks is stored in units of the file system block size */
4036 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4037 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4038 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4040 return 0;
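/*
 * Illustration (not part of the original file): the three encodings the
 * function above chooses between, restructured as a standalone sketch.
 * Counts up to 2^32-1 sectors fit in i_blocks_lo alone; up to 2^48-1
 * need the high 16 bits (huge_file feature); beyond that the count is
 * stored in filesystem-block units with the HUGE_FILE inode flag set.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* returns 0 on success, -1 where the kernel returns -EFBIG */
static int encode_blocks(uint64_t i_blocks, int huge_file_feature,
			 unsigned int blkbits,
			 uint32_t *lo, uint16_t *hi, int *huge_flag)
{
	*huge_flag = 0;
	if (i_blocks <= 0xffffffffULL) {
		*lo = (uint32_t)i_blocks;          /* 32-bit, sectors */
		*hi = 0;
		return 0;
	}
	if (!huge_file_feature)
		return -1;                         /* file too big */
	if (i_blocks > 0xffffffffffffULL) {        /* > 48 bits */
		i_blocks >>= blkbits - 9;          /* sectors -> blocks */
		*huge_flag = 1;
	}
	*lo = (uint32_t)i_blocks;                  /* 48-bit split */
	*hi = (uint16_t)(i_blocks >> 32);
	return 0;
}

int main(void)
{
	uint32_t lo; uint16_t hi; int huge;

	encode_blocks(0x123456789ULL, 1, 12, &lo, &hi, &huge);
	printf("lo=%#x hi=%#x huge=%d\n", lo, hi, huge);
	return 0;
}
#endif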
4044 * Post the struct inode info into an on-disk inode location in the
4045 * buffer-cache. This gobbles the caller's reference to the
4046 * buffer_head in the inode location struct.
4048 * The caller must have write access to iloc->bh.
4050 static int ext4_do_update_inode(handle_t *handle,
4051 struct inode *inode,
4052 struct ext4_iloc *iloc)
4054 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4055 struct ext4_inode_info *ei = EXT4_I(inode);
4056 struct buffer_head *bh = iloc->bh;
4057 int err = 0, rc, block;
4058 int need_datasync = 0;
4059 uid_t i_uid;
4060 gid_t i_gid;
4062 /* For fields not tracked in the in-memory inode,
4063 * initialise them to zero for new inodes. */
4064 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4065 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4067 ext4_get_inode_flags(ei);
4068 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4069 i_uid = i_uid_read(inode);
4070 i_gid = i_gid_read(inode);
4071 if (!(test_opt(inode->i_sb, NO_UID32))) {
4072 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4073 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4075 * Fix up interoperability with old kernels. Otherwise, old inodes get
4076 * re-used with the upper 16 bits of the uid/gid intact
4078 if (!ei->i_dtime) {
4079 raw_inode->i_uid_high =
4080 cpu_to_le16(high_16_bits(i_uid));
4081 raw_inode->i_gid_high =
4082 cpu_to_le16(high_16_bits(i_gid));
4083 } else {
4084 raw_inode->i_uid_high = 0;
4085 raw_inode->i_gid_high = 0;
4087 } else {
4088 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4089 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4090 raw_inode->i_uid_high = 0;
4091 raw_inode->i_gid_high = 0;
4093 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4095 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4096 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4097 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4098 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4100 if (ext4_inode_blocks_set(handle, raw_inode, ei))
4101 goto out_brelse;
4102 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4103 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4104 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4105 cpu_to_le32(EXT4_OS_HURD))
4106 raw_inode->i_file_acl_high =
4107 cpu_to_le16(ei->i_file_acl >> 32);
4108 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4109 if (ei->i_disksize != ext4_isize(raw_inode)) {
4110 ext4_isize_set(raw_inode, ei->i_disksize);
4111 need_datasync = 1;
4113 if (ei->i_disksize > 0x7fffffffULL) {
4114 struct super_block *sb = inode->i_sb;
4115 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4116 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4117 EXT4_SB(sb)->s_es->s_rev_level ==
4118 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4119 /* If this is the first large file
4120 * created, add a flag to the superblock.
4122 err = ext4_journal_get_write_access(handle,
4123 EXT4_SB(sb)->s_sbh);
4124 if (err)
4125 goto out_brelse;
4126 ext4_update_dynamic_rev(sb);
4127 EXT4_SET_RO_COMPAT_FEATURE(sb,
4128 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4129 ext4_handle_sync(handle);
4130 err = ext4_handle_dirty_super(handle, sb);
4133 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4134 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4135 if (old_valid_dev(inode->i_rdev)) {
4136 raw_inode->i_block[0] =
4137 cpu_to_le32(old_encode_dev(inode->i_rdev));
4138 raw_inode->i_block[1] = 0;
4139 } else {
4140 raw_inode->i_block[0] = 0;
4141 raw_inode->i_block[1] =
4142 cpu_to_le32(new_encode_dev(inode->i_rdev));
4143 raw_inode->i_block[2] = 0;
4145 } else
4146 for (block = 0; block < EXT4_N_BLOCKS; block++)
4147 raw_inode->i_block[block] = ei->i_data[block];
4149 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4150 if (ei->i_extra_isize) {
4151 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4152 raw_inode->i_version_hi =
4153 cpu_to_le32(inode->i_version >> 32);
4154 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4157 ext4_inode_csum_set(inode, raw_inode, ei);
4159 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4160 rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4161 if (!err)
4162 err = rc;
4163 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4165 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4166 out_brelse:
4167 brelse(bh);
4168 ext4_std_error(inode->i_sb, err);
4169 return err;
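/*
 * Illustration (not part of the original file): the uid/gid handling
 * above splits a 32-bit id into the classic 16-bit low field plus a
 * high field that old (pre-UID32) kernels never look at.  A round-trip
 * sketch with an invented uid:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t uid = 100000;                    /* needs > 16 bits */

	uint16_t lo = (uint16_t)(uid & 0xffff);   /* 34464 */
	uint16_t hi = (uint16_t)(uid >> 16);      /* 1     */

	uint32_t back = ((uint32_t)hi << 16) | lo;
	printf("uid %u -> lo=%u hi=%u -> %u\n", uid, lo, hi, back);
	return 0;
}
#endif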
4173 * ext4_write_inode()
4175 * We are called from a few places:
4177 * - Within generic_file_write() for O_SYNC files.
4178 * Here, there will be no transaction running. We wait for any running
4179 * transaction to commit.
4181 * - Within sys_sync(), kupdate and such.
4182 * We wait on commit, if told to.
4184 * - Within prune_icache() (PF_MEMALLOC == true)
4185 * Here we simply return. We can't afford to block kswapd on the
4186 * journal commit.
4188 * In all cases it is actually safe for us to return without doing anything,
4189 * because the inode has been copied into a raw inode buffer in
4190 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
4191 * knfsd.
4193 * Note that we are absolutely dependent upon all inode dirtiers doing the
4194 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4195 * which we are interested.
4197 * It would be a bug for them to not do this. The code:
4199 * mark_inode_dirty(inode)
4200 * stuff();
4201 * inode->i_size = expr;
4203 * is in error because a kswapd-driven write_inode() could occur while
4204 * `stuff()' is running, and the new i_size will be lost. Plus the inode
4205 * will no longer be on the superblock's dirty inode list.
4207 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4209 int err;
4211 if (current->flags & PF_MEMALLOC)
4212 return 0;
4214 if (EXT4_SB(inode->i_sb)->s_journal) {
4215 if (ext4_journal_current_handle()) {
4216 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4217 dump_stack();
4218 return -EIO;
4221 if (wbc->sync_mode != WB_SYNC_ALL)
4222 return 0;
4224 err = ext4_force_commit(inode->i_sb);
4225 } else {
4226 struct ext4_iloc iloc;
4228 err = __ext4_get_inode_loc(inode, &iloc, 0);
4229 if (err)
4230 return err;
4231 if (wbc->sync_mode == WB_SYNC_ALL)
4232 sync_dirty_buffer(iloc.bh);
4233 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4234 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4235 "IO error syncing inode");
4236 err = -EIO;
4238 brelse(iloc.bh);
4240 return err;
4244 * ext4_setattr()
4246 * Called from notify_change.
4248 * We want to trap VFS attempts to truncate the file as soon as
4249 * possible. In particular, we want to make sure that when the VFS
4250 * shrinks i_size, we put the inode on the orphan list and modify
4251 * i_disksize immediately, so that during the subsequent flushing of
4252 * dirty pages and freeing of disk blocks, we can guarantee that any
4253 * commit will leave the blocks being flushed in an unused state on
4254 * disk. (On recovery, the inode will get truncated and the blocks will
4255 * be freed, so we have a strong guarantee that no future commit will
4256 * leave these blocks visible to the user.)
4258 * Another thing we have to ensure is that if we are in ordered mode
4259 * and the inode is still attached to the committing transaction, we
4260 * must start writeout of all the dirty pages which are being truncated.
4261 * This way we are sure that all the data written in the previous
4262 * transaction are already on disk (truncate waits for pages under
4263 * writeback).
4265 * Called with inode->i_mutex down.
4267 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4269 struct inode *inode = dentry->d_inode;
4270 int error, rc = 0;
4271 int orphan = 0;
4272 const unsigned int ia_valid = attr->ia_valid;
4274 error = inode_change_ok(inode, attr);
4275 if (error)
4276 return error;
4278 if (is_quota_modification(inode, attr))
4279 dquot_initialize(inode);
4280 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4281 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4282 handle_t *handle;
4284 /* (user+group)*(old+new) structure, inode write (sb,
4285 * inode block, ? - but truncate inode update has it) */
4286 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4287 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4288 if (IS_ERR(handle)) {
4289 error = PTR_ERR(handle);
4290 goto err_out;
4292 error = dquot_transfer(inode, attr);
4293 if (error) {
4294 ext4_journal_stop(handle);
4295 return error;
4297 /* Update corresponding info in inode so that everything is in
4298 * one transaction */
4299 if (attr->ia_valid & ATTR_UID)
4300 inode->i_uid = attr->ia_uid;
4301 if (attr->ia_valid & ATTR_GID)
4302 inode->i_gid = attr->ia_gid;
4303 error = ext4_mark_inode_dirty(handle, inode);
4304 ext4_journal_stop(handle);
4307 if (attr->ia_valid & ATTR_SIZE) {
4308 inode_dio_wait(inode);
4310 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4311 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4313 if (attr->ia_size > sbi->s_bitmap_maxbytes)
4314 return -EFBIG;
4318 if (S_ISREG(inode->i_mode) &&
4319 attr->ia_valid & ATTR_SIZE &&
4320 (attr->ia_size < inode->i_size)) {
4321 handle_t *handle;
4323 handle = ext4_journal_start(inode, 3);
4324 if (IS_ERR(handle)) {
4325 error = PTR_ERR(handle);
4326 goto err_out;
4328 if (ext4_handle_valid(handle)) {
4329 error = ext4_orphan_add(handle, inode);
4330 orphan = 1;
4332 EXT4_I(inode)->i_disksize = attr->ia_size;
4333 rc = ext4_mark_inode_dirty(handle, inode);
4334 if (!error)
4335 error = rc;
4336 ext4_journal_stop(handle);
4338 if (ext4_should_order_data(inode)) {
4339 error = ext4_begin_ordered_truncate(inode,
4340 attr->ia_size);
4341 if (error) {
4342 /* Do as much error cleanup as possible */
4343 handle = ext4_journal_start(inode, 3);
4344 if (IS_ERR(handle)) {
4345 ext4_orphan_del(NULL, inode);
4346 goto err_out;
4348 ext4_orphan_del(handle, inode);
4349 orphan = 0;
4350 ext4_journal_stop(handle);
4351 goto err_out;
4356 if (attr->ia_valid & ATTR_SIZE) {
4357 if (attr->ia_size != i_size_read(inode))
4358 truncate_setsize(inode, attr->ia_size);
4359 ext4_truncate(inode);
4362 if (!rc) {
4363 setattr_copy(inode, attr);
4364 mark_inode_dirty(inode);
4368 * If the call to ext4_truncate failed to get a transaction handle at
4369 * all, we need to clean up the in-core orphan list manually.
4371 if (orphan && inode->i_nlink)
4372 ext4_orphan_del(NULL, inode);
4374 if (!rc && (ia_valid & ATTR_MODE))
4375 rc = ext4_acl_chmod(inode);
4377 err_out:
4378 ext4_std_error(inode->i_sb, error);
4379 if (!error)
4380 error = rc;
4381 return error;
4384 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4385 struct kstat *stat)
4387 struct inode *inode;
4388 unsigned long delalloc_blocks;
4390 inode = dentry->d_inode;
4391 generic_fillattr(inode, stat);
4394 * We can't update i_blocks if the block allocation is delayed;
4395 * otherwise, in the case of a system crash before the real block
4396 * allocation is done, we would have i_blocks inconsistent with
4397 * the on-disk file blocks.
4398 * We always keep i_blocks updated together with the real
4399 * allocation. But to avoid confusing userspace, stat
4400 * will return block counts that include the delayed allocation
4401 * blocks for this file.
4403 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4404 EXT4_I(inode)->i_reserved_data_blocks);
4406 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4407 return 0;
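/*
 * Illustration (not part of the original file): the conversion used
 * just above.  stat(2) reports 512-byte sectors, while reserved
 * delalloc blocks are counted in filesystem blocks, hence the shift up
 * by s_blocksize_bits followed by the shift down by 9.  Values below
 * are invented.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long delalloc_blocks = 3;   /* reserved, not yet allocated */
	unsigned int blocksize_bits = 12;    /* 4 KiB blocks */
	unsigned long long st_blocks = 80;   /* sectors already accounted */

	/* 3 blocks * 4096 bytes / 512 = 24 sectors */
	st_blocks += ((unsigned long long)delalloc_blocks
		      << blocksize_bits) >> 9;

	printf("st_blocks = %llu\n", st_blocks);   /* 104 */
	return 0;
}
#endif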
4410 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4412 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4413 return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4414 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4418 * Account for index blocks, block group bitmaps and block group
4419 * descriptor blocks when modifying data blocks and index blocks. In the
4420 * worst case, the index blocks spread over different block groups.
4422 * If data blocks are discontiguous, they may spread over
4423 * different block groups too. If they are contiguous, with flexbg,
4424 * they could still cross a block group boundary.
4426 * Also account for superblock, inode, quota and xattr blocks
4428 static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4430 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4431 int gdpblocks;
4432 int idxblocks;
4433 int ret = 0;
4436 * How many index blocks do we need to touch to modify nrblocks?
4437 * The "Chunk" flag indicates whether the nrblocks are
4438 * physically contiguous on disk.
4440 * Direct IO and fallocate call get_block to allocate
4441 * one single extent at a time, so they can set the "Chunk" flag.
4443 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4445 ret = idxblocks;
4448 * Now let's see how many group bitmaps and group descriptors need
4449 * to be accounted for
4451 groups = idxblocks;
4452 if (chunk)
4453 groups += 1;
4454 else
4455 groups += nrblocks;
4457 gdpblocks = groups;
4458 if (groups > ngroups)
4459 groups = ngroups;
4460 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4461 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4463 /* bitmaps and block group descriptor blocks */
4464 ret += groups + gdpblocks;
4466 /* Blocks for super block, inode, quota and xattr blocks */
4467 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4469 return ret;
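/*
 * Illustration (not part of the original file): the worst-case credit
 * estimate above, reduced to plain arithmetic.  META_TRANS_BLOCKS and
 * the group counts are invented stand-ins for this example; the clamp
 * order mirrors the function above.
 */
#if 0
#include <stdio.h>

#define META_TRANS_BLOCKS 8   /* stand-in for EXT4_META_TRANS_BLOCKS() */

static int meta_trans_blocks(int nrblocks, int idxblocks, int chunk,
			     int ngroups, int gdb_count)
{
	/* one bitmap per touched group: all data in one group if the
	 * range is one contiguous chunk, else assume one group each */
	int groups = idxblocks + (chunk ? 1 : nrblocks);
	int gdpblocks = groups;

	if (groups > ngroups)
		groups = ngroups;
	if (groups > gdb_count)
		gdpblocks = gdb_count;

	return idxblocks + groups + gdpblocks + META_TRANS_BLOCKS;
}

int main(void)
{
	/* 4 discontiguous blocks, 2 index blocks, 16 groups, 1 gdb */
	printf("credits = %d\n",
	       meta_trans_blocks(4, 2, 0, 16, 1));   /* 2+6+1+8 = 17 */
	return 0;
}
#endif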
4473 * Calculate the total number of credits to reserve to fit
4474 * the modification of a single page into a single transaction,
4475 * which may include multiple chunks of block allocations.
4477 * This could be called via ext4_write_begin()
4479 * We need to consider the worst case, when
4480 * one new block is allocated per extent.
4482 int ext4_writepage_trans_blocks(struct inode *inode)
4484 int bpp = ext4_journal_blocks_per_page(inode);
4485 int ret;
4487 ret = ext4_meta_trans_blocks(inode, bpp, 0);
4489 /* Account for data blocks for journalled mode */
4490 if (ext4_should_journal_data(inode))
4491 ret += bpp;
4492 return ret;
4496 * Calculate the journal credits for a chunk of data modification.
4498 * This is called from DIO, fallocate, or whatever else calls
4499 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4501 * Journal buffers for data blocks are not included here, as DIO
4502 * and fallocate do not need to journal data buffers.
4504 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4506 return ext4_meta_trans_blocks(inode, nrblocks, 1);
4510 * The caller must have previously called ext4_reserve_inode_write().
4511 * Given this, we know that the caller already has write access to iloc->bh.
4513 int ext4_mark_iloc_dirty(handle_t *handle,
4514 struct inode *inode, struct ext4_iloc *iloc)
4516 int err = 0;
4518 if (IS_I_VERSION(inode))
4519 inode_inc_iversion(inode);
4521 /* the do_update_inode consumes one bh->b_count */
4522 get_bh(iloc->bh);
4524 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4525 err = ext4_do_update_inode(handle, inode, iloc);
4526 put_bh(iloc->bh);
4527 return err;
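/*
 * Illustration (not part of the original file): ext4_do_update_inode()
 * consumes one reference on iloc->bh, which is why the caller above
 * takes an extra one with get_bh() first.  The same ownership pattern
 * in a generic userspace sketch:
 */
#if 0
#include <stdio.h>

struct obj { int refcount; };

static void get(struct obj *o) { o->refcount++; }
static void put(struct obj *o) { o->refcount--; }

/* callee that "gobbles" one reference, like ext4_do_update_inode() */
static void consume(struct obj *o)
{
	/* ... use o ... */
	put(o);
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	get(&o);        /* balance the reference consume() will drop */
	consume(&o);
	put(&o);        /* drop our own reference */

	printf("refcount = %d\n", o.refcount);   /* 0 */
	return 0;
}
#endif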
4531 * On success, we end up with an outstanding reference count against
4532 * iloc->bh. This _must_ be cleaned up later.
4536 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4537 struct ext4_iloc *iloc)
4539 int err;
4541 err = ext4_get_inode_loc(inode, iloc);
4542 if (!err) {
4543 BUFFER_TRACE(iloc->bh, "get_write_access");
4544 err = ext4_journal_get_write_access(handle, iloc->bh);
4545 if (err) {
4546 brelse(iloc->bh);
4547 iloc->bh = NULL;
4550 ext4_std_error(inode->i_sb, err);
4551 return err;
4555 * Expand an inode by new_extra_isize bytes.
4556 * Returns 0 on success or negative error number on failure.
4558 static int ext4_expand_extra_isize(struct inode *inode,
4559 unsigned int new_extra_isize,
4560 struct ext4_iloc iloc,
4561 handle_t *handle)
4563 struct ext4_inode *raw_inode;
4564 struct ext4_xattr_ibody_header *header;
4566 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4567 return 0;
4569 raw_inode = ext4_raw_inode(&iloc);
4571 header = IHDR(inode, raw_inode);
4573 /* No extended attributes present */
4574 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4575 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4576 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4577 new_extra_isize);
4578 EXT4_I(inode)->i_extra_isize = new_extra_isize;
4579 return 0;
4582 /* try to expand with EAs present */
4583 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4584 raw_inode, handle);
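/*
 * Illustration (not part of the original file): the inode layout the
 * function above is juggling.  A large inode is the 128-byte "good
 * old" inode, then i_extra_isize bytes of extra fixed fields, then the
 * in-inode xattr area introduced by a 4-byte magic.  Offsets below
 * assume a 256-byte on-disk inode, chosen for the example.
 */
#if 0
#include <stdio.h>

#define GOOD_OLD_INODE_SIZE 128
#define INODE_SIZE          256

int main(void)
{
	unsigned int extra_isize = 32;

	unsigned int xattr_hdr = GOOD_OLD_INODE_SIZE + extra_isize;
	unsigned int xattr_room = INODE_SIZE - xattr_hdr;

	/* growing extra_isize steals bytes from the xattr area, which
	 * is why EAs may have to be shifted (or moved out) first */
	printf("xattr header at %u, %u bytes of in-inode xattr space\n",
	       xattr_hdr, xattr_room);
	return 0;
}
#endif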
4588 * What we do here is to mark the in-core inode as clean with respect to inode
4589 * dirtiness (it may still be data-dirty).
4590 * This means that the in-core inode may be reaped by prune_icache
4591 * without having to perform any I/O. This is a very good thing,
4592 * because *any* task may call prune_icache - even ones which
4593 * have a transaction open against a different journal.
4595 * Is this cheating? Not really. Sure, we haven't written the
4596 * inode out, but prune_icache isn't a user-visible syncing function.
4597 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4598 * we start and wait on commits.
4600 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4602 struct ext4_iloc iloc;
4603 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4604 static unsigned int mnt_count;
4605 int err, ret;
4607 might_sleep();
4608 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4609 err = ext4_reserve_inode_write(handle, inode, &iloc);
4610 if (ext4_handle_valid(handle) &&
4611 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4612 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4614 * We need extra buffer credits since we may write into EA block
4615 * with this same handle. If journal_extend fails, then it will
4616 * only result in a minor loss of functionality for that inode.
4617 * If this is felt to be critical, then e2fsck should be run to
4618 * force a large enough s_min_extra_isize.
4620 if ((jbd2_journal_extend(handle,
4621 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4622 ret = ext4_expand_extra_isize(inode,
4623 sbi->s_want_extra_isize,
4624 iloc, handle);
4625 if (ret) {
4626 ext4_set_inode_state(inode,
4627 EXT4_STATE_NO_EXPAND);
4628 if (mnt_count !=
4629 le16_to_cpu(sbi->s_es->s_mnt_count)) {
4630 ext4_warning(inode->i_sb,
4631 "Unable to expand inode %lu. Delete"
4632 " some EAs or run e2fsck.",
4633 inode->i_ino);
4634 mnt_count =
4635 le16_to_cpu(sbi->s_es->s_mnt_count);
4640 if (!err)
4641 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4642 return err;
4646 * ext4_dirty_inode() is called from __mark_inode_dirty()
4648 * We're really interested in the case where a file is being extended.
4649 * i_size has been changed by generic_commit_write() and we thus need
4650 * to include the updated inode in the current transaction.
4652 * Also, dquot_alloc_block() will always dirty the inode when blocks
4653 * are allocated to the file.
4655 * If the inode is marked synchronous, we don't honour that here - doing
4656 * so would cause a commit on atime updates, which we don't bother doing.
4657 * We handle synchronous inodes at the highest possible level.
4659 void ext4_dirty_inode(struct inode *inode, int flags)
4661 handle_t *handle;
4663 handle = ext4_journal_start(inode, 2);
4664 if (IS_ERR(handle))
4665 goto out;
4667 ext4_mark_inode_dirty(handle, inode);
4669 ext4_journal_stop(handle);
4670 out:
4671 return;
4674 #if 0
4676 * Bind an inode's backing buffer_head into this transaction, to prevent
4677 * it from being flushed to disk early. Unlike
4678 * ext4_reserve_inode_write, this leaves behind no bh reference and
4679 * returns no iloc structure, so the caller needs to repeat the iloc
4680 * lookup to mark the inode dirty later.
4682 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4684 struct ext4_iloc iloc;
4686 int err = 0;
4687 if (handle) {
4688 err = ext4_get_inode_loc(inode, &iloc);
4689 if (!err) {
4690 BUFFER_TRACE(iloc.bh, "get_write_access");
4691 err = jbd2_journal_get_write_access(handle, iloc.bh);
4692 if (!err)
4693 err = ext4_handle_dirty_metadata(handle,
4694 NULL,
4695 iloc.bh);
4696 brelse(iloc.bh);
4699 ext4_std_error(inode->i_sb, err);
4700 return err;
4702 #endif
4704 int ext4_change_inode_journal_flag(struct inode *inode, int val)
4706 journal_t *journal;
4707 handle_t *handle;
4708 int err;
4711 * We have to be very careful here: changing a data block's
4712 * journaling status dynamically is dangerous. If we write a
4713 * data block to the journal, change the status and then delete
4714 * that block, we risk forgetting to revoke the old log record
4715 * from the journal and so a subsequent replay can corrupt data.
4716 * So, first we make sure that the journal is empty and that
4717 * nobody is changing anything.
4720 journal = EXT4_JOURNAL(inode);
4721 if (!journal)
4722 return 0;
4723 if (is_journal_aborted(journal))
4724 return -EROFS;
4725 /* We have to allocate physical blocks for delalloc blocks
4726 * before flushing the journal; otherwise delalloc blocks can not
4727 * be allocated any more. Even worse, a truncate on delalloc blocks
4728 * could trigger a BUG by flushing delalloc blocks in the journal.
4729 * Delalloc blocks only exist when data is not journalled.
4731 if (val && test_opt(inode->i_sb, DELALLOC)) {
4732 err = ext4_alloc_da_blocks(inode);
4733 if (err < 0)
4734 return err;
4737 jbd2_journal_lock_updates(journal);
4740 * OK, there are no updates running now, and all cached data is
4741 * synced to disk. We are now in a completely consistent state
4742 * which doesn't have anything in the journal, and we know that
4743 * no filesystem updates are running, so it is safe to modify
4744 * the inode's in-core data-journaling state flag now.
4747 if (val)
4748 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4749 else {
4750 jbd2_journal_flush(journal);
4751 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4753 ext4_set_aops(inode);
4755 jbd2_journal_unlock_updates(journal);
4757 /* Finally we can mark the inode as dirty. */
4759 handle = ext4_journal_start(inode, 1);
4760 if (IS_ERR(handle))
4761 return PTR_ERR(handle);
4763 err = ext4_mark_inode_dirty(handle, inode);
4764 ext4_handle_sync(handle);
4765 ext4_journal_stop(handle);
4766 ext4_std_error(inode->i_sb, err);
4768 return err;
4771 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
4773 return !buffer_mapped(bh);
4776 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4778 struct page *page = vmf->page;
4779 loff_t size;
4780 unsigned long len;
4781 int ret;
4782 struct file *file = vma->vm_file;
4783 struct inode *inode = file->f_path.dentry->d_inode;
4784 struct address_space *mapping = inode->i_mapping;
4785 handle_t *handle;
4786 get_block_t *get_block;
4787 int retries = 0;
4789 sb_start_pagefault(inode->i_sb);
4790 file_update_time(vma->vm_file);
4791 /* Delalloc case is easy... */
4792 if (test_opt(inode->i_sb, DELALLOC) &&
4793 !ext4_should_journal_data(inode) &&
4794 !ext4_nonda_switch(inode->i_sb)) {
4795 do {
4796 ret = __block_page_mkwrite(vma, vmf,
4797 ext4_da_get_block_prep);
4798 } while (ret == -ENOSPC &&
4799 ext4_should_retry_alloc(inode->i_sb, &retries));
4800 goto out_ret;
4803 lock_page(page);
4804 size = i_size_read(inode);
4805 /* Page got truncated from under us? */
4806 if (page->mapping != mapping || page_offset(page) > size) {
4807 unlock_page(page);
4808 ret = VM_FAULT_NOPAGE;
4809 goto out;
4812 if (page->index == size >> PAGE_CACHE_SHIFT)
4813 len = size & ~PAGE_CACHE_MASK;
4814 else
4815 len = PAGE_CACHE_SIZE;
4817 * Return if we have all the buffers mapped. This avoids the need to do
4818 * journal_start/journal_stop, which can block and take a long time.
4820 if (page_has_buffers(page)) {
4821 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4822 ext4_bh_unmapped)) {
4823 /* Wait so that we don't change page under IO */
4824 wait_on_page_writeback(page);
4825 ret = VM_FAULT_LOCKED;
4826 goto out;
4829 unlock_page(page);
4830 /* OK, we need to fill the hole... */
4831 if (ext4_should_dioread_nolock(inode))
4832 get_block = ext4_get_block_write;
4833 else
4834 get_block = ext4_get_block;
4835 retry_alloc:
4836 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
4837 if (IS_ERR(handle)) {
4838 ret = VM_FAULT_SIGBUS;
4839 goto out;
4841 ret = __block_page_mkwrite(vma, vmf, get_block);
4842 if (!ret && ext4_should_journal_data(inode)) {
4843 if (walk_page_buffers(handle, page_buffers(page), 0,
4844 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
4845 unlock_page(page);
4846 ret = VM_FAULT_SIGBUS;
4847 ext4_journal_stop(handle);
4848 goto out;
4850 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4852 ext4_journal_stop(handle);
4853 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4854 goto retry_alloc;
4855 out_ret:
4856 ret = block_page_mkwrite_return(ret);
4857 out:
4858 sb_end_pagefault(inode->i_sb);
4859 return ret;
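/*
 * Illustration (not part of the original file): the partial-page length
 * computation used in ext4_page_mkwrite() above.  Only the page that
 * contains EOF is trimmed; every page before it is fully valid.  4 KiB
 * pages and an invented file size are assumed.
 */
#if 0
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long size = 10000;     /* i_size */
	unsigned long index = 2;             /* page being written */

	unsigned long len;
	if (index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;     /* 10000 - 8192 = 1808 */
	else
		len = PAGE_SIZE;

	printf("page %lu: %lu valid bytes\n", index, len);
	return 0;
}
#endif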