/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

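/*
 * Reserve a NEW_ADDR slot for the block addressed by @dn: refuse if the
 * inode forbids allocation (FI_NO_ALLOC), charge one valid block in the
 * superblock info, and record NEW_ADDR in the node page so a real block
 * can be assigned later at writeback time.
 */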
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

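/*
 * Check whether @pgofs hits the inode's single cached extent.  On a hit,
 * map @bh_result to the corresponding block address, report how many
 * blocks stay contiguous from @pgofs, and return 1; return 0 on a miss.
 */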
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
#endif
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

#ifdef CONFIG_F2FS_STAT_FS
	sbi->total_hit_ext++;
#endif
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

#ifdef CONFIG_F2FS_STAT_FS
		sbi->read_hit_ext++;
#endif
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

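/*
 * Record the new block address of @dn in its node page and then keep the
 * inode's single cached extent consistent: start a new extent, extend it
 * at either end, or split it around the updated offset, keeping the
 * larger remaining half.
 */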
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}

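/*
 * Look up the data page at @index without allocating anything: holes give
 * -ENOENT and blocks preallocated by fallocate() give -EINVAL.  With @sync
 * the read is waited for and verified; otherwise it is only submitted as
 * readahead.
 */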
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 * Note that, npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

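/*
 * Completion handler for the read bios built by f2fs_readpage(): mark each
 * page up-to-date on success or flag an error, unlock it, and free the bio
 * along with its private data.
 */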
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only where it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
						dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

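/*
 * Write one dirty data page: look up its block address through the dnode,
 * then either rewrite it in place (when SSR makes an in-place update the
 * better choice) or allocate a new block address and refresh the extent
 * cache accordingly.
 */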
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

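/*
 * ->writepage callback.  Pages entirely beyond i_size are skipped, the page
 * straddling i_size is zero-filled past EOF, dentry pages are written under
 * checkpoint control, and regular data pages are written under
 * mutex_lock_op(), followed by an f2fs_balance_fs() check.
 */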
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

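/*
 * Per-page callback handed to write_cache_pages(); it forwards to the
 * mapping's ->writepage() and records any error on the mapping.
 */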
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	unlock_page(page);
	page_cache_release(page);
	return copied;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

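/*
 * ->set_page_dirty: mark the page up-to-date, dirty it without buffer heads,
 * and let set_dirty_dir_page() track dirty dentry pages for checkpoint.
 */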
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};