/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>

#include <trace/events/f2fs.h>
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
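
/*
 * Write-side completion handler: on error the page's mapping is flagged with
 * AS_EIO and checkpointing is stopped; each page's writeback is ended, and
 * waiters on cp_wait are woken once no pages remain under writeback.
 */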
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}
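
/*
 * Submit the bio cached in an f2fs_bio_info, emitting the matching read or
 * write trace event before handing it to the block layer.
 */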
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}
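
/*
 * Merged submission path: append the page to the per-type cached bio so that
 * contiguous blocks are written in one request, flushing the cached bio first
 * whenever the new block is not adjacent or the rw flags differ.
 */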
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;
	__le32 *addr_array;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
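
/*
 * Translate a cached extent into a mapped buffer_head: the buffer is mapped
 * at the block backing pgofs, and b_size is clamped to the smaller of the
 * caller's request and the remaining length of the extent.
 */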
static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs,
			struct extent_info *ei, struct buffer_head *bh_result)
{
	unsigned int blkbits = sb->s_blocksize_bits;
	size_t max_size = bh_result->b_size;
	size_t mapped_size;

	clear_buffer_new(bh_result);
	map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs);
	mapped_size = (ei->fofs + ei->len - pgofs) << blkbits;
	bh_result->b_size = min(max_size, mapped_size);
}
static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return false;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return true;
	}
	read_unlock(&fi->ext_lock);
	return false;
}
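
/*
 * Update the single in-inode extent for a new (fofs, blkaddr) mapping by
 * extending, shrinking, splitting, or dropping it; returns whether the
 * on-disk copy of the extent needs to be rewritten.
 */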
static bool update_extent_info(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	return need_update;
}
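
/*
 * Allocate an extent_node for @ei and link it into the extent tree's rbtree
 * at the position given by @parent/@p, bumping the global node count.
 */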
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}
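
/*
 * Look up the per-inode extent tree in the radix tree under a read lock and
 * take a reference on it; returns NULL if the inode has no tree yet.
 */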
static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
							nid_t ino)
{
	struct extent_tree *et;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		return NULL;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	return et;
}
static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	return et;
}
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			et->cached_en = en;
			return en;
		}
	}
	return NULL;
}
static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}
static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return __attach_extent_node(sbi, et, ei, parent, p);
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}
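
/*
 * Seed the per-inode extent tree from the on-disk extent stored in the inode,
 * provided that extent is at least F2FS_MIN_EXTENT_LEN blocks long.
 */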
static void f2fs_init_extent_tree(struct inode *inode,
						struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);
	if (et->count)
		goto out;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	en = __insert_extent_tree(sbi, et, &ei, NULL);
	if (en) {
		et->cached_en = en;

		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		return false;

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	}
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
}
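
/*
 * Record a new block address in the extent tree: the extent covering @fofs
 * is detached, up to two remainder pieces are re-inserted, a one-block extent
 * for the new address is added, and the global LRU list is refreshed.
 */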
static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;
	unsigned int endofs;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 split from en, they will become more and more smaller
	 * fragments after splitting several times. So if the length is smaller
	 * than F2FS_MIN_EXTENT_LEN, we will not add them into extent tree.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}
void f2fs_preserve_extent_tree(struct inode *inode)
{
	struct extent_tree *et;
	struct extent_info *ext = &F2FS_I(inode)->ext;
	bool sync = false;

	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return;

	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
	if (!et) {
		if (ext->len) {
			ext->len = 0;
			update_inode_page(inode);
		}
		return;
	}

	read_lock(&et->lock);
	if (et->count) {
		struct extent_node *en;

		if (et->cached_en) {
			en = et->cached_en;
		} else {
			struct rb_node *node = rb_first(&et->root);

			if (!node)
				node = rb_last(&et->root);
			en = rb_entry(node, struct extent_node, rb_node);
		}

		if (__is_extent_same(ext, &en->ei))
			goto out;

		*ext = en->ei;
		sync = true;
	} else if (ext->len) {
		ext->len = 0;
		sync = true;
	}
out:
	read_unlock(&et->lock);
	atomic_dec(&et->refcount);

	if (sync)
		update_inode_page(inode);
}
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	void **slot;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	if (available_free_memory(sbi, EXTENT_CACHE))
		return;

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!nr_shrink--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
		}
	}
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
			tree_cnt++;
		}
	}
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
}
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		goto out;

	/* free all extent info belong to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_write(&sbi->extent_tree_lock);
		goto out;
	}
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}
void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
{
	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		f2fs_init_extent_tree(inode, i_ext);

	write_lock(&F2FS_I(inode)->ext_lock);
	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
	write_unlock(&F2FS_I(inode)->ext_lock);
}
static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return false;

	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
}
*dn
)
889 struct f2fs_inode_info
*fi
= F2FS_I(dn
->inode
);
892 f2fs_bug_on(F2FS_I_SB(dn
->inode
), dn
->data_blkaddr
== NEW_ADDR
);
894 if (is_inode_flag_set(fi
, FI_NO_EXTENT
))
897 fofs
= start_bidx_of_node(ofs_of_node(dn
->node_page
), fi
) +
900 if (test_opt(F2FS_I_SB(dn
->inode
), EXTENT_CACHE
))
901 return f2fs_update_extent_tree(dn
->inode
, fofs
,
904 if (update_extent_info(dn
->inode
, fofs
, dn
->data_blkaddr
))
908 struct page
*find_data_page(struct inode
*inode
, pgoff_t index
, bool sync
)
910 struct address_space
*mapping
= inode
->i_mapping
;
911 struct dnode_of_data dn
;
913 struct extent_info ei
;
915 struct f2fs_io_info fio
= {
917 .rw
= sync
? READ_SYNC
: READA
,
921 * If sync is false, it needs to check its block allocation.
922 * This is need and triggered by two flows:
923 * gc and truncate_partial_data_page.
928 page
= find_get_page(mapping
, index
);
929 if (page
&& PageUptodate(page
))
931 f2fs_put_page(page
, 0);
933 if (f2fs_lookup_extent_cache(inode
, index
, &ei
)) {
934 dn
.data_blkaddr
= ei
.blk
+ index
- ei
.fofs
;
938 set_new_dnode(&dn
, inode
, NULL
, NULL
, 0);
939 err
= get_dnode_of_data(&dn
, index
, LOOKUP_NODE
);
944 if (dn
.data_blkaddr
== NULL_ADDR
)
945 return ERR_PTR(-ENOENT
);
947 /* By fallocate(), there is no cached page, but with NEW_ADDR */
948 if (unlikely(dn
.data_blkaddr
== NEW_ADDR
))
949 return ERR_PTR(-EINVAL
);
952 page
= grab_cache_page(mapping
, index
);
954 return ERR_PTR(-ENOMEM
);
956 if (PageUptodate(page
)) {
961 fio
.blk_addr
= dn
.data_blkaddr
;
962 err
= f2fs_submit_page_bio(F2FS_I_SB(inode
), page
, &fio
);
967 wait_on_page_locked(page
);
968 if (unlikely(!PageUptodate(page
))) {
969 f2fs_put_page(page
, 0);
970 return ERR_PTR(-EIO
);
/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}
got_it:
	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
	return;
}
/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result);
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		set_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			set_buffer_new(bh_result);
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = mpage_readpage(page, get_data_block);

	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(page, &dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
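
/*
 * write_cache_pages() callback: forward the page to the mapping's ->writepage
 * and record any error on the mapping so writeback callers can observe it.
 */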
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);

	mapping_set_error(mapping, ret);
	return ret;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		goto fail;
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (iov_iter_rw(iter) == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			  unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}
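
/*
 * Initialize the per-superblock extent cache bookkeeping: the radix tree of
 * per-inode extent trees, its rwsem, the global LRU list and its spinlock,
 * and the tree/node counters.
 */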
void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}
int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;

	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};