/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;
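
/*
 * Completion handler for read bios: encrypted bios are handed to the crypto
 * code first, then each page in the bio is marked up-to-date on success or
 * cleared on error.
 */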
static void f2fs_read_end_io(struct bio *bio, int err)
	if (f2fs_bio_encrypted(bio)) {
		f2fs_release_crypto_ctx(bio->bi_private);
		f2fs_end_io_crypto_work(bio->bi_private, bio);

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		SetPageUptodate(page);
		ClearPageUptodate(page);
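
/*
 * Completion handler for write bios: on error the mapping is flagged with
 * AS_EIO and checkpointing is stopped; writeback is ended for each page,
 * and waiters on cp_wait are woken once no F2FS_WRITEBACK pages remain.
 */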
static void f2fs_write_end_io(struct bio *bio, int err)
	struct f2fs_sb_info *sbi = bio->bi_private;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		set_bit(AS_EIO, &page->mapping->flags);
		f2fs_stop_checkpoint(sbi);
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;
static void __submit_merged_bio(struct f2fs_bio_info *io)
	struct f2fs_io_info *fio = &io->fio;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
	struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		f2fs_put_page(page, 1);

	submit_bio(fio->rw, bio);
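
/*
 * Merge the page into the per-type bio kept in sbi->read_io/write_io.
 * A pending bio is submitted first when the new block is not contiguous
 * with the last one or the rw mode differs.
 */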
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);

	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
/*
 * Lock ordering for the change of data block address:
 * update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
	struct f2fs_node *rn;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
int reserve_new_block(struct dnode_of_data *dn)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
	bool need_put = dn->inode_page ? false : true;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
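
/*
 * The two helpers below implement the legacy single-extent cache kept in
 * f2fs_inode_info.ext, used when the rb-tree based extent cache
 * (the EXTENT_CACHE mount option) is not enabled.
 */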
static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);

	read_unlock(&fi->ext_lock);
static bool update_extent_info(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)

	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.blk = blkaddr;

	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {

	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;

			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		set_inode_flag(fi, FI_NO_EXTENT);

	write_unlock(&fi->ext_lock);
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);

	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);

	atomic_inc(&sbi->total_ext_node);
static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
	rb_erase(&en->rb_node, &et->root);

	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
							nid_t ino)
	struct extent_tree *et;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
		up_read(&sbi->extent_tree_lock);

	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);
static struct extent_tree *__grab_extent_tree(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		sbi->total_ext_tree++;
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);
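
/*
 * Look up the extent node covering @fofs: try the cached_en shortcut first,
 * then walk the rb-tree.
 */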
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;

		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
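
/*
 * Insert @ei into the rb-tree, merging it into a front- or back-mergeable
 * neighbour when possible; a node freed by merging is returned through @den.
 */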
static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);

	return __attach_extent_node(sbi, et, ei, parent, p);
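
/*
 * Detach and free extent nodes of a tree; @free_all drops every node
 * unconditionally. Returns the number of nodes removed (count - et->count).
 */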
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);

	return count - et->count;
static void f2fs_init_extent_tree(struct inode *inode,
						struct f2fs_extent *i_ext)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
			le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	en = __insert_extent_tree(sbi, et, &ei, NULL);
		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	et = __find_extent_tree(sbi, inode->i_ino);

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
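
/*
 * Update the rb-tree extent cache for a single block at @fofs: the covering
 * extent, if any, is detached, its remaining left/right parts are re-inserted
 * when they are at least F2FS_MIN_EXTENT_LEN long, and the new (fofs, blkaddr)
 * mapping is inserted and moved to the tail of the global LRU list.
 */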
static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);

	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
					fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);

	/* 3. update extent in extent cache */
	set_extent_info(&ei, fofs, blkaddr, 1);
	en3 = __insert_extent_tree(sbi, et, &ei, &den);

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
	/*
	 * en1 and en2 split from en; they will become smaller and smaller
	 * fragments after several splits. So if the length is smaller
	 * than F2FS_MIN_EXTENT_LEN, we will not add them into the extent tree.
	 */
		list_add_tail(&en1->list, &sbi->extent_list);
		list_add_tail(&en2->list, &sbi->extent_list);
	if (list_empty(&en3->list))
		list_add_tail(&en3->list, &sbi->extent_list);
		list_move_tail(&en3->list, &sbi->extent_list);

	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
		kmem_cache_free(extent_node_slab, en);
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
void f2fs_preserve_extent_tree(struct inode *inode)
	struct extent_tree *et;
	struct extent_info *ext = &F2FS_I(inode)->ext;

	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))

	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
		update_inode_page(inode);

	read_lock(&et->lock);
		struct extent_node *en;

			struct rb_node *node = rb_first(&et->root);
			node = rb_last(&et->root);
			en = rb_entry(node, struct extent_node, rb_node);

		if (__is_extent_same(ext, &en->ei))
	} else if (ext->len) {
	read_unlock(&et->lock);
	atomic_dec(&et->refcount);

	update_inode_page(inode);
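
/*
 * Shrinker entry point: drop cached extent nodes from the global LRU list,
 * then free node entries tree by tree via radix-tree gang lookup, and finally
 * delete trees that are unreferenced and empty.
 */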
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))

	if (available_free_memory(sbi, EXTENT_CACHE))

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		list_del_init(&en->list);
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
void f2fs_destroy_extent_tree(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))

	et = __find_extent_tree(sbi, inode->i_ino);

	/* free all extent info belonging to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
		up_write(&sbi->extent_tree_lock);

	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_destroy_extent_tree(inode, node_cnt);
void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		f2fs_init_extent_tree(inode, i_ext);

	write_lock(&F2FS_I(inode)->ext_lock);
	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
	write_unlock(&F2FS_I(inode)->ext_lock);
static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))

	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn)
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
		return f2fs_update_extent_tree(dn->inode, fofs,
							dn->data_blkaddr);

	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct extent_info ei;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.encrypted_page = NULL,

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		f2fs_put_page(page, 1);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);

	if (PageUptodate(page)) {

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(&fio);
struct page *find_data_page(struct inode *inode, pgoff_t index)
	struct address_space *mapping = inode->i_mapping;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC);

	if (PageUptodate(page))

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
/*
 * If it tries to access a hole, return an error.
 * The callers (functions in dir.c and GC) need to know whether this page
 * exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
	struct address_space *mapping = inode->i_mapping;

	page = get_read_data_page(inode, index, READ_SYNC);

	/* wait for read completion */
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;

	page = grab_cache_page(mapping, index);
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (PageUptodate(page))

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC);

		/* wait for read completion */

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
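
/*
 * Allocate a new data block for @dn: consume one valid block, pick a data
 * log (CURSEG_DIRECT_IO for offset 0 of the inode page, CURSEG_WARM_DATA
 * otherwise), record the new address and extend i_size when needed.
 */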
static int __allocate_data_block(struct dnode_of_data *dn)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);

	/* direct IO doesn't use extent cache to maximize the performance */
	set_data_blkaddr(dn);

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);

	f2fs_balance_fs(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&dn, start, ALLOC_NODE))

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	while (dn.ofs_in_node < end_offset && len) {
		blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
			if (__allocate_data_block(&dn))

		sync_inode_page(&dn);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, bool fiemap)
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;

	f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);

	if (dn.data_blkaddr == NEW_ADDR && !fiemap)

	if (dn.data_blkaddr != NULL_ADDR) {
		map->m_flags = F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
		if (dn.data_blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
	} else if (create) {
		err = __allocate_data_block(&dn);
		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	if (dn.ofs_in_node >= end_offset) {
		sync_inode_page(&dn);
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);

		if (dn.data_blkaddr == NEW_ADDR && !fiemap)

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		/* Give more consecutive addresses for the readahead */
		if ((map->m_pblk != NEW_ADDR &&
				blkaddr == (map->m_pblk + ofs)) ||
				(map->m_pblk == NEW_ADDR &&
				blkaddr == NEW_ADDR)) {

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	f2fs_unlock_op(F2FS_I_SB(inode));

	trace_f2fs_map_blocks(inode, map, err);
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, bool fiemap)
	struct f2fs_map_blocks map;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, fiemap);
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
	return __get_data_block(inode, iblock, bh_result, create, false);

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
	return __get_data_block(inode, iblock, bh_result, create, true);
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
	return (offset >> inode->i_blkbits);

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
	return (blk << inode->i_blkbits);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	bool past_eof = false, whole_file = false;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);

	mutex_lock(&inode->i_mutex);

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);

	if (!buffer_mapped(&map_bh)) {
		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)

		if (past_eof && size) {
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
			ret = fiemap_fill_next_extent(fieinfo, logical,

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)

		if (start_blk > last_blk && !whole_file) {
			ret = fiemap_fill_next_extent(fieinfo, logical,

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
			ret = fiemap_fill_next_extent(fieinfo, logical,

		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;

		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)

	if (fatal_signal_pending(current))

	mutex_unlock(&inode->i_mutex);
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
		prefetchw(&page->flags);
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0, false))
				goto set_error_page;

		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
			submit_bio(READ, bio);

			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				ctx = f2fs_get_crypto_ctx(inode);
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				cpage = find_lock_page(
					META_MAPPING(F2FS_I_SB(inode)),
					f2fs_wait_on_page_writeback(cpage,
					f2fs_put_page(cpage, 1);

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
				f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;

		zero_user_segment(page, 0, PAGE_CACHE_SIZE);

			submit_bio(READ, bio);

		page_cache_release(page);
	BUG_ON(pages && !list_empty(pages));
	submit_bio(READ, bio);
static int f2fs_read_data_page(struct file *file, struct page *page)
	struct inode *inode = page->mapping->host;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
int do_write_data_page(struct f2fs_io_info *fio)
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better do in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}

	f2fs_put_dnode(&dn);
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	struct f2fs_io_info fio = {
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.encrypted_page = NULL,

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
	if (f2fs_is_drop_cache(inode))
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
		err = do_write_data_page(&fio);

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))

	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);

	if (err && err != -ENOENT)

	clear_cold_data(page);

	inode_dec_dirty_pages(inode);
		ClearPageUptodate(page);

	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
static int f2fs_write_data_pages(struct address_space *mapping,
				struct writeback_control *wbc)
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);

	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);

		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	wbc->pages_skipped += get_dirty_pages(inode);
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
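
/*
 * ->write_begin: grab and prepare the target page, converting or reserving
 * the block as needed; inline data is either served from the inode page or
 * converted to a regular block before the write proceeds.
 */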
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
		err = f2fs_convert_inline_inode(inode);

	page = grab_cache_page_write_begin(mapping, index, flags);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			err = f2fs_convert_inline_page(&dn, page);
		err = f2fs_reserve_block(&dn, index);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.blk_addr = dn.data_blkaddr,
			.encrypted_page = NULL,

		err = f2fs_submit_page_bio(&fio);

		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt_one(inode, page);
				f2fs_put_page(page, 1);

	SetPageUptodate(page);
	clear_cold_data(page);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);

	f2fs_write_failed(mapping, pos + len);
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);

	f2fs_put_page(page, 1);
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
							loff_t offset)
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (iov_iter_rw(iter) == READ)

	if (offset & blocksize_mask)

	if (iov_iter_alignment(iter) & blocksize_mask)
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
							loff_t offset)
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))

	if (check_direct_IO(inode, iter, offset))

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);

	ClearPagePrivate(page);
int f2fs_release_page(struct page *page, gfp_t wait)
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))

	ClearPagePrivate(page);
static int f2fs_set_data_page_dirty(struct page *page)
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);

	return generic_block_bmap(mapping, block, get_data_block);
void init_extent_cache_info(struct f2fs_sb_info *sbi)
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
int __init create_extent_cache(void)
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)

	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
void destroy_extent_cache(void)
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
};