// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>

#include <trace/events/f2fs.h>
#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;
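
/* Pages whose writeback is covered by checkpoint: meta/node pages, directories, and atomic or quota file data. */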
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			is_cold_data(page))
		return true;
	return false;
}
static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}
/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
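
/* Workqueue callback: decrypt all pages of the bio, then resume the post-read pipeline. */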
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		/* fall-through */
	case STEP_INITIAL:
		__read_end_io(ctx->bio);
	}
}
static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}
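
/* Read bio completion: hand off to the post-read pipeline when a context is attached. */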
static void f2fs_read_end_io(struct bio *bio)
{
	if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)),
						FAULT_READ_IO)) {
		f2fs_show_injection_info(FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}

	__read_end_io(bio);
}
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Return true, if pre_bio's bdev is same as its target device.
 */
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
					FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}
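
/* Check whether blk_addr maps to the same disk and partition that the bio already targets. */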
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				struct writeback_control *wbc,
				int npages, bool is_read,
				enum page_type type, enum temp_type temp)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, blk_addr, bio);
	if (is_read) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp);
	}
	if (wbc)
		wbc_init_bio(wbc, bio);

	return bio;
}
static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (test_opt(sbi, LFS) && current->plug)
			blk_finish_plug(current->plug);

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			zero_user_segment(page, 0, PAGE_SIZE);
			SetPagePrivate(page);
			set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose next block address chain. So, we
		 * need to do checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	struct bvec_iter_all iter_all;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, iter_all) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}
void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}
/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFAULT;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
				1, is_read_io(fio->op), fio->type, fio->temp);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_io(fio->io_wbc, page, PAGE_SIZE);

	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
			!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if ((fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
						BIO_MAX_PAGES, false,
						fio->type, fio->temp);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}
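
/* Allocate a read bio targeting blkaddr; attach a bio_post_read_ctx when post-read steps (e.g. decryption) are needed. */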
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
					unsigned nr_pages, unsigned op_flag)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (f2fs_encrypted_file(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (post_read_steps) {
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		if (!ctx) {
			bio_put(bio);
			return ERR_PTR(-ENOMEM);
		}
		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}

	return bio;
}
/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	__submit_bio(sbi, bio, DATA);
	return 0;
}
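
/* Write dn->data_blkaddr into the on-disk node page slot at dn->ofs_in_node. */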
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}
/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0,0,0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
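
/* Find or create the page-cache page for @index and read it from disk when it is not already uptodate. */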
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFAULT;
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFAULT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such the case, its blkaddr can be remained as NEW_ADDR.
	 * see, f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * If it tries to access a hole, return an error.
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occur,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occur.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr != NULL_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, false);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
	f2fs_set_data_blkaddr(dn);

	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from unwritten block via dio_read.
	 */
	return 0;
}
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	/* convert inline data for Direct I/O*/
	if (direct_io) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (direct_io && allow_outplace_dio(inode, iocb, from))
		return 0;

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = true;

	if (direct_io) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, iocb, from) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}
void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}
/*
 * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0,0,0};
	block_t blkaddr;
	unsigned int start_pgofs;

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;

		/* for hardware encryption, but to avoid potential issue in future */
		if (flag == F2FS_GET_BLOCK_DIO)
			f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);
		goto out;
	}

next_dnode:
	if (map->m_may_create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					f2fs_get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					f2fs_get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		err = -EFAULT;
		goto sync_out;
	}

	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-place-update for driect IO under LFS mode */
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (!err) {
				blkaddr = dn.data_blkaddr;
				set_inode_flag(inode, FI_APPEND_WRITE);
			}
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	/* for hardware encryption, but to avoid potential issue in future */
	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type, bool may_write)
{
	struct f2fs_map_blocks map;
	int err;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;
	map.m_may_create = may_write;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = (u64)map.m_len << inode->i_blkbits;
	}
	return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs,
							NO_CHECK_TYPE, create);
}

static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				true);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				false);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE, create);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
static int f2fs_xattr_fiemap(struct inode *inode,
				struct fiemap_extent_info *fieinfo)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	struct node_info ni;
	__u64 phys = 0, len;
	__u32 flags;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	if (f2fs_has_inline_xattr(inode)) {
		int offset;

		page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
						inode->i_ino, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		offset = offsetof(struct f2fs_inode, i_addr) +
					sizeof(__le32) * (DEF_ADDRS_PER_INODE -
					get_inline_xattr_addrs(inode));

		phys += offset;
		len = inline_xattr_size(inode);

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;

		if (!xnid)
			flags |= FIEMAP_EXTENT_LAST;

		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
		if (err || err == 1)
			return err;
	}

	if (xnid) {
		page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
		if (!page)
			return -ENOMEM;

		err = f2fs_get_node_info(sbi, xnid, &ni);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}

		phys = (__u64)blk_to_logical(inode, ni.blk_addr);
		len = inode->i_sb->s_blocksize;

		f2fs_put_page(page, 1);

		flags = FIEMAP_EXTENT_LAST;
	}

	if (phys)
		err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);

	return (err < 0 ? err : 0);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
		if (ret)
			return ret;
	}

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
	if (ret)
		return ret;

	inode_lock(inode);

	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		ret = f2fs_xattr_fiemap(inode, fieinfo);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			goto out;
	}

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (IS_ENCRYPTED(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
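
/* Read one data page: reuse the previous map result when possible, then merge the block into the pending read bio. */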
static int f2fs_read_single_page(struct inode *inode, struct page *page,
					unsigned nr_pages,
					struct f2fs_map_blocks *map,
					struct bio **bio_ret,
					sector_t *last_block_in_bio,
					bool is_readahead)
{
	struct bio *bio = *bio_ret;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	int ret = 0;

	block_in_file = (sector_t)page->index;
	last_block = block_in_file + nr_pages;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
							blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;

	/* just zeroing out page which is beyond EOF */
	if (block_in_file >= last_block)
		goto zero_out;
	/*
	 * Map blocks using the previous result first.
	 */
	if ((map->m_flags & F2FS_MAP_MAPPED) &&
			block_in_file > map->m_lblk &&
			block_in_file < (map->m_lblk + map->m_len))
		goto got_it;

	/*
	 * Then do more f2fs_map_blocks() calls until we are
	 * done with this page.
	 */
	map->m_lblk = block_in_file;
	map->m_len = last_block - block_in_file;

	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
	if (ret)
		goto out;
got_it:
	if ((map->m_flags & F2FS_MAP_MAPPED)) {
		block_nr = map->m_pblk + block_in_file - map->m_lblk;
		SetPageMappedToDisk(page);

		if (!PageUptodate(page) && !cleancache_get_page(page)) {
			SetPageUptodate(page);
			goto confused;
		}

		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
						DATA_GENERIC_ENHANCE_READ)) {
			ret = -EFAULT;
			goto out;
		}
	} else {
zero_out:
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	/*
	 * This page will go to BIO.  Do we need to send this
	 * BIO off first?
	 */
	if (bio && (*last_block_in_bio != block_nr - 1 ||
		!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	if (bio == NULL) {
		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
				is_readahead ? REQ_RAHEAD : 0);
		if (IS_ERR(bio)) {
			ret = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
	}

	/*
	 * If the page is under writeback, we need to wait for
	 * its completion to see the correct decrypted data.
	 */
	f2fs_wait_on_block_writeback(inode, block_nr);

	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
		goto submit_and_realloc;

	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
	ClearPageError(page);
	*last_block_in_bio = block_nr;
	goto out;
confused:
	if (bio) {
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
		bio = NULL;
	}
	unlock_page(page);
out:
	*bio_ret = bio;
	return ret;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 *
 * Note that the aops->readpages() function is ONLY used for read-ahead. If
 * this function ever deviates from doing just read-ahead, it should either
 * use ->readpage() or do the necessary surgery to decouple ->readpages()
 * from read-ahead.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages, bool is_readahead)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	struct f2fs_map_blocks map;
	int ret = 0;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	for (; nr_pages; nr_pages--) {
		if (pages) {
			page = list_last_entry(pages, struct page, lru);

			prefetchw(&page->flags);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page_index(page),
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
					&last_block_in_bio, is_readahead);
		if (ret) {
			SetPageError(page);
			zero_user_segment(page, 0, PAGE_SIZE);
			unlock_page(page);
		}
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return pages ? 0 : ret;
}
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
static int encrypt_one_page(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;
	struct page *mpage;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
			f2fs_flush_merged_writes(fio->sbi);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			gfp_flags |= __GFP_NOFAIL;
			goto retry_encrypt;
		}
		return PTR_ERR(fio->encrypted_page);
	}

	mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
	if (mpage) {
		if (PageUptodate(mpage))
			memcpy(page_address(mpage),
				page_address(fio->encrypted_page), PAGE_SIZE);
		f2fs_put_page(mpage, 1);
	}
	return 0;
}
static inline bool check_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!IS_ENCRYPTED(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			!f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
		return true;

	return false;
}
bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
	if (f2fs_is_pinned_file(inode))
		return true;

	/* if this is cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	return check_inplace_update_policy(inode, fio);
}
bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (test_opt(sbi, LFS))
		return true;
	if (S_ISDIR(inode->i_mode))
		return true;
	if (IS_NOQUOTA(inode))
		return true;
	if (f2fs_is_atomic_file(inode))
		return true;
	if (fio) {
		if (is_cold_data(fio->page))
			return true;
		if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
			return true;
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
			return true;
	}
	return false;
}
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
	struct inode *inode = fio->page->mapping->host;

	if (f2fs_should_update_outplace(inode, fio))
		return false;

	return f2fs_should_update_inplace(inode, fio);
}
int f2fs_do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	struct extent_info ei = {0,0,0};
	struct node_info ni;
	bool ipu_force = false;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	if (need_inplace_update(fio) &&
			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE))
			return -EFAULT;

		ipu_force = true;
		fio->need_lock = LOCK_DONE;
		goto got_it;
	}

	/* Deadlock due to between page->lock and f2fs_lock_op */
	if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
		return -EAGAIN;

	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		goto out;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		clear_cold_data(page);
		goto out_writepage;
	}
got_it:
	if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
		!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFAULT;
		goto out_writepage;
	}
	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (ipu_force ||
		(__is_valid_data_blkaddr(fio->old_blkaddr) &&
					need_inplace_update(fio))) {
		err = encrypt_one_page(fio);
		if (err)
			goto out_writepage;

		set_page_writeback(page);
		ClearPageError(page);
		f2fs_put_dnode(&dn);
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err) {
			if (f2fs_encrypted_file(inode))
				fscrypt_pullback_bio_page(&fio->encrypted_page,
									true);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
			set_inode_flag(inode, FI_UPDATE_WRITE);
		}
		trace_f2fs_do_write_data_page(fio->page, IPU);
		return err;
	}

	if (fio->need_lock == LOCK_RETRY) {
		if (!f2fs_trylock_op(fio->sbi)) {
			err = -EAGAIN;
			goto out_writepage;
		}
		fio->need_lock = LOCK_REQ;
	}

	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
	if (err)
		goto out_writepage;

	fio->version = ni.version;

	err = encrypt_one_page(fio);
	if (err)
		goto out_writepage;

	set_page_writeback(page);
	ClearPageError(page);

	/* LFS mode write path */
	f2fs_outplace_write_data(&dn, fio);
	trace_f2fs_do_write_data_page(page, OPU);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
	f2fs_put_dnode(&dn);
out:
	if (fio->need_lock == LOCK_REQ)
		f2fs_unlock_op(fio->sbi);
	return err;
}
static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NULL_ADDR,
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
		.io_wbc = wbc,
	};

	trace_f2fs_writepage(page, DATA);

	/* we should bypass data pages to proceed the kworkder jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		/*
		 * don't drop any dirty dentry pages for keeping lastest
		 * directory structure.
		 */
		if (S_ISDIR(inode->i_mode))
			goto redirty_out;
		goto out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write 0'th page having journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			f2fs_available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		fio.need_lock = LOCK_DONE;
		err = f2fs_do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0, 0))
		goto redirty_out;
	else
		set_inode_flag(inode, FI_HOT_DATA);

	err = -EAGAIN;
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_write_inline_data(inode, page);
		if (!err)
			goto out;
	}

	if (err == -EAGAIN) {
		err = f2fs_do_write_data_page(&fio);
		if (err == -EAGAIN) {
			fio.need_lock = LOCK_REQ;
			err = f2fs_do_write_data_page(&fio);
		}
	}

	if (err) {
		file_set_keep_isize(inode);
	} else {
		down_write(&F2FS_I(inode)->i_sem);
		if (F2FS_I(inode)->last_disk_size < psize)
			F2FS_I(inode)->last_disk_size = psize;
		up_write(&F2FS_I(inode)->i_sem);
	}

done:
	if (err && err != -ENOENT)
		goto redirty_out;

out:
	inode_dec_dirty_pages(inode);
	if (err) {
		ClearPageUptodate(page);
		clear_cold_data(page);
	}

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
		clear_inode_flag(inode, FI_HOT_DATA);
		f2fs_remove_dirty_inode(inode);
		submitted = NULL;
	}

	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
					!F2FS_I(inode)->cp_task)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, DATA);
		submitted = NULL;
	}

	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	/*
	 * pageout() in MM traslates EAGAIN, so calls handle_write_error()
	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
	 * file_write_and_wait_range() will see EIO error, which is critical
	 * to return value of fsync() followed by atomic_write failure to user.
	 */
	if (!err || wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}
/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;
	int nwritten = 0;

	pagevec_init(&pvec);

	if (get_dirty_pages(mapping->host) <=
				SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
		set_inode_flag(mapping->host, FI_HOT_DATA);
	else
		clear_inode_flag(mapping->host, FI_HOT_DATA);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			done_index = page->index;
retry_write:
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
							DATA, true, true);
				else
					goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
				 * get # of written pages.
				 */
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
					continue;
				} else if (ret == -EAGAIN) {
					ret = 0;
					if (wbc->sync_mode == WB_SYNC_ALL) {
						cond_resched();
						congestion_wait(BLK_RW_ASYNC,
									HZ/50);
						goto retry_write;
					}
					continue;
				}
				done_index = page->index + 1;
				done = 1;
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (--wbc->nr_to_write <= 0 &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	if (nwritten)
		f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
								NULL, 0, DATA);

	return ret;
}
static inline bool __should_serialize_io(struct inode *inode,
					struct writeback_control *wbc)
{
	if (!S_ISREG(inode->i_mode))
		return false;
	if (IS_NOQUOTA(inode))
		return false;
	if (wbc->sync_mode != WB_SYNC_ALL)
		return true;
	if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
		return true;
	return false;
}
static int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;
	bool locked = false;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			f2fs_available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		goto skip_write;

	if (__should_serialize_io(inode, wbc)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (locked)
		mutex_unlock(&sbi->writepages);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */

	f2fs_remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_pagecache(inode, i_size);
		if (!IS_NOQUOTA(inode))
			f2fs_truncate_blocks(inode, i_size, true);

		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	}
}
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, flag, true);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			f2fs_do_read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, flag, false);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false, drop_atomic = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	err = f2fs_is_checkpoint_ready(sbi);
	if (err)
		goto fail;

	if ((f2fs_is_atomic_file(inode) &&
			!f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
			is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		err = -ENOMEM;
		drop_atomic = true;
		goto fail;
	}

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	/*
	 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
	 * wait_for_stable_page. Will wait that below with our IO control.
	 */
	page = f2fs_pagecache_get_page(mapping, index,
				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && !IS_NOQUOTA(inode) &&
			has_not_enough_free_secs(sbi, 0, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;

	if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode)) {
		zero_user_segment(page, len, PAGE_SIZE);
		return 0;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
				DATA_GENERIC_ENHANCE_READ)) {
			err = -EFAULT;
			goto fail;
		}
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	if (drop_atomic)
		f2fs_drop_inmem_pages_all(sbi, false);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	/*
	 * This should come from len == PAGE_SIZE, and we expect copied to be
	 * PAGE_SIZE. Otherwise, we treat it with zero copied and let
	 * generic_perform_write() try to copy data again through copied=0.
	 */
	if (!PageUptodate(page)) {
		if (unlikely(copied != len))
			copied = 0;
		else
			SetPageUptodate(page);
	}
	if (!copied)
		goto unlock_out;

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);
unlock_out:
	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
							loff_t offset)
{
	unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	unsigned long align = offset | iov_iter_alignment(iter);
	struct block_device *bdev = inode->i_sb->s_bdev;

	if (align & blocksize_mask) {
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			return -EINVAL;
		return 1;
	}
	return 0;
}

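/*
 * Worked example of the alignment fallback above (assumed geometry): with a
 * 4KiB filesystem block size but a 512B logical block size on the backing
 * device, a request at offset 512 fails the first mask check, passes the
 * second, and check_direct_IO() returns 1. f2fs_direct_IO() below turns any
 * positive return into "return 0", which makes the generic layer fall back
 * to buffered I/O; only misalignment against the logical block size yields
 * -EINVAL.
 */
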
static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kvfree(dio);

	bio_endio(bio);
}

static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio)
		goto out;

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

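/*
 * The two helpers above wrap and later unwrap the bio completion path so the
 * in-flight DIO page counters stay balanced. Schematically (illustration
 * only):
 *
 *	submit:   dio->orig_end_io  = bio->bi_end_io;
 *	          dio->orig_private = bio->bi_private;
 *	          bio->bi_end_io    = f2fs_dio_end_io;
 *	          inc_page_count(sbi, F2FS_DIO_WRITE or F2FS_DIO_READ);
 *
 *	complete: dec_page_count(sbi, F2FS_DIO_WRITE or F2FS_DIO_READ);
 *	          bio->bi_end_io    = dio->orig_end_io;
 *	          bio->bi_private   = dio->orig_private;
 *	          bio_endio(bio);    runs the original completion handler
 */
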
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;
	enum rw_hint hint = iocb->ki_hint;
	int whint_mode = F2FS_OPTION(sbi).whint_mode;
	bool do_opu;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err < 0 ? err : 0;

	if (f2fs_force_buffered_io(inode, iocb, iter))
		return 0;

	do_opu = allow_outplace_dio(inode, iocb, iter);

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
		iocb->ki_hint = WRITE_LIFE_NOT_SET;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
			up_read(&fi->i_gc_rwsem[rw]);
			iocb->ki_hint = hint;
			err = -EAGAIN;
			goto out;
		}
	} else {
		down_read(&fi->i_gc_rwsem[rw]);
		if (do_opu)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			DIO_LOCKING | DIO_SKIP_HOLES);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);

	up_read(&fi->i_gc_rwsem[rw]);

	if (rw == WRITE) {
		if (whint_mode == WHINT_MODE_OFF)
			iocb->ki_hint = hint;
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

out:
	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

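/*
 * The IOCB_NOWAIT branch above serves RWF_NOWAIT I/O: if i_gc_rwsem is
 * contended (for example while GC is migrating this inode's blocks), the
 * request fails with -EAGAIN rather than sleeping. Hypothetical userspace
 * trigger (fd opened with O_DIRECT, buf and offset suitably aligned):
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *	ssize_t n = pwritev2(fd, &iov, 1, 0, RWF_NOWAIT);
 *	if (n < 0 && errno == EAGAIN)
 *		resubmit from a context that is allowed to block
 */
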
void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_META);
		} else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		} else {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}
	}

	clear_cold_data(page);

	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return f2fs_drop_inmem_page(inode, page);

	f2fs_clear_page_private(page);
}

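/*
 * Counter symmetry note: the decrements above undo the accounting done when
 * the page was dirtied (inode_inc_dirty_pages() via f2fs_update_dirty_page()
 * for data/dentry pages, or the META/NODE counters for their mappings), so
 * invalidating a fully dirty page does not skew the F2FS_DIRTY_* statistics.
 * Illustrative pairing:
 *
 *	dirty:       inode_inc_dirty_pages(inode);
 *	invalidate:  inode_dec_dirty_pages(inode);
 *	             f2fs_remove_dirty_inode(inode);
 */
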
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	clear_cold_data(page);
	f2fs_clear_page_private(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			f2fs_register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		f2fs_update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

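/*
 * Atomic-write note: between F2FS_IOC_START_ATOMIC_WRITE and
 * F2FS_IOC_COMMIT_ATOMIC_WRITE, dirtied data pages are parked on
 * fi->inmem_pages via f2fs_register_inmem_page() instead of entering normal
 * writeback, so the whole update commits or is dropped as a unit.
 * Hypothetical userspace sequence:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, buf, len, off);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */
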
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

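/*
 * f2fs_bmap() backs the legacy FIBMAP ioctl; inline-data files report 0
 * because their payload lives inside the inode block rather than at a data
 * block address, and dirty pages are flushed first so the reported mapping
 * is stable. Hypothetical caller (blk: file block number in, device block
 * number out):
 *
 *	int blk = 0;
 *	ioctl(fd, FIBMAP, &blk);
 */
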
#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with the inmem_lock hold */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, mode, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;
		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page)) {
		f2fs_set_page_private(newpage, page_private(page));
		f2fs_clear_page_private(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

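/*
 * Reference note for the atomic-write case above: every page on
 * fi->inmem_pages holds one extra reference, which is why extra_count is 1
 * for migrate_page_move_mapping() and why that reference is handed over
 * explicitly after the list entry is updated:
 *
 *	put_page(page);        drop the inmem reference on the old page
 *	get_page(newpage);     take it on the replacement page
 */
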
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

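/*
 * Clearing PAGECACHE_TAG_DIRTY here only affects tag-based lookups such as
 * those used by writeback; the PG_dirty flag on the page itself is left
 * alone. Illustrative lookup that would no longer return this page (sketch,
 * not code from this file):
 *
 *	pagevec_init(&pvec);
 *	nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
 *					PAGECACHE_TAG_DIRTY);
 */
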
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, 0);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void __exit f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}
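
/*
 * The mempool above guarantees that up to NUM_PREALLOC_POST_READ_CTXS (128)
 * bio_post_read_ctx objects can always be obtained on the read path, so
 * post-read processing (e.g. decryption) cannot fail on a transient
 * allocation failure. Typical pairing elsewhere in this file (sketch):
 *
 *	ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
 *	...
 *	mempool_free(ctx, bio_post_read_ctx_pool);
 */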