/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}
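/*
 * Note on the fault path above: a block is reserved for the faulting page
 * under f2fs_lock_op() before the page is allowed to become writable, so
 * later writeback of a shared-writable mapping cannot land on an
 * unallocated hole.
 */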
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync && !f2fs_skip_inode_update(inode)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
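/*
 * Summary of the fsync policy above: when need_do_checkpoint() says no
 * checkpoint is required, fsync only writes the dirty data/node pages plus
 * an APPEND_INO/UPDATE_INO entry, so roll-forward recovery can rebuild the
 * inode after a sudden power cut; the much more expensive f2fs_sync_fs()
 * checkpoint is issued only for the cases enumerated in
 * need_do_checkpoint().
 */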
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
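/*
 * For SEEK_DATA, a block counts as data if it is allocated on disk or if
 * it is a preallocated NEW_ADDR block whose page is the first dirty page
 * in the mapping; for SEEK_HOLE, only NULL_ADDR counts as a hole (see
 * __found_offset() above).
 */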
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!f2fs_encrypted_inode(inode))
			return -ENOKEY;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);
	struct dentry *dir;

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return ret;
}
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}
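/*
 * The helper above only invalidates block addresses inside the single
 * dnode block referenced by @dn; truncate_data_blocks() below is the
 * convenience wrapper that clears a whole dnode block (ADDRS_PER_BLOCK
 * entries).
 */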
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) ||
					!S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	f2fs_mark_inode_dirty_sync(inode);
	return 0;
}
int f2fs_getattr(struct vfsmount *mnt,
		 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);

	generic_fillattr(inode, stat);
	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				fscrypt_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode), true);
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	f2fs_mark_inode_dirty_sync(inode);
	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}
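/*
 * punch_hole() zeroes the partial pages at both edges via fill_zero() and
 * then drops the fully covered blocks with truncate_hole(), so the hole
 * reads back as zeroes while its on-disk blocks are deallocated.
 */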
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.node_page,
								dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
								1, false);
					f2fs_i_blocks_write(dst_inode,
								1, true);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while ((do_replace[i] || blkaddr[i] == NULL_ADDR) && --ilen);

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
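/*
 * The exchange above is performed in batches of at most
 * 4 * ADDRS_PER_BLOCK block addresses so the two temporary arrays stay
 * small; if read-out or cloning fails mid-batch, __roll_back_blkaddrs()
 * restores the source addresses saved in src_blkaddr[].
 */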
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);
			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		nr = min(nr, delta);
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int ret;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (ret) {
		pgoff_t last_off;

		if (!map.m_len)
			return ret;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		f2fs_mark_inode_dirty_sync(inode);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
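/*
 * Dispatch summary for the fallocate handler above: PUNCH_HOLE,
 * COLLAPSE_RANGE, ZERO_RANGE and INSERT_RANGE each have a dedicated helper;
 * a plain fallocate() (optionally with KEEP_SIZE) falls through to
 * expand_inode_data(), which preallocates blocks via an
 * F2FS_GET_BLOCK_PRE_AIO mapping.
 */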
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any inmemory pages by close called by other process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remained atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}
#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	inode_lock(inode);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			inode_unlock(inode);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	inode_unlock(inode);

	inode->i_ctime = CURRENT_TIME;
	f2fs_set_inode_flags(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto out;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%lld",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		clear_inode_flag(inode, FI_ATOMIC_FILE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		ret = commit_inmem_pages(inode);
		if (ret) {
			set_inode_flag(inode, FI_ATOMIC_FILE);
			goto err_out;
		}
	}

	ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
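/*
 * Illustrative userspace sequence for the two atomic-write ioctls above
 * (a sketch, not an ABI guarantee; error handling omitted):
 *
 *	int fd = open("db_file", O_RDWR);
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);				// held as inmem pages
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);	// commit + fsync
 *
 * Until the commit ioctl, dirtied pages are kept in memory and are
 * dropped, not written back, if the file is closed or the write aborted.
 */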
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct fscrypt_policy policy;
	struct inode *inode = file_inode(filp);

	if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
							sizeof(policy)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_process_policy(filp, &policy);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	struct fscrypt_policy policy;
	struct inode *inode = file_inode(filp);
	int err;

	err = fscrypt_get_policy(inode, &policy);
	if (err)
		return err;

	if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei;
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update(inode))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragment in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
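/*
 * Defragment strategy above: first probe the extent cache and the block
 * map to prove the range is actually fragmented, then redirty up to one
 * segment's worth of pages at a time under FI_DO_DEFRAG so writeback
 * reallocates them contiguously through the LFS allocator.
 */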
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range))) {
		err = -EFAULT;
		goto out;
	}

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) ||
		range.len & (F2FS_BLKSIZE - 1)) {
		err = -EINVAL;
		goto out;
	}

	err = f2fs_defragment_range(sbi, filp, &range);
	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		goto out;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		err = -EFAULT;
out:
	mnt_drop_write_file(filp);
	return err;
}
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	inode_lock(src);
	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in,
				pos_out, len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	default:
		return -ENOTTY;
	}
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	if (f2fs_encrypted_inode(inode) &&
				!fscrypt_has_encryption_key(inode) &&
				fscrypt_get_encryption_info(inode))
		return -EACCES;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		ret = f2fs_preallocate_blocks(iocb, from);
		if (!ret) {
			blk_start_plug(&plug);
			ret = __generic_file_write_iter(iocb, from);
			blk_finish_plug(&plug);
		}
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};