/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
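
/*
 * Memory-mapped I/O: the fault handlers below wrap the generic ones so
 * that page faults are serialized against truncation and hole punching
 * through i_mmap_sem (read side here, write side in f2fs_setattr() and
 * punch_hole()).
 */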
static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}
static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
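
/*
 * fsync helpers: the functions below decide whether an fsync can rely on
 * roll-forward recovery of fsynced node blocks alone, or must force a
 * full checkpoint.
 */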
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
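
/*
 * SEEK_DATA/SEEK_HOLE support. For illustration only, the userspace view
 * (hypothetical fd and offsets, not part of this file):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // start of first data extent
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // end of that extent
 *
 * __get_first_dirty_index() also catches dirty pages that have no block
 * address yet, so SEEK_DATA reports not-yet-written buffered data too.
 */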
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	struct dentry *dir;

	if (f2fs_encrypted_inode(inode)) {
		int ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return dquot_file_open(inode, filp);
}
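
/*
 * Truncation helpers: truncate_data_blocks_range() frees up to @count
 * block addresses recorded in one dnode and keeps the extent cache and
 * valid block count consistent with what was freed.
 */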
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;

	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);
	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode)) {
			err = fscrypt_get_encryption_info(inode);
			if (err)
				return err;
			if (!fscrypt_has_encryption_key(inode))
				return -ENOKEY;
		}

		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
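
/*
 * fallocate() helpers: fill_zero() zeroes a sub-page range through the
 * page cache, while truncate_hole() drops whole blocks in
 * [pg_start, pg_end).
 */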
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}
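
/*
 * Block-exchange machinery shared by collapse/insert range and
 * F2FS_IOC_MOVE_RANGE: read out the source block addresses, clone or
 * replace them at the destination, and roll back on failure.
 */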
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
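
/*
 * For illustration only (hypothetical userspace call, not part of this
 * file), punching out the first 4KB of an open file:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 4096);
 *
 * which lands in punch_hole() above.
 */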
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should not drop
	 * any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close the file. Since this has to happen
	 * before the file lock is dropped, it must be done in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags &
			(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	return put_user(flags, (int __user *)arg);
}
static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
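
/*
 * Atomic write ioctls, used by e.g. SQLite on Android: writes issued
 * between F2FS_IOC_START_ATOMIC_WRITE and F2FS_IOC_COMMIT_ATOMIC_WRITE
 * are staged in memory and committed as one unit. For illustration only
 * (hypothetical userspace sequence, not part of this file):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */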
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(inode, FI_HOT_DATA);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		clear_inode_flag(inode, FI_HOT_DATA);
		goto out;
	}

inc_stat:
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			clear_inode_flag(inode, FI_HOT_DATA);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update_policy(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * can avoid defragmenting in SSR mode when free sections are being
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}
#define F2FS_SUPPORTED_FS_XFLAGS	(FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
					 FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
					 FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate with through FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)
/* Transfer xflags to internal flags */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	default:
		return -ENOTTY;
	}
}
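
/*
 * Write path: blocks for the whole write are preallocated up front via
 * f2fs_preallocate_blocks(); FI_NO_PREALLOC suppresses that when the
 * user buffer may fault mid-copy.
 */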
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
			inode_unlock(inode);
			return err;
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};