/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static int f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int err;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	err = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	return err;
}
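/*
 * Called when a shared mmap page is about to become writable. The handler
 * below reserves a block for the faulting index, zeroes the part of the
 * page beyond EOF, and marks the page dirty, all under i_mmap_sem so that
 * truncation cannot race with the fault.
 */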
static int f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, dn.data_blkaddr);

out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (sbi->active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (need_dentry_mark(sbi, inode->i_ino) &&
		exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
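/*
 * f2fs_do_sync_file() below implements fsync/fdatasync. It first writes
 * back the dirty data pages; then, if need_do_checkpoint() reports a
 * reason, it falls back to a full checkpoint, and otherwise it only
 * flushes this inode's node pages and records recovery info so the file
 * can be restored by roll-forward recovery after a crash.
 */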
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = wait_on_node_pages_writeback(sbi, ino);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}
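/*
 * The helpers below implement SEEK_DATA/SEEK_HOLE by walking the direct
 * node blocks: a NULL_ADDR block address is a hole, while a NEW_ADDR
 * address (reserved but not yet written) only counts as data when the
 * corresponding page is dirty in the page cache.
 */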
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	return dquot_file_open(inode, filp);
}
void truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && f2fs_encrypted_inode(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
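/*
 * truncate_blocks() below frees every block past the truncation point:
 * blocks inside the direct node that holds the new EOF are freed with
 * truncate_data_blocks_range(), the rest of the node tree is released via
 * truncate_inode_blocks(), and finally the now-partial last page is zeroed.
 */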
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(FAULT_TRUNCATE);
		return -EIO;
	}
#endif
	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(inode->i_sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	if (flags & FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (f2fs_encrypted_inode(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;
	bool size_changed = false;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		err = dquot_transfer(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size <= i_size_read(inode)) {
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			if (err)
				return err;
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_setsize(inode, attr->ia_size);
			up_write(&F2FS_I(inode)->i_mmap_sem);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = current_time(inode);
		}

		down_write(&F2FS_I(inode)->i_sem);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);

		size_changed = true;
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, size_changed);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = get_next_page_offset(&dn, pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
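/*
 * punch_hole() below splits the request into three parts: a partial head
 * page and a partial tail page, which are simply zeroed in the page cache
 * with fill_zero(), and the block-aligned middle, whose blocks are
 * invalidated through truncate_hole().
 */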
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			down_write(&F2FS_I(inode)->i_mmap_sem);
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
		}
	}

	return ret;
}
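/*
 * The next three helpers implement block exchange for collapse/insert/move
 * ranges: __read_out_blkaddrs() snapshots the source block addresses and
 * detaches the non-checkpointed ones, __clone_blkaddrs() rebinds or copies
 * them into the destination offsets, and __roll_back_blkaddrs() restores
 * the source mapping if anything fails part-way.
 */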
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);
		if (!is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -ENOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			get_node_info(sbi, dn.nid, &ni);
			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = get_lock_data_page(src_inode, src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = truncate_hole(src_inode, src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(block_t) * olen, GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					sizeof(int) * olen, GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	f2fs_drop_extent_tree(inode);

	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out_unlock;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		goto out_unlock;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
out_unlock:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		goto out_sem;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			goto out_sem;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				goto out_sem;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);
out_sem:
	up_write(&F2FS_I(inode)->i_mmap_sem);

	return ret;
}
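/*
 * f2fs_insert_range() below shifts all blocks from the insertion point
 * toward the end of file by exchanging them, chunk by chunk from the tail,
 * into their new (higher) offsets, leaving a block-aligned hole at the
 * requested position.
 */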
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		goto out;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		goto out;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
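/*
 * f2fs_fallocate() dispatches to the helpers above based on the mode bits;
 * anything other than punch/collapse/zero/insert falls through to plain
 * preallocation via expand_inode_data().
 */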
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should not drop
	 * any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close the file. Since this should be done
	 * before dropping the file lock, it needs to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		drop_inmem_pages(inode);
	return 0;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
	return put_user(flags, (int __user *)arg);
}
static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int oldflags;

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL))
		if (!capable(CAP_LINUX_IMMUTABLE))
			return -EPERM;

	flags = flags & (FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	flags |= oldflags & ~(FS_FL_USER_MODIFIABLE | FS_PROJINHERIT_FL);
	fi->i_flags = flags;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	unsigned int flags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	ret = __f2fs_ioc_setflags(inode, flags);

	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	set_inode_flag(inode, FI_HOT_DATA);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto inc_stat;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%u",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		clear_inode_flag(inode, FI_HOT_DATA);
		goto out;
	}

inc_stat:
	F2FS_I(inode)->inmem_task = current;
	stat_inc_atomic_write(inode);
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
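/*
 * Illustrative userspace usage (a sketch, not part of this file): an
 * application such as a database can wrap a batch of writes so that either
 * all of them or none of them survive a crash:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);			// any number of writes
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *
 * The commit handler below persists the in-memory pages and fsyncs them.
 */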
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		ret = commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret) {
			clear_inode_flag(inode, FI_ATOMIC_FILE);
			clear_inode_flag(inode, FI_HOT_DATA);
			stat_dec_atomic_write(inode);
		}
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	stop_gc_thread(sbi);
	stop_discard_thread(sbi);

	drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	end = range.start + range.len;
	if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) {
		ret = -EINVAL;
		goto out;
	}
do_more:
	if (!range.sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += sbi->blocks_per_seg;
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * can avoid the defragment running in SSR mode when free sections
	 * are allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	down_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
		if (!down_write_trylock(&F2FS_I(dst)->dio_rwsem[WRITE])) {
			inode_unlock(dst);
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);
out_unlock:
	if (src != dst) {
		up_write(&F2FS_I(dst)->dio_rwsem[WRITE]);
		inode_unlock(dst);
	}
out:
	up_write(&F2FS_I(src)->dio_rwsem[WRITE]);
	inode_unlock(src);
	return ret;
}
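/*
 * f2fs_ioc_move_range() below is the ioctl wrapper for the helper above:
 * it validates the source and destination file modes, grabs the target
 * file from its fd, and copies the (possibly updated) range descriptor
 * back to userspace.
 */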
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
			sbi->segs_per_sec != 1) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"Can't flush %u in %d for segs_per_sec %u != 1\n",
				range.dev_num, sbi->s_ndevs,
				sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
#ifdef CONFIG_QUOTA
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sb)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = -EPERM;
	inode_lock(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		goto out_unlock;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_unlock;
	}

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		goto out_unlock;
	}
	f2fs_put_page(ipage, 1);

	dquot_initialize(inode);

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		dqput(transfer_to[PRJQUOTA]);
		if (err)
			goto out_dirty;
	}

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
out_dirty:
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
#else
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* Transfer internal flags to xflags */
static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags)
{
	__u32 xflags = 0;

	if (iflags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	if (iflags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	if (iflags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	if (iflags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	if (iflags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	if (iflags & FS_PROJINHERIT_FL)
		xflags |= FS_XFLAG_PROJINHERIT;
	return xflags;
}

#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
				  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
				  FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)

/* Flags we can manipulate with through FS_IOC_FSSETXATTR */
#define F2FS_FL_XFLAG_VISIBLE		(FS_SYNC_FL | \
					 FS_IMMUTABLE_FL | \
					 FS_APPEND_FL | \
					 FS_NODUMP_FL | \
					 FS_NOATIME_FL | \
					 FS_PROJINHERIT_FL)

/* Transfer xflags flags to internal */
static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
{
	unsigned long iflags = 0;

	if (xflags & FS_XFLAG_SYNC)
		iflags |= FS_SYNC_FL;
	if (xflags & FS_XFLAG_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (xflags & FS_XFLAG_APPEND)
		iflags |= FS_APPEND_FL;
	if (xflags & FS_XFLAG_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (xflags & FS_XFLAG_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (xflags & FS_XFLAG_PROJINHERIT)
		iflags |= FS_PROJINHERIT_FL;

	return iflags;
}
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;

	memset(&fa, 0, sizeof(struct fsxattr));
	fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags &
				(FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL));

	if (f2fs_sb_has_project_quota(inode->i_sb))
		fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
							fi->i_projid);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct fsxattr fa;
	unsigned int flags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS)
		return -EOPNOTSUPP;

	flags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, flags) != flags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);
	flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) |
				(flags & F2FS_FL_XFLAG_VISIBLE);
	err = __f2fs_ioc_setflags(inode, flags);
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
	if (err)
		return err;

	return 0;
}
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);

	if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: Enable GC = ino %lx after %x GC trials\n",
			__func__, inode->i_ino, fi->i_gc_failures);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
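/*
 * i_gc_failures counts how often garbage collection wanted to move a
 * pinned file's blocks but was refused.  Once the count exceeds
 * gc_pin_file_threshold the pin is dropped, so a heavily fragmented
 * pinned file cannot block section cleaning forever.
 */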
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		F2FS_I(inode)->i_gc_failures = 1;
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}
	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures;
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
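/*
 * A minimal userspace sketch for pinning (the path is hypothetical and
 * error handling is elided):
 *
 *	__u32 pin = 1;
 *	int fd = open("/mnt/f2fs/file", O_RDWR);
 *
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);	pin: refuse GC moves
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);	read back GC-failure count
 *
 * Passing pin == 0 clears the pin and reinitializes i_gc_failures.
 */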
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures;
	return put_user(pin, (__u32 __user *)arg);
}
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->dio_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->dio_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}
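/*
 * Each f2fs_map_blocks() call in F2FS_GET_BLOCK_PRECACHE mode populates
 * the extent cache for one mapped extent and reports, via m_next_extent,
 * the next file offset worth probing.  The loop therefore strides
 * extent by extent rather than block by block, until it walks past
 * max_file_blocks.
 */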
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	default:
		return -ENOTTY;
	}
}
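/*
 * Every f2fs-private command funnels through this single dispatcher,
 * and all of them are refused with -EIO once a checkpoint error has
 * been recorded, since nothing can make progress on a filesystem that
 * can no longer commit a consistent checkpoint.  Unknown commands fall
 * through to -ENOTTY, the conventional "not an ioctl for this file"
 * errno.
 */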
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct blk_plug plug;
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			return err;
		}
		blk_start_plug(&plug);
		ret = __generic_file_write_iter(iocb, from);
		blk_finish_plug(&plug);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
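/*
 * The write path preallocates all blocks covered by the iov before
 * entering __generic_file_write_iter(), so the hot copy loop never has
 * to allocate.  FI_NO_PREALLOC is set when the user buffer may fault
 * (iov_iter_fault_in_readable() reported a fault), so that blocks are
 * not preallocated for data that may never be copied in.  The blk_plug
 * batches the block I/O issued while copying.
 */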
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
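/*
 * For 32-bit userspace only the flag and version ioctls need distinct
 * compat numbers, because their argument is a long in the native ABI;
 * they are renumbered above.  Every other f2fs command uses fixed-size
 * types and is forwarded unchanged once compat_ptr() fixes up the
 * pointer representation.
 */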
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};