// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
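/*
 * Write-fault handler: make sure the faulted page has a backing block
 * (allocating one for holes unless the compressed-cluster path already
 * reserved it) and zero the part of the page beyond i_size before the
 * page is made writable.
 */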
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			need_alloc = false;
		}
	}
#endif
	/* should be done outside of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
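/*
 * Core fsync path. Cheap cases are served by flushing the data pages and
 * the fsync-marked node chain used by roll-forward recovery; a full
 * checkpoint is issued only when need_do_checkpoint() says roll-forward
 * recovery cannot cover the inode (hardlinks, wrong pino, no space for
 * roll-forward, and so on).
 */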
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() can be recovered from a sudden
	 * power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * the node chain which serializes node blocks. If one of the node
	 * writes is reordered, we can see a simply broken chain, resulting
	 * in stopping roll-forward recovery. It means we'll recover all or
	 * no node blocks given the fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}
static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
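/*
 * lseek(SEEK_DATA/SEEK_HOLE) support: walk the direct node blocks from the
 * requested offset and report the first position whose block address
 * matches the request, treating dirty-but-unmapped pages as data.
 */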
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
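/*
 * Free 'count' block addresses starting at dn->ofs_in_node: clear each
 * valid blkaddr, invalidate the blocks, and update the extent cache and
 * the inode's valid-block count. For compressed files, also track how
 * many valid blocks each cluster loses so i_compr_blocks stays accurate.
 */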
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
							bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	if (f2fs_compressed_file(inode))
		return 0;

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;

	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode)) {
		size_t cluster_shift = PAGE_SHIFT +
					F2FS_I(inode)->i_log_cluster_size;
		size_t cluster_mask = (1 << cluster_shift) - 1;

		free_from = from >> cluster_shift;
		if (from & cluster_mask)
			free_from++;
		free_from <<= cluster_shift;
	}

	return do_truncate_blocks(inode, free_from, lock);
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		down_write(&F2FS_I(inode)->i_sem);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
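/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial first/last pages via fill_zero()
 * and drop the whole pages in between via f2fs_truncate_hole().
 */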
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
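/*
 * Helpers for collapse/insert range: __read_out_blkaddrs() snapshots the
 * source block addresses (detaching non-checkpointed ones so they are not
 * invalidated), __clone_blkaddrs() rebinds or copies them at the
 * destination, and __roll_back_blkaddrs() restores the source mapping if
 * any step fails.
 */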
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
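/*
 * Preallocation backend of fallocate(): for pinned files, blocks are
 * allocated segment by segment from the pinned data log so the file stays
 * physically contiguous; otherwise a single f2fs_map_blocks() call
 * reserves the whole range.
 */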
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
					sbi->log_blocks_per_seg;
		block_t done = 0;

		if (map.m_len % sbi->blocks_per_seg)
			len += sbi->blocks_per_seg;

		map.m_len = sbi->blocks_per_seg;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);
		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
		up_write(&sbi->pin_sem);

		done += map.m_len;
		len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && len)
			goto next_alloc;

		map.m_len = done;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should
	 * not drop any in-memory pages on a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should do the
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close the file. Since this should
	 * be done before dropping the file lock, it needs to be done in
	 * ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ fi->i_flags) & F2FS_COMPR_FL) {
		if (S_ISREG(inode->i_mode) &&
			(fi->i_flags & F2FS_COMPR_FL || i_size_read(inode) ||
						F2FS_HAS_BLOCKS(inode)))
			return -EINVAL;
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			int err = f2fs_convert_inline_inode(inode);

			if (err)
				return err;

			if (!f2fs_may_compress(inode))
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	if ((iflags ^ fi->i_flags) & F2FS_NOCOMP_FL) {
		if (fi->i_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}
/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
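/*
 * F2FS_IOC_START_ATOMIC_WRITE: flush existing dirty pages first, then link
 * the inode into the atomic-file list; subsequent writes are staged in
 * memory until the commit ioctl writes them out as one transaction.
 */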
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}
static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}
static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}
static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}
static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragment in SSR mode when free sections are being
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
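/*
 * Note on the algorithm above: the first walk over the block map only
 * detects fragmentation (a discontiguous m_pblk run) and counts the blocks
 * involved; only if the range is fragmented does the second walk dirty the
 * mapped pages, at most blk_per_seg at a time, so that writeback under
 * FI_DO_DEFRAG reallocates them contiguously.
 */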
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
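/*
 * Illustrative userspace sketch: both fields of struct f2fs_defragment
 * must be F2FS_BLKSIZE-aligned, and range.len is rewritten on return to
 * the number of bytes actually moved.
 *
 *	struct f2fs_defragment df = { .start = 0, .len = file_size };
 *	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &df) == 0)
 *		printf("relocated %llu bytes\n", (unsigned long long)df.len);
 */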
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
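/*
 * Illustrative userspace sketch: F2FS_IOC_MOVE_RANGE exchanges block-aligned
 * data between two regular f2fs files on the same mount; the ioctl is issued
 * on the source descriptor and the destination travels by fd in the request.
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = bytes,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */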
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
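/*
 * Note: parking the victim hints (GC_CB, GC_GREEDY, ALLOC_NEXT) just past
 * end_segno appears intended to keep regular victim selection away from the
 * device range while the loop migrates it one segment at a time via
 * f2fs_gc().
 */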
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
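/*
 * Illustrative userspace sketch: the returned word is the raw superblock
 * feature bitmap (with atomic-write support always advertised, per the
 * comment above).
 *
 *	__u32 feat;
 *	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &feat) == 0)
 *		check_feature_bits(feat);	(caller-defined helper)
 */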
#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)
/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}

/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}
static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
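/*
 * Illustrative userspace sketch: pinning asks f2fs to keep the file's
 * blocks in place (updated in-place, skipped by GC) until too many GC
 * trials fail; on success the ioctl returns the current GC failure count.
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 */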
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err = 0;	/* keep a defined result if the walk makes no progress */

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
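/*
 * Note: F2FS_GET_BLOCK_PRECACHE populates the extent cache without
 * allocating anything (m_may_create is false above); i_gc_rwsem[WRITE] is
 * held across each f2fs_map_blocks() call so GC cannot migrate blocks
 * mid-walk.
 */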
static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	ret = f2fs_resize_fs(sbi, block_count);

	return ret;
}
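/*
 * Illustrative userspace sketch: the argument is the desired total block
 * count; all validation and the actual resizing live in f2fs_resize_fs().
 *
 *	__u64 blocks = new_size_bytes / F2FS_BLKSIZE;
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks);
 */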
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}
static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kvfree(vbuf);
	return err;
}
static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));
	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
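/*
 * Illustrative userspace sketch: the label is stored as UTF-16LE in the
 * superblock, so the get/set paths above convert to and from UTF-8 under
 * sb_lock.
 *
 *	char label[FSLABEL_MAX] = "f2fs-data";
 *	ioctl(fd, F2FS_IOC_SET_VOLUME_NAME, label);
 *	ioctl(fd, F2FS_IOC_GET_VOLUME_NAME, label);
 */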
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case F2FS_IOC_GET_VOLUME_NAME:
		return f2fs_get_volume_name(filp, arg);
	case F2FS_IOC_SET_VOLUME_NAME:
		return f2fs_set_volume_name(filp, arg);
	default:
		return -ENOTTY;
	}
}
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	return generic_file_read_iter(iocb, iter);
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case F2FS_IOC_GET_VOLUME_NAME:
	case F2FS_IOC_SET_VOLUME_NAME:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};