// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
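
/*
 * Note: the fault handlers below take i_mmap_sem shared so that page
 * faults serialize against truncate/punch-hole/zero-range paths, which
 * take it exclusively before shrinking or remapping file ranges.
 */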
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
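
/*
 * Roughly, f2fs_vm_page_mkwrite() makes a faulted page writable by:
 * balancing dirty data first, revalidating the page under i_mmap_sem,
 * allocating a backing block if needed, waiting for any in-flight
 * writeback (including GC moves via META_MAPPING), zeroing the part of
 * the last page beyond i_size, and finally dirtying the page.
 */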
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
		if (err) {
			unlock_page(page);
			goto out_sem;
		}
	}

	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}

static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
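
/*
 * In short, fsync on f2fs has two outcomes: if need_do_checkpoint()
 * reports a reason, a full checkpoint makes the inode durable;
 * otherwise only the inode's node pages are written with an fsync mark
 * so that roll-forward recovery can replay them after a crash.
 */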
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to track this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct page *page;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
				      1, &page);
	if (!nr_pages)
		return ULONG_MAX;
	pgofs = page->index;
	put_page(page);
	return pgofs;
}

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
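
/*
 * Note on the block walk above: NEW_ADDR entries (reserved but not yet
 * written blocks) only count as data for SEEK_DATA when the matching
 * page is dirty in the page cache, which is what the first-dirty-index
 * lookup is for.
 */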
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	if (f2fs_compressed_file(inode))
		return 0;

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;

	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode)) {
		size_t cluster_shift = PAGE_SHIFT +
					F2FS_I(inode)->i_log_cluster_size;
		size_t cluster_mask = (1 << cluster_shift) - 1;

		free_from = from >> cluster_shift;
		if (from & cluster_mask)
			free_from++;
		free_from <<= cluster_shift;
	}

	return do_truncate_blocks(inode, free_from, lock);
}
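
/*
 * Example for the cluster alignment above: with 4KB pages and a cluster
 * size of 4 blocks (i_log_cluster_size == 2), cluster_shift is 14, so a
 * truncation point of 0x5000 rounds free_from up to 0x8000. Blocks are
 * therefore never freed from the middle of a compressed cluster; the
 * tail cluster is kept whole.
 */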
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		down_write(&F2FS_I(inode)->i_sem);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		up_write(&F2FS_I(inode)->i_sem);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
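
/*
 * The three helpers below implement block movement for collapse/insert
 * range: __read_out_blkaddrs() records source block addresses (detaching
 * non-checkpointed ones), __clone_blkaddrs() rebinds or copies them at
 * the destination, and __roll_back_blkaddrs() restores the source on
 * failure.
 */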
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = datablock_addr(dn.inode,
					dn.node_page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (test_opt(sbi, LFS)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = datablock_addr(dn.inode,
						dn.node_page, dn.ofs_in_node);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
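
/*
 * Lock order used by the range-exchange paths below: i_gc_rwsem[WRITE]
 * (to exclude GC block moves), then i_mmap_sem (to exclude page faults),
 * then lock_op (to exclude checkpoint). The extent tree is dropped
 * because block addresses are rebound wholesale.
 */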
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
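
/*
 * f2fs_do_zero_range() works in two passes: first count and reserve the
 * unallocated (NULL_ADDR) slots in [start, end), then mark every block
 * NEW_ADDR, invalidating previously written blocks, so subsequent reads
 * of the range return zeros without doing I/O.
 */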
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->inode, dn->node_page,
					dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
					sbi->log_blocks_per_seg;
		block_t done = 0;

		if (map.m_len % sbi->blocks_per_seg)
			len += sbi->blocks_per_seg;

		map.m_len = sbi->blocks_per_seg;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);
		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
		up_write(&sbi->pin_sem);

		done += map.m_len;
		len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && len)
			goto next_alloc;

		map.m_len = done;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
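
/*
 * Illustrative userspace use of the fallocate modes dispatched above
 * (a sketch; error handling omitted):
 *
 *	int fd = open("file", O_RDWR);
 *	fallocate(fd, 0, 0, 1 << 20);              - preallocate 1MB
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);                     - punch a two-block hole
 */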
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should
	 * not drop any in-memory pages on a close done by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashed, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database
	 * until all the writers close the file. Since this must be done
	 * before the file lock is dropped, it needs to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (S_ISREG(inode->i_mode) &&
			(masked_flags & F2FS_COMPR_FL || i_size_read(inode) ||
						F2FS_HAS_BLOCKS(inode)))
			return -EINVAL;
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			int err = f2fs_convert_inline_inode(inode);

			if (err)
				return err;

			if (!f2fs_may_compress(inode))
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
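
/*
 * These converters back FS_IOC_GETFLAGS/FS_IOC_SETFLAGS (the interface
 * used by lsattr/chattr): userspace sees the generic FS_*_FL bits, the
 * on-disk inode stores F2FS_*_FL bits, and f2fs_fsflags_map[] pairs
 * them up.
 */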
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
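
/*
 * F2FS_IOC_SHUTDOWN modes handled below, roughly from most orderly to
 * most abrupt: FULLSYNC freezes the block device first, METASYNC does a
 * checkpoint and then stops checkpointing, NOSYNC just stops
 * checkpointing, METAFLUSH flushes meta pages without a checkpoint, and
 * NEED_FSCK flags the filesystem for fsck.
 */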
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
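
/*
 * Defragmentation below is two-phase: a read-only mapping walk first
 * decides whether the range is fragmented (discontiguous physical
 * extents), and only then are the pages redirtied in segment-sized
 * batches so that writeback reallocates them contiguously under LFS
 * allocation.
 */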
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* write back all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of blocks that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
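
/*
 * F2FS_IOC_MOVE_RANGE moves block-aligned data between two f2fs files
 * on the same mount by exchanging block mappings rather than copying
 * data, which is why encrypted inodes and cross-superblock pairs are
 * rejected below.
 */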

static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
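
/*
 * F2FS_IOC_FLUSH_DEVICE migrates up to range.segments segments off one
 * device of a multi-device volume by pointing the GC victim selection
 * at that device's segment range, one segment per GC pass.
 */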

static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
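
/*
 * Project quota support below is only built with CONFIG_QUOTA; the
 * stubs at the end of this block accept nothing but the default
 * project ID.
 */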

#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)
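
/*
 * Illustrative only -- F2FS_EXAMPLE_FL and FS_XFLAG_EXAMPLE are
 * hypothetical names, not flags defined anywhere.  Wiring up a new
 * flag per the comment above would mean adding
 *
 *	{ F2FS_EXAMPLE_FL,	FS_XFLAG_EXAMPLE },
 *
 * to f2fs_xflags_map[] and OR-ing FS_XFLAG_EXAMPLE into
 * F2FS_SUPPORTED_XFLAGS.
 */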

/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}

/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}

static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
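
/*
 * File pinning: a pinned file is exempted from GC block migration, but
 * only until it has made GC fail more than gc_pin_file_threshold times;
 * past that the pin is dropped so GC can make progress again.
 */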

int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
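
/*
 * Precaching walks the whole file once with F2FS_GET_BLOCK_PRECACHE so
 * that block mappings are populated into the extent cache up front,
 * letting later lookups be served from the cache.
 */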

int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err = 0;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	ret = f2fs_resize_fs(sbi, block_count);

	return ret;
}

static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}
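
/*
 * Volume label ioctls: the on-disk label is stored as UTF-16LE in the
 * raw superblock, so the get/set paths convert to and from UTF-8 under
 * sb_lock.
 */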

static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kvfree(vbuf);
	return err;
}

static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));
	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case F2FS_IOC_GET_VOLUME_NAME:
		return f2fs_get_volume_name(filp, arg);
	case F2FS_IOC_SET_VOLUME_NAME:
		return f2fs_set_volume_name(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	return generic_file_read_iter(iocb, iter);
}
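
/*
 * Write path: unless FI_NO_PREALLOC applies, blocks are preallocated
 * for the whole iov before the write proper, so a short or failed
 * write can be trimmed back with f2fs_truncate() afterwards.
 */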

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
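
/*
 * 32-bit compat ioctls: the three legacy F2FS_IOC32_* numbers are
 * remapped to their native equivalents; every other compat-clean
 * command passes straight through to f2fs_ioctl().
 */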

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case F2FS_IOC_GET_VOLUME_NAME:
	case F2FS_IOC_SET_VOLUME_NAME:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};