// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>

#include <trace/events/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
	struct inode *inode = file_inode(vmf->vma->vm_file);

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;

	if (unlikely(f2fs_cp_error(sbi))) {

	if (!f2fs_is_checkpoint_ready(sbi)) {

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < F2FS_I(inode)->i_cluster_size) {

	/* should do out of any locked page */
	f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {

	/* block allocation */
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_block(&dn, page->index);
	__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);

#ifdef CONFIG_F2FS_FS_COMPRESSION
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) > i_size_read(inode)) {
		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);

	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);

	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);

	return block_page_mkwrite_return(err);
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,

static int get_parent_ino(struct inode *inode, nid_t *pino)
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);

	*pino = parent_ino(dentry);

static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
			f2fs_need_dentry_mark(sbi, inode->i_ino) &&
			f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
		cp_reason = CP_RECOVER_DIR;

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))

static void try_to_fix_pino(struct inode *inode)
	struct f2fs_inode_info *fi = F2FS_I(inode);

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	up_write(&fi->i_sem);
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))

	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	/* all the dirty node pages should be flushed for POR */
	ret = f2fs_sync_fs(inode->i_sb, 1);

	/*
	 * We've secured consistency through sync_fs. Following pino
	 * will be used only for fsynced inodes after checkpoint.
	 */
	try_to_fix_pino(inode);
	clear_inode_flag(inode, FI_APPEND_WRITE);
	clear_inode_flag(inode, FI_UPDATE_WRITE);

	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 */
	ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);

	/* once recovery info is written, don't need to tack this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);

	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);

	f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);

	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);

int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))

	return f2fs_do_sync_file(file, start, end, datasync, false);
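
/*
 * A minimal userspace sketch of how this path is typically reached (assuming
 * the standard fsync(2)/fdatasync(2) entry points; names below are
 * illustrative only):
 *
 *	fsync(fd);        reaches f2fs_sync_file(..., datasync = 0)
 *	fdatasync(fd);    reaches f2fs_sync_file(..., datasync = 1)
 *
 * In both cases f2fs_do_sync_file() asks need_do_checkpoint() whether a full
 * checkpoint is required or whether roll-forward recovery information
 * (fsynced node chain plus a cache flush) is sufficient.
 */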
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
	if (whence != SEEK_DATA)

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,

static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
	if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))

	if (blkaddr == NULL_ADDR)

static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;

	isize = i_size_read(inode);

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
		} else if (err == -ENOENT) {
			/* direct node does not exists */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC_ENHANCE)) {

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,

	if (whence == SEEK_DATA)

	if (whence == SEEK_HOLE && data_ofs > isize)

	return vfs_setpos(file, data_ofs, maxbytes);

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	return generic_file_llseek_size(file, offset, whence,
					maxbytes, i_size_read(inode));

	return f2fs_seek_block(file, offset, whence);
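
/*
 * Illustrative use of this llseek path from userspace (a sketch assuming the
 * standard SEEK_DATA/SEEK_HOLE semantics of lseek(2)):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);      next written offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   end of that data extent
 *
 * Both requests are resolved by f2fs_seek_block(), which walks direct node
 * blocks; per __found_offset(), a preallocated NEW_ADDR block only counts as
 * data when the first dirty page index matches it.
 */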
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!f2fs_is_compress_backend_ready(inode))

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);

	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);

static int f2fs_file_open(struct inode *inode, struct file *filp)
	int err = fscrypt_file_open(inode, filp);

	if (!f2fs_is_compress_backend_ready(inode))

	err = fsverity_file_open(inode, filp);

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);

void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
				!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);

		if (blkaddr == NULL_ADDR)

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE))
			if (compressed_cluster)

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	/*
	 * once we invalidate valid blkaddr in range [ofs, ofs + count],
	 * we will invalidate all blkaddr in the whole range.
	 */
	fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
	f2fs_update_extent_cache_range(dn, fofs, 0, len);
	dec_valid_block_count(sbi, dn->inode, nr_free);

	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);

void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
static int truncate_partial_data_page(struct inode *inode, u64 from,
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;

	if (!offset && !cache_only)

	page = find_lock_page(mapping, index);
	if (page && PageUptodate(page))
	f2fs_put_page(page, 1);

	if (f2fs_compressed_file(inode))

	page = f2fs_get_lock_data_page(inode, index, true);
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));

	set_page_dirty(page);
	f2fs_put_page(page, 1);

static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int count = 0, err = 0;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
		err = PTR_ERR(ipage);

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);

	err = f2fs_truncate_inode_blocks(inode, free_from);

	/* lastly zero out the first data page */
	err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);

int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
	u64 free_from = from;

	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode)) {
		size_t cluster_shift = PAGE_SHIFT +
					F2FS_I(inode)->i_log_cluster_size;
		size_t cluster_mask = (1 << cluster_shift) - 1;

		free_from = from >> cluster_shift;
		if (from & cluster_mask)
		free_from <<= cluster_shift;

	return do_truncate_blocks(inode, free_from, lock);
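
/*
 * Worked example for the cluster-aligned truncation above (a sketch assuming
 * 4KiB pages and i_log_cluster_size == 2, i.e. 4-page clusters and
 * cluster_shift == 14):
 *
 *	from          = 0x5000  (20KiB, inside the second cluster)
 *	from >> 14    = 1
 *	from & 0x3fff = 0x1000  not cluster aligned, so free_from is rounded
 *	                        up to the next cluster before being shifted
 *	                        back, giving 0x8000 (32KiB)
 *
 * Blocks are therefore only released from the next cluster boundary onwards;
 * the partially truncated cluster is left in place by this path.
 */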
int f2fs_truncate(struct inode *inode)
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

int f2fs_getattr(const struct path *path, struct kstat *stat,
			u32 request_mask, unsigned int query_flags)
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;

	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
		set_acl_inode(inode, mode);

#define __setattr_copy setattr_copy

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
	struct inode *inode = d_inode(dentry);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))

	err = setattr_prepare(dentry, attr);

	err = fscrypt_prepare_setattr(dentry, attr);

	err = fsverity_prepare_setattr(dentry, attr);

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);

	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));

		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);

	/* file size may changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	f2fs_balance_fs(sbi, true);

	page = f2fs_get_new_data_page(inode, NULL, index, false);
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;

	ret = f2fs_convert_inline_inode(inode);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
		ret = fill_zero(inode, pg_end, 0, off_end);

	if (pg_start < pg_end) {
		struct address_space *mapping = inode->i_mapping;
		loff_t blk_start, blk_end;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		f2fs_balance_fs(sbi, true);

		blk_start = (loff_t)pg_start << PAGE_SHIFT;
		blk_end = (loff_t)pg_end << PAGE_SHIFT;

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_inode_pages_range(mapping, blk_start,

		ret = f2fs_truncate_hole(inode, pg_start, pg_end);
		f2fs_unlock_op(sbi);

		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
						dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);

	f2fs_put_dnode(&dn);

static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		f2fs_put_dnode(&dn);
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);

		if (blkaddr[i] == NULL_ADDR && !full) {

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
				f2fs_put_dnode(&dn);

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
					f2fs_i_blocks_write(dst_inode,
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
						blkaddr[i], ni.version, true, false);

				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);

static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
	block_t *src_blkaddr;

	olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

	src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(block_t)),

	do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
				array_size(olen, sizeof(int)),
		kvfree(src_blkaddr);

	ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);

	ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);

	kvfree(src_blkaddr);

	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
	if (offset + len >= i_size_read(inode))

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	ret = f2fs_do_collapse(inode, offset, len);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_i_size_write(inode, new_size);

static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 */
		if (dn->data_blkaddr == NULL_ADDR) {

		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);

	f2fs_update_extent_cache_range(dn, start, 0, index - start);
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;

	ret = inode_newsize_ok(inode, (len + offset));

	ret = f2fs_convert_inline_inode(inode);

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

		new_size = max_t(loff_t, new_size, offset + len);
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);

			ret = fill_zero(inode, pg_end, 0, off_end);

			new_size = max_t(loff_t, new_size, offset + len);

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
			f2fs_i_size_write(inode, new_size);
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);

	if (offset >= i_size_read(inode))

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;

		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_i_size_write(inode, new_size);

static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	loff_t new_size = i_size_read(inode);

	err = inode_newsize_ok(inode, (len + offset));

	err = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;

	if (f2fs_is_pinned_file(inode)) {
		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
					sbi->log_blocks_per_seg;

		if (map.m_len % sbi->blocks_per_seg)
			len += sbi->blocks_per_seg;

		map.m_len = sbi->blocks_per_seg;

		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)

		down_write(&sbi->pin_sem);
		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
		up_write(&sbi->pin_sem);

		map.m_lblk += map.m_len;

		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
			f2fs_i_size_write(inode, new_size);
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
	struct inode *inode = file_inode(file);

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
	if (!f2fs_is_compress_backend_ready(inode))

	/* f2fs only support ->fallocate for regular file */
	if (!S_ISREG(inode->i_mode))

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
		ret = expand_inode_data(inode, offset, len, mode);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
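
/*
 * Illustrative userspace calls that land in the branches above (a sketch
 * assuming the generic fallocate(2) interface):
 *
 *	fallocate(fd, 0, off, len);                                expand_inode_data()
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);  punch_hole()
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len);         f2fs_collapse_range()
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);             f2fs_zero_range()
 *	fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len);           f2fs_insert_range()
 *
 * Collapse and insert require block-aligned offset/len, and none of the hole
 * or range operations are allowed on compressed files in this version.
 */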
static int f2fs_release_file(struct inode *inode, struct file *filp)
	/*
	 * f2fs_relase_file is called at every close calls. So we should
	 * not drop any inmemory pages by close called by other process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)

	/* some remained atomic pages should discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);

static int f2fs_file_flush(struct file *file, fl_owner_t id)
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other reader/write can see corrupted database
	 * until all the writers close its file. Since this should be done
	 * before dropping file lock, it needs to do in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
		if (!f2fs_empty_dir(inode))

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (f2fs_disable_compressed_file(inode))
		if (iflags & F2FS_NOCOMP_FL)
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))

			set_compress_context(inode);

	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },

#define F2FS_GETTABLE_FS_FL (		\
		FS_PROJINHERIT_FL |	\
		FS_INLINE_DATA_FL |	\

#define F2FS_SETTABLE_FS_FL (		\
		FS_PROJINHERIT_FL |	\

/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;
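
/*
 * Example of the conversion above (a sketch; the actual flag values are
 * defined elsewhere and not quoted here). If an inode has F2FS_COMPR_FL and
 * F2FS_NODUMP_FL set on disk, the table walk ORs in the generic equivalents:
 *
 *	u32 fsflags = f2fs_iflags_to_fsflags(F2FS_COMPR_FL | F2FS_NODUMP_FL);
 *	fsflags should equal (FS_COMPR_FL | FS_NODUMP_FL)
 *
 * f2fs_fsflags_to_iflags() is the inverse walk over the same table, so only
 * flags listed in f2fs_fsflags_map[] round-trip between the two encodings.
 */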
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;

	if (!inode_owner_or_capable(inode))

	if (get_user(fsflags, (int __user *)arg))

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)

	ret = mnt_want_write_file(filp);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
static int f2fs_ioc_start_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!inode_owner_or_capable(inode))

	if (!S_ISREG(inode->i_mode))

	if (filp->f_flags & O_DIRECT)

	ret = mnt_want_write_file(filp);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))

	ret = f2fs_convert_inline_inode(inode);

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_commit_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	ret = mnt_want_write_file(filp);

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	if (f2fs_is_volatile_file(inode)) {

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
			f2fs_drop_inmem_pages(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);

	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_start_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	if (!S_ISREG(inode->i_mode))

	ret = mnt_want_write_file(filp);

	if (f2fs_is_volatile_file(inode))

	ret = f2fs_convert_inline_inode(inode);

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_release_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	ret = mnt_want_write_file(filp);

	if (!f2fs_is_volatile_file(inode))

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);

	inode_unlock(inode);
	mnt_drop_write_file(filp);

static int f2fs_ioc_abort_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	ret = mnt_want_write_file(filp);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(in, (__u32 __user *)arg))

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);

	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev, sb);
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);

	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);
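
/*
 * Illustrative userspace use of the shutdown ioctl above (a sketch assuming
 * the F2FS_IOC_SHUTDOWN definition from the f2fs ioctl interface):
 *
 *	__u32 in = F2FS_GOING_DOWN_METASYNC;
 *	if (ioctl(fd, F2FS_IOC_SHUTDOWN, &in) < 0)
 *		perror("F2FS_IOC_SHUTDOWN");
 *
 * METASYNC writes a checkpoint first and then stops checkpointing, NOSYNC
 * stops checkpointing immediately, and FULLSYNC freezes the block device
 * around the shutdown.
 */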
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;

	if (!capable(CAP_SYS_ADMIN))

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,

	ret = mnt_want_write_file(filp);

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);

	if (copy_to_user((struct fstrim_range __user *)arg, &range,

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
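
/*
 * Illustrative FITRIM call from userspace (a sketch using the generic
 * fstrim_range layout from <linux/fs.h>):
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,
 *		.minlen	= 0,	raised to the device discard granularity above
 *	};
 *	ioctl(fd, FITRIM, &range);
 *	on return, range.len holds the number of bytes actually trimmed
 *
 * This is essentially what fstrim(8) issues; f2fs forwards it to
 * f2fs_trim_fs().
 */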
static bool uuid_is_nonzero(__u8 u[16])
	for (i = 0; i < 16; i++)

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!f2fs_sb_has_encrypt(sbi))

	err = mnt_want_write_file(filp);

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);

	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,

	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(sync, (__u32 __user *)arg))

	if (f2fs_readonly(sbi->sb))

	ret = mnt_want_write_file(filp);

		if (!down_write_trylock(&sbi->gc_lock)) {
		down_write(&sbi->gc_lock);

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);

	mnt_drop_write_file(filp);
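
/*
 * Illustrative trigger of foreground GC from userspace (a sketch assuming the
 * F2FS_IOC_GARBAGE_COLLECT definition from the f2fs ioctl interface):
 *
 *	__u32 sync = 1;		nonzero requests synchronous (blocking) GC
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 *
 * With sync == 0 the call fails instead of waiting when another GC already
 * holds gc_lock; with sync != 0 it blocks on the lock first.
 */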
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;

	if (!capable(CAP_SYS_ADMIN))

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,

	if (f2fs_readonly(sbi->sb))

	end = range.start + range.len;
	if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))

	ret = mnt_want_write_file(filp);

		if (!down_write_trylock(&sbi->gc_lock)) {
		down_write(&sbi->gc_lock);

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)

	mnt_drop_write_file(filp);

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (f2fs_readonly(sbi->sb))

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");

	ret = mnt_want_write_file(filp);

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
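/*
 * Move the data blocks of [pos_in, pos_in + len) in file_in to pos_out of
 * file_out.  Both files must be regular, unencrypted and on the same f2fs
 * instance, and all offsets must be block aligned; i_gc_rwsem[WRITE] is
 * held on both inodes to keep GC away while the blocks are exchanged.
 */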
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}
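/*
 * F2FS_IOC_FLUSH_DEVICE: migrate up to range.segments segments off the
 * requested device by running foreground GC over them.  Only meaningful on
 * multi-device filesystems without large sections.
 */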
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif
/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and
 * settable via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[],
 * and add its FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)
/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}
/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}
static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}
static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
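/*
 * Pinned files are excluded from GC block migration.  Each failed GC attempt
 * against a pinned file is counted here, and once the count exceeds
 * gc_pin_file_threshold the pin is dropped so that a badly placed pinned
 * file cannot block garbage collection forever.
 */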
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}
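/*
 * Walk the whole file with F2FS_GET_BLOCK_PRECACHE to populate the extent
 * cache ahead of time, one mapping at a time.
 */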
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	ret = f2fs_resize_fs(sbi, block_count);

	return ret;
}
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}
static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}
static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kvfree(vbuf);
	return err;
}
static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));
	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u64 blocks;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	blocks = F2FS_I(inode)->i_compr_blocks;
	return put_user(blocks, (u64 __user *)arg);
}
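/*
 * Top-level ioctl dispatcher.  Every command is rejected early when a
 * checkpoint error has been detected or while checkpointing is not ready.
 */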
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case F2FS_IOC_FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case F2FS_IOC_GET_VOLUME_NAME:
		return f2fs_get_volume_name(filp, arg);
	case F2FS_IOC_SET_VOLUME_NAME:
		return f2fs_set_volume_name(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	default:
		return -ENOTTY;
	}
}
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	return generic_file_read_iter(iocb, iter);
}
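/*
 * Write entry point: handles IOCB_NOWAIT semantics, converts inline data
 * before direct I/O and preallocates blocks where useful, then falls
 * through to __generic_file_write_iter().
 */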
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_FITRIM:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case F2FS_IOC_GET_VOLUME_NAME:
	case F2FS_IOC_SET_VOLUME_NAME:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};