// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>

#include <trace/events/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < F2FS_I(inode)->i_cluster_size) {
			/* ... */
		}
		/* ... */
	}
#endif

	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
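/*
 * Annotation (not in the original source): these vm_ops route mmap faults
 * through f2fs.  Read faults use the generic filemap_fault() under
 * i_mmap_sem, while the first store to a writable mapping hits
 * f2fs_vm_page_mkwrite(), which allocates/reserves the data block and marks
 * the page dirty before userspace can modify it.
 */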
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);

	*pino = parent_ino(dentry);
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to tack this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
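/*
 * Annotation (not from the original source): the fast path above relies on
 * f2fs roll-forward recovery.  When need_do_checkpoint() reports a reason,
 * f2fs_sync_fs() forces a full checkpoint; otherwise only the inode's node
 * chain is written via f2fs_fsync_node_pages() and, unless fsync_mode is
 * "nobarrier", a cache flush is issued so the fsynced data survives power
 * loss.
 */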
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;

	return f2fs_do_sync_file(file, start, end, datasync, false);
}
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY,
					1, &page);
}
static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
				pgoff_t dirty, pgoff_t pgofs, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			__is_valid_data_blkaddr(blkaddr))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
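/*
 * Annotation (not in the original source): SEEK_SET/CUR/END are handled by
 * the generic VFS helper, while SEEK_DATA/SEEK_HOLE walk the dnode blocks in
 * f2fs_seek_block() and treat an unwritten NEW_ADDR block as data only if
 * the corresponding page is still dirty in the page cache.
 */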
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	if (f2fs_compressed_file(inode))
		return 0;

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;

	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode)) {
		size_t cluster_shift = PAGE_SHIFT +
					F2FS_I(inode)->i_log_cluster_size;
		size_t cluster_mask = (1 << cluster_shift) - 1;

		free_from = from >> cluster_shift;
		if (from & cluster_mask)
			free_from++;
		free_from <<= cluster_shift;
	}

	return do_truncate_blocks(inode, free_from, lock);
}
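/*
 * Annotation (not in the original source): for a compressed inode the
 * block-level truncation point is rounded up to a cluster boundary.  With a
 * 4-page cluster (i_log_cluster_size == 2), truncating at page 6 keeps
 * pages 0-7 at this layer and only releases whole clusters from page 8 on;
 * the partial tail inside the last cluster is handled by the callers.
 */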
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.listxattr	= f2fs_listxattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
							blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
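/*
 * Example (annotation, not in the original source): userspace reaches
 * punch_hole() through fallocate(2), e.g.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *
 * Partial pages at either end of the range are zeroed in place by
 * fill_zero(), while fully covered pages are dropped from the page cache and
 * their blocks invalidated via f2fs_truncate_hole().
 */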
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
						dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			loff_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_KERNEL);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_KERNEL);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
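/*
 * Annotation (not from the original source): collapse/insert range are built
 * on this helper.  Block addresses are first read out (and detached from
 * their dnodes for non-checkpointed data), then re-linked at the destination
 * offsets; if any step fails, __roll_back_blkaddrs() restores the saved
 * addresses so the source file is left intact.
 */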
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_pagecache_range(inode,
				(loff_t)index << PAGE_SHIFT,
				((loff_t)pg_end << PAGE_SHIFT) - 1);

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				up_write(&F2FS_I(inode)->i_mmap_sem);
				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				break;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);

			f2fs_unlock_op(sbi);
			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

			f2fs_balance_fs(sbi, dn.node_changed);

			if (ret)
				break;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}
	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nr, pg_start, pg_end, delta, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	ret = inode_newsize_ok(inode, new_size);
	if (ret)
		return ret;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(inode)->i_mmap_sem);
	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);
	truncate_pagecache(inode, offset);

	while (!ret && idx > pg_start) {
		nr = idx - pg_start;
		if (nr > delta)
			nr = delta;
		idx -= nr;

		f2fs_lock_op(sbi);
		f2fs_drop_extent_tree(inode);

		ret = __exchange_data_block(inode, inode, idx,
					idx + delta, nr, false);
		f2fs_unlock_op(sbi);
	}
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);
	up_write(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
			.m_may_create = true };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int err;

	err = inode_newsize_ok(inode, (len + offset));
	if (err)
		return err;

	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	if (!map.m_len)
		return 0;

	if (f2fs_is_pinned_file(inode)) {
		block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
					sbi->log_blocks_per_seg;
		block_t done = 0;

		if (map.m_len % sbi->blocks_per_seg)
			len += sbi->blocks_per_seg;

		map.m_len = sbi->blocks_per_seg;
next_alloc:
		if (has_not_enough_free_secs(sbi, 0,
			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			down_write(&sbi->gc_lock);
			err = f2fs_gc(sbi, true, false, NULL_SEGNO);
			if (err && err != -ENODATA && err != -EAGAIN)
				goto out_err;
		}

		down_write(&sbi->pin_sem);
		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
		up_write(&sbi->pin_sem);

		done += map.m_len;
		len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && len)
			goto next_alloc;

		map.m_len = done;
	} else {
		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!map.m_len)
			return err;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* f2fs only support ->fallocate for regular file */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (f2fs_compressed_file(inode) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
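/*
 * Example (annotation, not in the original source): the mode checks above map
 * directly to fallocate(2) requests, e.g. preallocation and zeroing with
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// expand_inode_data()
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len);	// f2fs_zero_range()
 *
 * Collapse/insert range are refused on encrypted files, and all
 * layout-changing modes are refused on compressed files.
 */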
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close calls. So we should
	 * not drop any inmemory pages by close called by other process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}
static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction is crashed, we should do
	 * roll-back. Otherwise, other readers/writers can see a corrupted
	 * database until all the writers close their files. Since this
	 * should be done before dropping the file lock, it needs to be done
	 * in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 masked_flags = fi->i_flags & mask;

	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));

	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return -EPERM;

	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if (!f2fs_empty_dir(inode))
			return -ENOTEMPTY;
	}

	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
			return -EOPNOTSUPP;
		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
			return -EINVAL;
	}

	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
		if (masked_flags & F2FS_COMPR_FL) {
			if (f2fs_disable_compressed_file(inode))
				return -EINVAL;
		}
		if (iflags & F2FS_NOCOMP_FL)
			return -EINVAL;
		if (iflags & F2FS_COMPR_FL) {
			if (!f2fs_may_compress(inode))
				return -EINVAL;

			set_compress_context(inode);
		}
	}
	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
		if (masked_flags & F2FS_COMPR_FL)
			return -EINVAL;
	}

	fi->i_flags = iflags | (fi->i_flags & ~mask);
	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
					(fi->i_flags & F2FS_NOCOMP_FL));

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	else
		clear_inode_flag(inode, FI_PROJ_INHERIT);

	inode->i_ctime = current_time(inode);
	f2fs_set_inode_flags(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}
/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
 */

static const struct {
	u32 iflag;
	u32 fsflag;
} f2fs_fsflags_map[] = {
	{ F2FS_COMPR_FL,	FS_COMPR_FL },
	{ F2FS_SYNC_FL,		FS_SYNC_FL },
	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
	{ F2FS_APPEND_FL,	FS_APPEND_FL },
	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
	{ F2FS_INDEX_FL,	FS_INDEX_FL },
	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
};

#define F2FS_GETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_INDEX_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_ENCRYPT_FL |		\
		FS_INLINE_DATA_FL |	\
		FS_NOCOW_FL |		\
		FS_VERITY_FL |		\
		FS_CASEFOLD_FL)

#define F2FS_SETTABLE_FS_FL (		\
		FS_COMPR_FL |		\
		FS_SYNC_FL |		\
		FS_IMMUTABLE_FL |	\
		FS_APPEND_FL |		\
		FS_NODUMP_FL |		\
		FS_NOATIME_FL |		\
		FS_NOCOMP_FL |		\
		FS_DIRSYNC_FL |		\
		FS_PROJINHERIT_FL |	\
		FS_CASEFOLD_FL)
/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
{
	u32 fsflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (iflags & f2fs_fsflags_map[i].iflag)
			fsflags |= f2fs_fsflags_map[i].fsflag;

	return fsflags;
}
/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
		if (fsflags & f2fs_fsflags_map[i].fsflag)
			iflags |= f2fs_fsflags_map[i].iflag;

	return iflags;
}
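/*
 * Annotation (not in the original source): only flags present in
 * f2fs_fsflags_map[] translate in both directions; flags such as
 * FS_ENCRYPT_FL or FS_VERITY_FL are derived from inode state in
 * f2fs_ioc_getflags(), so they are gettable but never settable here.
 */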
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	f2fs_disable_compressed_file(inode);

	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
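/*
 * Annotation (not in the original source): these two handlers back the
 * F2FS_IOC_START_ATOMIC_WRITE / F2FS_IOC_COMMIT_ATOMIC_WRITE ioctls used for
 * SQLite-style transactions: writes issued between start and commit are
 * staged as in-memory pages and only reach disk when the commit path flushes
 * them and fsyncs the inode; an interrupted transaction is dropped in
 * f2fs_file_flush()/f2fs_release_file().
 */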
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (IS_ERR(sb)) {
			ret = PTR_ERR(sb);
			goto out;
		}
		if (sb) {
			f2fs_stop_checkpoint(sbi, false);
			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
			   q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
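/*
 * Example (annotation, not in the original source): this is the standard
 * FITRIM entry point, typically driven by fstrim(8) or directly via
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX };
 *	ioctl(fd, FITRIM, &range);
 *
 * The requested minimum extent length is clamped to the device's discard
 * granularity before f2fs_trim_fs() walks the free segments.
 */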
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}

static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}

static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}

static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}

static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_gc_range range;
	u64 end;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	end = range.start + range.len;
	if (end < range.start || range.start < MAIN_BLKADDR(sbi) ||
					end >= MAX_BLKADDR(sbi))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

do_more:
	if (!range.sync) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start));
	range.start += BLKS_PER_SEC(sbi);
	if (range.start <= end)
		goto do_more;
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free sections for LFS allocation, this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					sbi->max_file_blocks))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}

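/*
 * Core of F2FS_IOC_MOVE_RANGE: exchange the block mappings of two regular,
 * unencrypted files on the same filesystem.  Both ranges must be block
 * aligned; dirty data is written back first and GC is excluded via
 * i_gc_rwsem for the duration of __exchange_data_block().
 */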
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}

static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;
	struct fd dst;
	int err;

	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;

	dst = fdget(range.dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range.pos_in, dst.file,
					range.pos_out, range.len);

	mnt_drop_write_file(filp);
	if (err)
		goto err_out;

	if (copy_to_user((struct f2fs_move_range __user *)arg,
						&range, sizeof(range)))
		err = -EFAULT;
err_out:
	fdput(dst);
	return err;
}

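/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off one device of a multi-device
 * filesystem by running foreground GC over that device's segment range.
 * Only supported when each section consists of a single segment.
 */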
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}

#ifdef CONFIG_QUOTA
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
#else
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}

static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	if (projid != F2FS_DEF_PROJID)
		return -EOPNOTSUPP;
	return 0;
}
#endif

/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */

/*
 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
 */

static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)

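/*
 * Example (hypothetical): a new on-disk flag F2FS_NEW_FL with an existing
 * FS_XFLAG_NEW counterpart would be exposed by adding
 *	{ F2FS_NEW_FL, FS_XFLAG_NEW },
 * to f2fs_xflags_map[] above and FS_XFLAG_NEW to F2FS_SUPPORTED_XFLAGS.
 */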
/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
static inline u32 f2fs_iflags_to_xflags(u32 iflags)
{
	u32 xflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (iflags & f2fs_xflags_map[i].iflag)
			xflags |= f2fs_xflags_map[i].xflag;

	return xflags;
}

/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
static inline u32 f2fs_xflags_to_iflags(u32 xflags)
{
	u32 iflags = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
		if (xflags & f2fs_xflags_map[i].xflag)
			iflags |= f2fs_xflags_map[i].iflag;

	return iflags;
}

static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));

	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}

static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa;

	f2fs_fill_fsxattr(inode, &fa);

	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}

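/*
 * Pinned files must keep their block addresses stable, so GC cannot
 * migrate their blocks.  The per-inode i_gc_failures[GC_FAILURE_PIN]
 * counter tracks how often GC has had to skip a pinned file; once it
 * exceeds gc_pin_file_threshold the pin is dropped so that GC can make
 * progress again.
 */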
int f2fs_pin_file_control(struct inode *inode, bool inc)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	/* Use i_gc_failures for normal file as a risk signal. */
	if (inc)
		f2fs_i_gc_failures_write(inode,
				fi->i_gc_failures[GC_FAILURE_PIN] + 1);

	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
			  __func__, inode->i_ino,
			  fi->i_gc_failures[GC_FAILURE_PIN]);
		clear_inode_flag(inode, FI_PIN_FILE);
		return -EAGAIN;
	}
	return 0;
}

static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin = 0;

	if (is_inode_flag_set(inode, FI_PIN_FILE))
		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
	return put_user(pin, (u32 __user *)arg);
}

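/*
 * Walk the whole file mapping with F2FS_GET_BLOCK_PRECACHE so that extent
 * info is populated into the extent cache ahead of time (used by
 * F2FS_IOC_PRECACHE_EXTENTS).
 */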
int f2fs_precache_extents(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_map_blocks map;
	pgoff_t m_next_extent;
	loff_t end;
	int err;

	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return -EOPNOTSUPP;

	map.m_lblk = 0;
	map.m_next_pgofs = NULL;
	map.m_next_extent = &m_next_extent;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	end = F2FS_I_SB(inode)->max_file_blocks;

	while (map.m_lblk < end) {
		map.m_len = end - map.m_lblk;

		down_write(&fi->i_gc_rwsem[WRITE]);
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
		up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		map.m_lblk = m_next_extent;
	}

	return err;
}

static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}

static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	ret = f2fs_resize_fs(sbi, block_count);

	return ret;
}

static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
		f2fs_warn(F2FS_I_SB(inode),
			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
			  inode->i_ino);
		return -EOPNOTSUPP;
	}

	return fsverity_ioctl_enable(filp, (const void __user *)arg);
}

static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}

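/*
 * F2FS_IOC_GET_VOLUME_NAME / F2FS_IOC_SET_VOLUME_NAME: read or update the
 * UTF-16LE volume label stored in the raw superblock, converting to/from
 * UTF-8 for userspace and committing the superblock on a label change.
 */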
static int f2fs_get_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int count;
	int err = 0;

	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
	if (!vbuf)
		return -ENOMEM;

	down_read(&sbi->sb_lock);
	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name),
			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
	up_read(&sbi->sb_lock);

	if (copy_to_user((char __user *)arg, vbuf,
				min(FSLABEL_MAX, count)))
		err = -EFAULT;

	kvfree(vbuf);
	return err;
}

static int f2fs_set_volume_name(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}

static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u64 blocks;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	blocks = F2FS_I(inode)->i_compr_blocks;
	return put_user(blocks, (u64 __user *)arg);
}

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case F2FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case F2FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case F2FS_IOC_GET_VOLUME_NAME:
		return f2fs_get_volume_name(filp, arg);
	case F2FS_IOC_SET_VOLUME_NAME:
		return f2fs_set_volume_name(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	default:
		return -ENOTTY;
	}
}

static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	return generic_file_read_iter(iocb, iter);
}

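/*
 * Buffered/direct write entry point.  Unless FI_NO_PREALLOC is set (or the
 * write can be served purely as a non-blocking DIO overwrite), blocks are
 * preallocated before __generic_file_write_iter() runs, and trimmed back
 * by f2fs_truncate() if the write ends up shorter than the preallocation.
 */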
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_MOVE_RANGE:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case F2FS_IOC_FSGETXATTR:
	case F2FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case F2FS_IOC_GET_VOLUME_NAME:
	case F2FS_IOC_SET_VOLUME_NAME:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};