// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include <linux/uuid.h>
#include <linux/file.h>
#include <linux/nls.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#include <uapi/linux/f2fs.h>
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias.  The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to tack this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
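
/*
 * Illustrative only (not part of the original file): a minimal user-space
 * sketch of how the datasync argument above is driven.  fsync(2) reaches
 * f2fs_sync_file() with datasync == 0, fdatasync(2) with datasync == 1,
 * which is what enables the in-place-update fast path for small syncs.
 * The file name used here is hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int example_sync(void)
 *	{
 *		int fd = open("/mnt/f2fs/data.log", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		write(fd, "payload", 7);
 *		fdatasync(fd);		// datasync path: data + minimal metadata
 *		fsync(fd);		// full path: may also trigger a checkpoint
 *		close(fd);
 *		return 0;
 *	}
 */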
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
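
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * the SEEK_DATA/SEEK_HOLE interface served by f2fs_seek_block() above.
 * The fd is assumed to be an open f2fs regular file.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	off_t next_data(int fd, off_t pos)
 *	{
 *		return lseek(fd, pos, SEEK_DATA);	// -1/ENXIO past EOF
 *	}
 *
 *	off_t next_hole(int fd, off_t pos)
 *	{
 *		return lseek(fd, pos, SEEK_HOLE);	// EOF counts as a hole
 *	}
 */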
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
}
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
int f2fs_getattr(const struct path *path, struct kstat *stat,
		 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;

			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			down_write(&F2FS_I(inode)->i_mmap_sem);

			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);

			up_write(&F2FS_I(inode)->i_mmap_sem);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		}
	}

	return ret;
}
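
/*
 * Illustrative only (not part of the original file): a user-space sketch of
 * the FALLOC_FL_PUNCH_HOLE request that is served by punch_hole() above.
 * Partial pages at both ends are zeroed via fill_zero(); whole pages in
 * between are truncated.  The fd is assumed to be an open f2fs regular file.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int punch_one_mb(int fd, off_t off)
 *	{
 *		// PUNCH_HOLE must be combined with KEEP_SIZE
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *				 off, 1 << 20);
 *	}
 */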
static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, pgoff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, done, i;

next_dnode:
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		if (dn.max_level == 0)
			return -ENOENT;
		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
						dn.ofs_in_node, len);
		blkaddr += done;
		do_replace += done;
		goto next;
	}

	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
							dn.ofs_in_node, len);
	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
		*blkaddr = f2fs_data_blkaddr(&dn);

		if (__is_valid_data_blkaddr(*blkaddr) &&
			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
					DATA_GENERIC_ENHANCE)) {
			f2fs_put_dnode(&dn);
			return -EFSCORRUPTED;
		}

		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {

			if (f2fs_lfs_mode(sbi)) {
				f2fs_put_dnode(&dn);
				return -EOPNOTSUPP;
			}

			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			*do_replace = 1;
		}
	}
	f2fs_put_dnode(&dn);
next:
	len -= done;
	off += done;
	if (len)
		goto next_dnode;
	return 0;
}
static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
				int *do_replace, pgoff_t off, int len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int ret, i;

	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
		if (*do_replace == 0)
			continue;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
		if (ret) {
			dec_valid_block_count(sbi, inode, 1);
			f2fs_invalidate_blocks(sbi, *blkaddr);
		} else {
			f2fs_update_data_blkaddr(&dn, *blkaddr);
		}
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
			block_t *blkaddr, int *do_replace,
			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
	pgoff_t i = 0;
	int ret;

	while (i < len) {
		if (blkaddr[i] == NULL_ADDR && !full) {
			i++;
			continue;
		}

		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
			struct dnode_of_data dn;
			struct node_info ni;
			size_t new_size;
			pgoff_t ilen;

			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
			if (ret)
				return ret;

			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (ret) {
				f2fs_put_dnode(&dn);
				return ret;
			}

			ilen = min((pgoff_t)
				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
						dn.ofs_in_node, len - i);
			do {
				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
				f2fs_truncate_data_blocks_range(&dn, 1);

				if (do_replace[i]) {
					f2fs_i_blocks_write(src_inode,
							1, false, false);
					f2fs_i_blocks_write(dst_inode,
							1, true, false);
					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					blkaddr[i], ni.version, true, false);

					do_replace[i] = 0;
				}
				dn.ofs_in_node++;
				i++;
				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
				if (dst_inode->i_size < new_size)
					f2fs_i_size_write(dst_inode, new_size);
			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));

			f2fs_put_dnode(&dn);
		} else {
			struct page *psrc, *pdst;

			psrc = f2fs_get_lock_data_page(src_inode,
							src + i, true);
			if (IS_ERR(psrc))
				return PTR_ERR(psrc);
			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
								true);
			if (IS_ERR(pdst)) {
				f2fs_put_page(psrc, 1);
				return PTR_ERR(pdst);
			}
			f2fs_copy_page(psrc, pdst);
			set_page_dirty(pdst);
			f2fs_put_page(pdst, 1);
			f2fs_put_page(psrc, 1);

			ret = f2fs_truncate_hole(src_inode,
						src + i, src + i + 1);
			if (ret)
				return ret;
			i++;
		}
	}
	return 0;
}
static int __exchange_data_block(struct inode *src_inode,
			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
			pgoff_t len, bool full)
{
	block_t *src_blkaddr;
	int *do_replace;
	pgoff_t olen;
	int ret;

	while (len) {
		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);

		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(block_t)),
					GFP_NOFS);
		if (!src_blkaddr)
			return -ENOMEM;

		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
					array_size(olen, sizeof(int)),
					GFP_NOFS);
		if (!do_replace) {
			kvfree(src_blkaddr);
			return -ENOMEM;
		}

		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
					do_replace, src, olen);
		if (ret)
			goto roll_back;

		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
					do_replace, src, dst, olen, full);
		if (ret)
			goto roll_back;

		src += olen;
		dst += olen;
		len -= olen;

		kvfree(src_blkaddr);
		kvfree(do_replace);
	}
	return 0;

roll_back:
	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
	kvfree(src_blkaddr);
	kvfree(do_replace);
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t end = (offset + len) >> PAGE_SHIFT;
	int ret;

	f2fs_balance_fs(sbi, true);

	/* avoid gc operation during block exchange */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	f2fs_lock_op(sbi);
	f2fs_drop_extent_tree(inode);
	truncate_pagecache(inode, offset);
	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
	f2fs_unlock_op(sbi);

	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	ret = f2fs_do_collapse(inode, offset, len);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	down_write(&F2FS_I(inode)->i_mmap_sem);
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	ret = f2fs_truncate_blocks(inode, new_size, true);
	up_write(&F2FS_I(inode)->i_mmap_sem);
	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
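
/*
 * Illustrative only (not part of the original file): a user-space sketch of
 * the FALLOC_FL_COLLAPSE_RANGE path that ends up in f2fs_collapse_range()
 * above.  Both offset and length must be F2FS_BLKSIZE aligned and the range
 * must end before i_size; the fd is assumed to be an open f2fs regular file.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int drop_first_block(int fd)
 *	{
 *		// remove bytes [0, 4096) and shift the rest of the file down
 *		return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096);
 *	}
 */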
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = f2fs_reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr = f2fs_data_blkaddr(dn);
		/*
		 * f2fs_reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
1415 static int f2fs_zero_range(struct inode
*inode
, loff_t offset
, loff_t len
,
1418 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1419 struct address_space
*mapping
= inode
->i_mapping
;
1420 pgoff_t index
, pg_start
, pg_end
;
1421 loff_t new_size
= i_size_read(inode
);
1422 loff_t off_start
, off_end
;
1425 ret
= inode_newsize_ok(inode
, (len
+ offset
));
1429 ret
= f2fs_convert_inline_inode(inode
);
1433 ret
= filemap_write_and_wait_range(mapping
, offset
, offset
+ len
- 1);
1437 pg_start
= ((unsigned long long) offset
) >> PAGE_SHIFT
;
1438 pg_end
= ((unsigned long long) offset
+ len
) >> PAGE_SHIFT
;
1440 off_start
= offset
& (PAGE_SIZE
- 1);
1441 off_end
= (offset
+ len
) & (PAGE_SIZE
- 1);
1443 if (pg_start
== pg_end
) {
1444 ret
= fill_zero(inode
, pg_start
, off_start
,
1445 off_end
- off_start
);
1449 new_size
= max_t(loff_t
, new_size
, offset
+ len
);
1452 ret
= fill_zero(inode
, pg_start
++, off_start
,
1453 PAGE_SIZE
- off_start
);
1457 new_size
= max_t(loff_t
, new_size
,
1458 (loff_t
)pg_start
<< PAGE_SHIFT
);
1461 for (index
= pg_start
; index
< pg_end
;) {
1462 struct dnode_of_data dn
;
1463 unsigned int end_offset
;
1466 down_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
1467 down_write(&F2FS_I(inode
)->i_mmap_sem
);
1469 truncate_pagecache_range(inode
,
1470 (loff_t
)index
<< PAGE_SHIFT
,
1471 ((loff_t
)pg_end
<< PAGE_SHIFT
) - 1);
1475 set_new_dnode(&dn
, inode
, NULL
, NULL
, 0);
1476 ret
= f2fs_get_dnode_of_data(&dn
, index
, ALLOC_NODE
);
1478 f2fs_unlock_op(sbi
);
1479 up_write(&F2FS_I(inode
)->i_mmap_sem
);
1480 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
1484 end_offset
= ADDRS_PER_PAGE(dn
.node_page
, inode
);
1485 end
= min(pg_end
, end_offset
- dn
.ofs_in_node
+ index
);
1487 ret
= f2fs_do_zero_range(&dn
, index
, end
);
1488 f2fs_put_dnode(&dn
);
1490 f2fs_unlock_op(sbi
);
1491 up_write(&F2FS_I(inode
)->i_mmap_sem
);
1492 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
1494 f2fs_balance_fs(sbi
, dn
.node_changed
);
1500 new_size
= max_t(loff_t
, new_size
,
1501 (loff_t
)index
<< PAGE_SHIFT
);
1505 ret
= fill_zero(inode
, pg_end
, 0, off_end
);
1509 new_size
= max_t(loff_t
, new_size
, offset
+ len
);
1514 if (new_size
> i_size_read(inode
)) {
1515 if (mode
& FALLOC_FL_KEEP_SIZE
)
1516 file_set_keep_isize(inode
);
1518 f2fs_i_size_write(inode
, new_size
);
1523 static int f2fs_insert_range(struct inode
*inode
, loff_t offset
, loff_t len
)
1525 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1526 pgoff_t nr
, pg_start
, pg_end
, delta
, idx
;
1530 new_size
= i_size_read(inode
) + len
;
1531 ret
= inode_newsize_ok(inode
, new_size
);
1535 if (offset
>= i_size_read(inode
))
1538 /* insert range should be aligned to block size of f2fs. */
1539 if (offset
& (F2FS_BLKSIZE
- 1) || len
& (F2FS_BLKSIZE
- 1))
1542 ret
= f2fs_convert_inline_inode(inode
);
1546 f2fs_balance_fs(sbi
, true);
1548 down_write(&F2FS_I(inode
)->i_mmap_sem
);
1549 ret
= f2fs_truncate_blocks(inode
, i_size_read(inode
), true);
1550 up_write(&F2FS_I(inode
)->i_mmap_sem
);
1554 /* write out all dirty pages from offset */
1555 ret
= filemap_write_and_wait_range(inode
->i_mapping
, offset
, LLONG_MAX
);
1559 pg_start
= offset
>> PAGE_SHIFT
;
1560 pg_end
= (offset
+ len
) >> PAGE_SHIFT
;
1561 delta
= pg_end
- pg_start
;
1562 idx
= DIV_ROUND_UP(i_size_read(inode
), PAGE_SIZE
);
1564 /* avoid gc operation during block exchange */
1565 down_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
1566 down_write(&F2FS_I(inode
)->i_mmap_sem
);
1567 truncate_pagecache(inode
, offset
);
1569 while (!ret
&& idx
> pg_start
) {
1570 nr
= idx
- pg_start
;
1576 f2fs_drop_extent_tree(inode
);
1578 ret
= __exchange_data_block(inode
, inode
, idx
,
1579 idx
+ delta
, nr
, false);
1580 f2fs_unlock_op(sbi
);
1582 up_write(&F2FS_I(inode
)->i_mmap_sem
);
1583 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
1585 /* write out all moved pages, if possible */
1586 down_write(&F2FS_I(inode
)->i_mmap_sem
);
1587 filemap_write_and_wait_range(inode
->i_mapping
, offset
, LLONG_MAX
);
1588 truncate_pagecache(inode
, offset
);
1589 up_write(&F2FS_I(inode
)->i_mmap_sem
);
1592 f2fs_i_size_write(inode
, new_size
);
1596 static int expand_inode_data(struct inode
*inode
, loff_t offset
,
1597 loff_t len
, int mode
)
1599 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
1600 struct f2fs_map_blocks map
= { .m_next_pgofs
= NULL
,
1601 .m_next_extent
= NULL
, .m_seg_type
= NO_CHECK_TYPE
,
1602 .m_may_create
= true };
1604 loff_t new_size
= i_size_read(inode
);
1608 err
= inode_newsize_ok(inode
, (len
+ offset
));
1612 err
= f2fs_convert_inline_inode(inode
);
1616 f2fs_balance_fs(sbi
, true);
1618 pg_end
= ((unsigned long long)offset
+ len
) >> PAGE_SHIFT
;
1619 off_end
= (offset
+ len
) & (PAGE_SIZE
- 1);
1621 map
.m_lblk
= ((unsigned long long)offset
) >> PAGE_SHIFT
;
1622 map
.m_len
= pg_end
- map
.m_lblk
;
1629 if (f2fs_is_pinned_file(inode
)) {
1630 block_t len
= (map
.m_len
>> sbi
->log_blocks_per_seg
) <<
1631 sbi
->log_blocks_per_seg
;
1634 if (map
.m_len
% sbi
->blocks_per_seg
)
1635 len
+= sbi
->blocks_per_seg
;
1637 map
.m_len
= sbi
->blocks_per_seg
;
1639 if (has_not_enough_free_secs(sbi
, 0,
1640 GET_SEC_FROM_SEG(sbi
, overprovision_segments(sbi
)))) {
1641 down_write(&sbi
->gc_lock
);
1642 err
= f2fs_gc(sbi
, true, false, NULL_SEGNO
);
1643 if (err
&& err
!= -ENODATA
&& err
!= -EAGAIN
)
1647 down_write(&sbi
->pin_sem
);
1650 f2fs_allocate_new_segment(sbi
, CURSEG_COLD_DATA_PINNED
);
1651 f2fs_unlock_op(sbi
);
1653 map
.m_seg_type
= CURSEG_COLD_DATA_PINNED
;
1654 err
= f2fs_map_blocks(inode
, &map
, 1, F2FS_GET_BLOCK_PRE_DIO
);
1656 up_write(&sbi
->pin_sem
);
1660 map
.m_lblk
+= map
.m_len
;
1666 err
= f2fs_map_blocks(inode
, &map
, 1, F2FS_GET_BLOCK_PRE_AIO
);
1675 last_off
= map
.m_lblk
+ map
.m_len
- 1;
1677 /* update new size to the failed position */
1678 new_size
= (last_off
== pg_end
) ? offset
+ len
:
1679 (loff_t
)(last_off
+ 1) << PAGE_SHIFT
;
1681 new_size
= ((loff_t
)pg_end
<< PAGE_SHIFT
) + off_end
;
1684 if (new_size
> i_size_read(inode
)) {
1685 if (mode
& FALLOC_FL_KEEP_SIZE
)
1686 file_set_keep_isize(inode
);
1688 f2fs_i_size_write(inode
, new_size
);
1694 static long f2fs_fallocate(struct file
*file
, int mode
,
1695 loff_t offset
, loff_t len
)
1697 struct inode
*inode
= file_inode(file
);
1700 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode
))))
1702 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode
)))
1704 if (!f2fs_is_compress_backend_ready(inode
))
1707 /* f2fs only support ->fallocate for regular file */
1708 if (!S_ISREG(inode
->i_mode
))
1711 if (IS_ENCRYPTED(inode
) &&
1712 (mode
& (FALLOC_FL_COLLAPSE_RANGE
| FALLOC_FL_INSERT_RANGE
)))
1715 if (f2fs_compressed_file(inode
) &&
1716 (mode
& (FALLOC_FL_PUNCH_HOLE
| FALLOC_FL_COLLAPSE_RANGE
|
1717 FALLOC_FL_ZERO_RANGE
| FALLOC_FL_INSERT_RANGE
)))
1720 if (mode
& ~(FALLOC_FL_KEEP_SIZE
| FALLOC_FL_PUNCH_HOLE
|
1721 FALLOC_FL_COLLAPSE_RANGE
| FALLOC_FL_ZERO_RANGE
|
1722 FALLOC_FL_INSERT_RANGE
))
1727 if (mode
& FALLOC_FL_PUNCH_HOLE
) {
1728 if (offset
>= inode
->i_size
)
1731 ret
= punch_hole(inode
, offset
, len
);
1732 } else if (mode
& FALLOC_FL_COLLAPSE_RANGE
) {
1733 ret
= f2fs_collapse_range(inode
, offset
, len
);
1734 } else if (mode
& FALLOC_FL_ZERO_RANGE
) {
1735 ret
= f2fs_zero_range(inode
, offset
, len
, mode
);
1736 } else if (mode
& FALLOC_FL_INSERT_RANGE
) {
1737 ret
= f2fs_insert_range(inode
, offset
, len
);
1739 ret
= expand_inode_data(inode
, offset
, len
, mode
);
1743 inode
->i_mtime
= inode
->i_ctime
= current_time(inode
);
1744 f2fs_mark_inode_dirty_sync(inode
, false);
1745 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
1749 inode_unlock(inode
);
1751 trace_f2fs_fallocate(inode
, mode
, offset
, len
, ret
);
1755 static int f2fs_release_file(struct inode
*inode
, struct file
*filp
)
1758 * f2fs_relase_file is called at every close calls. So we should
1759 * not drop any inmemory pages by close called by other process.
1761 if (!(filp
->f_mode
& FMODE_WRITE
) ||
1762 atomic_read(&inode
->i_writecount
) != 1)
1765 /* some remained atomic pages should discarded */
1766 if (f2fs_is_atomic_file(inode
))
1767 f2fs_drop_inmem_pages(inode
);
1768 if (f2fs_is_volatile_file(inode
)) {
1769 set_inode_flag(inode
, FI_DROP_CACHE
);
1770 filemap_fdatawrite(inode
->i_mapping
);
1771 clear_inode_flag(inode
, FI_DROP_CACHE
);
1772 clear_inode_flag(inode
, FI_VOLATILE_FILE
);
1773 stat_dec_volatile_write(inode
);
1778 static int f2fs_file_flush(struct file
*file
, fl_owner_t id
)
1780 struct inode
*inode
= file_inode(file
);
1783 * If the process doing a transaction is crashed, we should do
1784 * roll-back. Otherwise, other reader/write can see corrupted database
1785 * until all the writers close its file. Since this should be done
1786 * before dropping file lock, it needs to do in ->flush.
1788 if (f2fs_is_atomic_file(inode
) &&
1789 F2FS_I(inode
)->inmem_task
== current
)
1790 f2fs_drop_inmem_pages(inode
);
1794 static int f2fs_setflags_common(struct inode
*inode
, u32 iflags
, u32 mask
)
1796 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
1797 u32 masked_flags
= fi
->i_flags
& mask
;
1799 f2fs_bug_on(F2FS_I_SB(inode
), (iflags
& ~mask
));
1801 /* Is it quota file? Do not allow user to mess with it */
1802 if (IS_NOQUOTA(inode
))
1805 if ((iflags
^ masked_flags
) & F2FS_CASEFOLD_FL
) {
1806 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode
)))
1808 if (!f2fs_empty_dir(inode
))
1812 if (iflags
& (F2FS_COMPR_FL
| F2FS_NOCOMP_FL
)) {
1813 if (!f2fs_sb_has_compression(F2FS_I_SB(inode
)))
1815 if ((iflags
& F2FS_COMPR_FL
) && (iflags
& F2FS_NOCOMP_FL
))
1819 if ((iflags
^ masked_flags
) & F2FS_COMPR_FL
) {
1820 if (masked_flags
& F2FS_COMPR_FL
) {
1821 if (!f2fs_disable_compressed_file(inode
))
1824 if (iflags
& F2FS_NOCOMP_FL
)
1826 if (iflags
& F2FS_COMPR_FL
) {
1827 if (!f2fs_may_compress(inode
))
1829 if (S_ISREG(inode
->i_mode
) && inode
->i_size
)
1832 set_compress_context(inode
);
1835 if ((iflags
^ masked_flags
) & F2FS_NOCOMP_FL
) {
1836 if (masked_flags
& F2FS_COMPR_FL
)
1840 fi
->i_flags
= iflags
| (fi
->i_flags
& ~mask
);
1841 f2fs_bug_on(F2FS_I_SB(inode
), (fi
->i_flags
& F2FS_COMPR_FL
) &&
1842 (fi
->i_flags
& F2FS_NOCOMP_FL
));
1844 if (fi
->i_flags
& F2FS_PROJINHERIT_FL
)
1845 set_inode_flag(inode
, FI_PROJ_INHERIT
);
1847 clear_inode_flag(inode
, FI_PROJ_INHERIT
);
1849 inode
->i_ctime
= current_time(inode
);
1850 f2fs_set_inode_flags(inode
);
1851 f2fs_mark_inode_dirty_sync(inode
, true);
1855 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1858 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1859 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1860 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1861 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1864 static const struct {
1867 } f2fs_fsflags_map
[] = {
1868 { F2FS_COMPR_FL
, FS_COMPR_FL
},
1869 { F2FS_SYNC_FL
, FS_SYNC_FL
},
1870 { F2FS_IMMUTABLE_FL
, FS_IMMUTABLE_FL
},
1871 { F2FS_APPEND_FL
, FS_APPEND_FL
},
1872 { F2FS_NODUMP_FL
, FS_NODUMP_FL
},
1873 { F2FS_NOATIME_FL
, FS_NOATIME_FL
},
1874 { F2FS_NOCOMP_FL
, FS_NOCOMP_FL
},
1875 { F2FS_INDEX_FL
, FS_INDEX_FL
},
1876 { F2FS_DIRSYNC_FL
, FS_DIRSYNC_FL
},
1877 { F2FS_PROJINHERIT_FL
, FS_PROJINHERIT_FL
},
1878 { F2FS_CASEFOLD_FL
, FS_CASEFOLD_FL
},
1881 #define F2FS_GETTABLE_FS_FL ( \
1891 FS_PROJINHERIT_FL | \
1893 FS_INLINE_DATA_FL | \
1898 #define F2FS_SETTABLE_FS_FL ( \
1907 FS_PROJINHERIT_FL | \
1910 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1911 static inline u32
f2fs_iflags_to_fsflags(u32 iflags
)
1916 for (i
= 0; i
< ARRAY_SIZE(f2fs_fsflags_map
); i
++)
1917 if (iflags
& f2fs_fsflags_map
[i
].iflag
)
1918 fsflags
|= f2fs_fsflags_map
[i
].fsflag
;
1923 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1924 static inline u32
f2fs_fsflags_to_iflags(u32 fsflags
)
1929 for (i
= 0; i
< ARRAY_SIZE(f2fs_fsflags_map
); i
++)
1930 if (fsflags
& f2fs_fsflags_map
[i
].fsflag
)
1931 iflags
|= f2fs_fsflags_map
[i
].iflag
;
1936 static int f2fs_ioc_getflags(struct file
*filp
, unsigned long arg
)
1938 struct inode
*inode
= file_inode(filp
);
1939 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
1940 u32 fsflags
= f2fs_iflags_to_fsflags(fi
->i_flags
);
1942 if (IS_ENCRYPTED(inode
))
1943 fsflags
|= FS_ENCRYPT_FL
;
1944 if (IS_VERITY(inode
))
1945 fsflags
|= FS_VERITY_FL
;
1946 if (f2fs_has_inline_data(inode
) || f2fs_has_inline_dentry(inode
))
1947 fsflags
|= FS_INLINE_DATA_FL
;
1948 if (is_inode_flag_set(inode
, FI_PIN_FILE
))
1949 fsflags
|= FS_NOCOW_FL
;
1951 fsflags
&= F2FS_GETTABLE_FS_FL
;
1953 return put_user(fsflags
, (int __user
*)arg
);
1956 static int f2fs_ioc_setflags(struct file
*filp
, unsigned long arg
)
1958 struct inode
*inode
= file_inode(filp
);
1959 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
1960 u32 fsflags
, old_fsflags
;
1964 if (!inode_owner_or_capable(inode
))
1967 if (get_user(fsflags
, (int __user
*)arg
))
1970 if (fsflags
& ~F2FS_GETTABLE_FS_FL
)
1972 fsflags
&= F2FS_SETTABLE_FS_FL
;
1974 iflags
= f2fs_fsflags_to_iflags(fsflags
);
1975 if (f2fs_mask_flags(inode
->i_mode
, iflags
) != iflags
)
1978 ret
= mnt_want_write_file(filp
);
1984 old_fsflags
= f2fs_iflags_to_fsflags(fi
->i_flags
);
1985 ret
= vfs_ioc_setflags_prepare(inode
, old_fsflags
, fsflags
);
1989 ret
= f2fs_setflags_common(inode
, iflags
,
1990 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL
));
1992 inode_unlock(inode
);
1993 mnt_drop_write_file(filp
);
1997 static int f2fs_ioc_getversion(struct file
*filp
, unsigned long arg
)
1999 struct inode
*inode
= file_inode(filp
);
2001 return put_user(inode
->i_generation
, (int __user
*)arg
);
2004 static int f2fs_ioc_start_atomic_write(struct file
*filp
)
2006 struct inode
*inode
= file_inode(filp
);
2007 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
2008 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2011 if (!inode_owner_or_capable(inode
))
2014 if (!S_ISREG(inode
->i_mode
))
2017 if (filp
->f_flags
& O_DIRECT
)
2020 ret
= mnt_want_write_file(filp
);
2026 f2fs_disable_compressed_file(inode
);
2028 if (f2fs_is_atomic_file(inode
)) {
2029 if (is_inode_flag_set(inode
, FI_ATOMIC_REVOKE_REQUEST
))
2034 ret
= f2fs_convert_inline_inode(inode
);
2038 down_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
2041 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
2042 * f2fs_is_atomic_file.
2044 if (get_dirty_pages(inode
))
2045 f2fs_warn(F2FS_I_SB(inode
), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2046 inode
->i_ino
, get_dirty_pages(inode
));
2047 ret
= filemap_write_and_wait_range(inode
->i_mapping
, 0, LLONG_MAX
);
2049 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
2053 spin_lock(&sbi
->inode_lock
[ATOMIC_FILE
]);
2054 if (list_empty(&fi
->inmem_ilist
))
2055 list_add_tail(&fi
->inmem_ilist
, &sbi
->inode_list
[ATOMIC_FILE
]);
2056 sbi
->atomic_files
++;
2057 spin_unlock(&sbi
->inode_lock
[ATOMIC_FILE
]);
2059 /* add inode in inmem_list first and set atomic_file */
2060 set_inode_flag(inode
, FI_ATOMIC_FILE
);
2061 clear_inode_flag(inode
, FI_ATOMIC_REVOKE_REQUEST
);
2062 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
2064 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
2065 F2FS_I(inode
)->inmem_task
= current
;
2066 stat_update_max_atomic_write(inode
);
2068 inode_unlock(inode
);
2069 mnt_drop_write_file(filp
);
2073 static int f2fs_ioc_commit_atomic_write(struct file
*filp
)
2075 struct inode
*inode
= file_inode(filp
);
2078 if (!inode_owner_or_capable(inode
))
2081 ret
= mnt_want_write_file(filp
);
2085 f2fs_balance_fs(F2FS_I_SB(inode
), true);
2089 if (f2fs_is_volatile_file(inode
)) {
2094 if (f2fs_is_atomic_file(inode
)) {
2095 ret
= f2fs_commit_inmem_pages(inode
);
2099 ret
= f2fs_do_sync_file(filp
, 0, LLONG_MAX
, 0, true);
2101 f2fs_drop_inmem_pages(inode
);
2103 ret
= f2fs_do_sync_file(filp
, 0, LLONG_MAX
, 1, false);
2106 if (is_inode_flag_set(inode
, FI_ATOMIC_REVOKE_REQUEST
)) {
2107 clear_inode_flag(inode
, FI_ATOMIC_REVOKE_REQUEST
);
2110 inode_unlock(inode
);
2111 mnt_drop_write_file(filp
);
2115 static int f2fs_ioc_start_volatile_write(struct file
*filp
)
2117 struct inode
*inode
= file_inode(filp
);
2120 if (!inode_owner_or_capable(inode
))
2123 if (!S_ISREG(inode
->i_mode
))
2126 ret
= mnt_want_write_file(filp
);
2132 if (f2fs_is_volatile_file(inode
))
2135 ret
= f2fs_convert_inline_inode(inode
);
2139 stat_inc_volatile_write(inode
);
2140 stat_update_max_volatile_write(inode
);
2142 set_inode_flag(inode
, FI_VOLATILE_FILE
);
2143 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
2145 inode_unlock(inode
);
2146 mnt_drop_write_file(filp
);
2150 static int f2fs_ioc_release_volatile_write(struct file
*filp
)
2152 struct inode
*inode
= file_inode(filp
);
2155 if (!inode_owner_or_capable(inode
))
2158 ret
= mnt_want_write_file(filp
);
2164 if (!f2fs_is_volatile_file(inode
))
2167 if (!f2fs_is_first_block_written(inode
)) {
2168 ret
= truncate_partial_data_page(inode
, 0, true);
2172 ret
= punch_hole(inode
, 0, F2FS_BLKSIZE
);
2174 inode_unlock(inode
);
2175 mnt_drop_write_file(filp
);
2179 static int f2fs_ioc_abort_volatile_write(struct file
*filp
)
2181 struct inode
*inode
= file_inode(filp
);
2184 if (!inode_owner_or_capable(inode
))
2187 ret
= mnt_want_write_file(filp
);
2193 if (f2fs_is_atomic_file(inode
))
2194 f2fs_drop_inmem_pages(inode
);
2195 if (f2fs_is_volatile_file(inode
)) {
2196 clear_inode_flag(inode
, FI_VOLATILE_FILE
);
2197 stat_dec_volatile_write(inode
);
2198 ret
= f2fs_do_sync_file(filp
, 0, LLONG_MAX
, 0, true);
2201 clear_inode_flag(inode
, FI_ATOMIC_REVOKE_REQUEST
);
2203 inode_unlock(inode
);
2205 mnt_drop_write_file(filp
);
2206 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
2210 static int f2fs_ioc_shutdown(struct file
*filp
, unsigned long arg
)
2212 struct inode
*inode
= file_inode(filp
);
2213 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2214 struct super_block
*sb
= sbi
->sb
;
2218 if (!capable(CAP_SYS_ADMIN
))
2221 if (get_user(in
, (__u32 __user
*)arg
))
2224 if (in
!= F2FS_GOING_DOWN_FULLSYNC
) {
2225 ret
= mnt_want_write_file(filp
);
2227 if (ret
== -EROFS
) {
2229 f2fs_stop_checkpoint(sbi
, false);
2230 set_sbi_flag(sbi
, SBI_IS_SHUTDOWN
);
2231 trace_f2fs_shutdown(sbi
, in
, ret
);
2238 case F2FS_GOING_DOWN_FULLSYNC
:
2239 ret
= freeze_bdev(sb
->s_bdev
);
2242 f2fs_stop_checkpoint(sbi
, false);
2243 set_sbi_flag(sbi
, SBI_IS_SHUTDOWN
);
2244 thaw_bdev(sb
->s_bdev
);
2246 case F2FS_GOING_DOWN_METASYNC
:
2247 /* do checkpoint only */
2248 ret
= f2fs_sync_fs(sb
, 1);
2251 f2fs_stop_checkpoint(sbi
, false);
2252 set_sbi_flag(sbi
, SBI_IS_SHUTDOWN
);
2254 case F2FS_GOING_DOWN_NOSYNC
:
2255 f2fs_stop_checkpoint(sbi
, false);
2256 set_sbi_flag(sbi
, SBI_IS_SHUTDOWN
);
2258 case F2FS_GOING_DOWN_METAFLUSH
:
2259 f2fs_sync_meta_pages(sbi
, META
, LONG_MAX
, FS_META_IO
);
2260 f2fs_stop_checkpoint(sbi
, false);
2261 set_sbi_flag(sbi
, SBI_IS_SHUTDOWN
);
2263 case F2FS_GOING_DOWN_NEED_FSCK
:
2264 set_sbi_flag(sbi
, SBI_NEED_FSCK
);
2265 set_sbi_flag(sbi
, SBI_CP_DISABLED_QUICK
);
2266 set_sbi_flag(sbi
, SBI_IS_DIRTY
);
2267 /* do checkpoint only */
2268 ret
= f2fs_sync_fs(sb
, 1);
2275 f2fs_stop_gc_thread(sbi
);
2276 f2fs_stop_discard_thread(sbi
);
2278 f2fs_drop_discard_cmd(sbi
);
2279 clear_opt(sbi
, DISCARD
);
2281 f2fs_update_time(sbi
, REQ_TIME
);
2283 if (in
!= F2FS_GOING_DOWN_FULLSYNC
)
2284 mnt_drop_write_file(filp
);
2286 trace_f2fs_shutdown(sbi
, in
, ret
);
2291 static int f2fs_ioc_fitrim(struct file
*filp
, unsigned long arg
)
2293 struct inode
*inode
= file_inode(filp
);
2294 struct super_block
*sb
= inode
->i_sb
;
2295 struct request_queue
*q
= bdev_get_queue(sb
->s_bdev
);
2296 struct fstrim_range range
;
2299 if (!capable(CAP_SYS_ADMIN
))
2302 if (!f2fs_hw_support_discard(F2FS_SB(sb
)))
2305 if (copy_from_user(&range
, (struct fstrim_range __user
*)arg
,
2309 ret
= mnt_want_write_file(filp
);
2313 range
.minlen
= max((unsigned int)range
.minlen
,
2314 q
->limits
.discard_granularity
);
2315 ret
= f2fs_trim_fs(F2FS_SB(sb
), &range
);
2316 mnt_drop_write_file(filp
);
2320 if (copy_to_user((struct fstrim_range __user
*)arg
, &range
,
2323 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
2327 static bool uuid_is_nonzero(__u8 u
[16])
2331 for (i
= 0; i
< 16; i
++)
2337 static int f2fs_ioc_set_encryption_policy(struct file
*filp
, unsigned long arg
)
2339 struct inode
*inode
= file_inode(filp
);
2341 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode
)))
2344 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
2346 return fscrypt_ioctl_set_policy(filp
, (const void __user
*)arg
);
2349 static int f2fs_ioc_get_encryption_policy(struct file
*filp
, unsigned long arg
)
2351 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp
))))
2353 return fscrypt_ioctl_get_policy(filp
, (void __user
*)arg
);
2356 static int f2fs_ioc_get_encryption_pwsalt(struct file
*filp
, unsigned long arg
)
2358 struct inode
*inode
= file_inode(filp
);
2359 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2362 if (!f2fs_sb_has_encrypt(sbi
))
2365 err
= mnt_want_write_file(filp
);
2369 down_write(&sbi
->sb_lock
);
2371 if (uuid_is_nonzero(sbi
->raw_super
->encrypt_pw_salt
))
2374 /* update superblock with uuid */
2375 generate_random_uuid(sbi
->raw_super
->encrypt_pw_salt
);
2377 err
= f2fs_commit_super(sbi
, false);
2380 memset(sbi
->raw_super
->encrypt_pw_salt
, 0, 16);
2384 if (copy_to_user((__u8 __user
*)arg
, sbi
->raw_super
->encrypt_pw_salt
,
2388 up_write(&sbi
->sb_lock
);
2389 mnt_drop_write_file(filp
);
2393 static int f2fs_ioc_get_encryption_policy_ex(struct file
*filp
,
2396 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp
))))
2399 return fscrypt_ioctl_get_policy_ex(filp
, (void __user
*)arg
);
2402 static int f2fs_ioc_add_encryption_key(struct file
*filp
, unsigned long arg
)
2404 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp
))))
2407 return fscrypt_ioctl_add_key(filp
, (void __user
*)arg
);
2410 static int f2fs_ioc_remove_encryption_key(struct file
*filp
, unsigned long arg
)
2412 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp
))))
2415 return fscrypt_ioctl_remove_key(filp
, (void __user
*)arg
);
2418 static int f2fs_ioc_remove_encryption_key_all_users(struct file
*filp
,
2421 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp
))))
2424 return fscrypt_ioctl_remove_key_all_users(filp
, (void __user
*)arg
);
2427 static int f2fs_ioc_get_encryption_key_status(struct file
*filp
,
2430 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp
))))
2433 return fscrypt_ioctl_get_key_status(filp
, (void __user
*)arg
);
2436 static int f2fs_ioc_get_encryption_nonce(struct file
*filp
, unsigned long arg
)
2438 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp
))))
2441 return fscrypt_ioctl_get_nonce(filp
, (void __user
*)arg
);
2444 static int f2fs_ioc_gc(struct file
*filp
, unsigned long arg
)
2446 struct inode
*inode
= file_inode(filp
);
2447 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2451 if (!capable(CAP_SYS_ADMIN
))
2454 if (get_user(sync
, (__u32 __user
*)arg
))
2457 if (f2fs_readonly(sbi
->sb
))
2460 ret
= mnt_want_write_file(filp
);
2465 if (!down_write_trylock(&sbi
->gc_lock
)) {
2470 down_write(&sbi
->gc_lock
);
2473 ret
= f2fs_gc(sbi
, sync
, true, NULL_SEGNO
);
2475 mnt_drop_write_file(filp
);
2479 static int __f2fs_ioc_gc_range(struct file
*filp
, struct f2fs_gc_range
*range
)
2481 struct f2fs_sb_info
*sbi
= F2FS_I_SB(file_inode(filp
));
2485 if (!capable(CAP_SYS_ADMIN
))
2487 if (f2fs_readonly(sbi
->sb
))
2490 end
= range
->start
+ range
->len
;
2491 if (end
< range
->start
|| range
->start
< MAIN_BLKADDR(sbi
) ||
2492 end
>= MAX_BLKADDR(sbi
))
2495 ret
= mnt_want_write_file(filp
);
2501 if (!down_write_trylock(&sbi
->gc_lock
)) {
2506 down_write(&sbi
->gc_lock
);
2509 ret
= f2fs_gc(sbi
, range
->sync
, true, GET_SEGNO(sbi
, range
->start
));
2515 range
->start
+= BLKS_PER_SEC(sbi
);
2516 if (range
->start
<= end
)
2519 mnt_drop_write_file(filp
);
2523 static int f2fs_ioc_gc_range(struct file
*filp
, unsigned long arg
)
2525 struct f2fs_gc_range range
;
2527 if (copy_from_user(&range
, (struct f2fs_gc_range __user
*)arg
,
2530 return __f2fs_ioc_gc_range(filp
, &range
);
2533 static int f2fs_ioc_write_checkpoint(struct file
*filp
, unsigned long arg
)
2535 struct inode
*inode
= file_inode(filp
);
2536 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2539 if (!capable(CAP_SYS_ADMIN
))
2542 if (f2fs_readonly(sbi
->sb
))
2545 if (unlikely(is_sbi_flag_set(sbi
, SBI_CP_DISABLED
))) {
2546 f2fs_info(sbi
, "Skipping Checkpoint. Checkpoints currently disabled.");
2550 ret
= mnt_want_write_file(filp
);
2554 ret
= f2fs_sync_fs(sbi
->sb
, 1);
2556 mnt_drop_write_file(filp
);
2560 static int f2fs_defragment_range(struct f2fs_sb_info
*sbi
,
2562 struct f2fs_defragment
*range
)
2564 struct inode
*inode
= file_inode(filp
);
2565 struct f2fs_map_blocks map
= { .m_next_extent
= NULL
,
2566 .m_seg_type
= NO_CHECK_TYPE
,
2567 .m_may_create
= false };
2568 struct extent_info ei
= {0, 0, 0};
2569 pgoff_t pg_start
, pg_end
, next_pgofs
;
2570 unsigned int blk_per_seg
= sbi
->blocks_per_seg
;
2571 unsigned int total
= 0, sec_num
;
2572 block_t blk_end
= 0;
2573 bool fragmented
= false;
2576 /* if in-place-update policy is enabled, don't waste time here */
2577 if (f2fs_should_update_inplace(inode
, NULL
))
2580 pg_start
= range
->start
>> PAGE_SHIFT
;
2581 pg_end
= (range
->start
+ range
->len
) >> PAGE_SHIFT
;
2583 f2fs_balance_fs(sbi
, true);
2587 /* writeback all dirty pages in the range */
2588 err
= filemap_write_and_wait_range(inode
->i_mapping
, range
->start
,
2589 range
->start
+ range
->len
- 1);
2594 * lookup mapping info in extent cache, skip defragmenting if physical
2595 * block addresses are continuous.
2597 if (f2fs_lookup_extent_cache(inode
, pg_start
, &ei
)) {
2598 if (ei
.fofs
+ ei
.len
>= pg_end
)
2602 map
.m_lblk
= pg_start
;
2603 map
.m_next_pgofs
= &next_pgofs
;
2606 * lookup mapping info in dnode page cache, skip defragmenting if all
2607 * physical block addresses are continuous even if there are hole(s)
2608 * in logical blocks.
2610 while (map
.m_lblk
< pg_end
) {
2611 map
.m_len
= pg_end
- map
.m_lblk
;
2612 err
= f2fs_map_blocks(inode
, &map
, 0, F2FS_GET_BLOCK_DEFAULT
);
2616 if (!(map
.m_flags
& F2FS_MAP_FLAGS
)) {
2617 map
.m_lblk
= next_pgofs
;
2621 if (blk_end
&& blk_end
!= map
.m_pblk
)
2624 /* record total count of block that we're going to move */
2627 blk_end
= map
.m_pblk
+ map
.m_len
;
2629 map
.m_lblk
+= map
.m_len
;
2637 sec_num
= DIV_ROUND_UP(total
, BLKS_PER_SEC(sbi
));
2640 * make sure there are enough free section for LFS allocation, this can
2641 * avoid defragment running in SSR mode when free section are allocated
2644 if (has_not_enough_free_secs(sbi
, 0, sec_num
)) {
2649 map
.m_lblk
= pg_start
;
2650 map
.m_len
= pg_end
- pg_start
;
2653 while (map
.m_lblk
< pg_end
) {
2658 map
.m_len
= pg_end
- map
.m_lblk
;
2659 err
= f2fs_map_blocks(inode
, &map
, 0, F2FS_GET_BLOCK_DEFAULT
);
2663 if (!(map
.m_flags
& F2FS_MAP_FLAGS
)) {
2664 map
.m_lblk
= next_pgofs
;
2668 set_inode_flag(inode
, FI_DO_DEFRAG
);
2671 while (idx
< map
.m_lblk
+ map
.m_len
&& cnt
< blk_per_seg
) {
2674 page
= f2fs_get_lock_data_page(inode
, idx
, true);
2676 err
= PTR_ERR(page
);
2680 set_page_dirty(page
);
2681 f2fs_put_page(page
, 1);
2690 if (map
.m_lblk
< pg_end
&& cnt
< blk_per_seg
)
2693 clear_inode_flag(inode
, FI_DO_DEFRAG
);
2695 err
= filemap_fdatawrite(inode
->i_mapping
);
2700 clear_inode_flag(inode
, FI_DO_DEFRAG
);
2702 inode_unlock(inode
);
2704 range
->len
= (u64
)total
<< PAGE_SHIFT
;
2708 static int f2fs_ioc_defragment(struct file
*filp
, unsigned long arg
)
2710 struct inode
*inode
= file_inode(filp
);
2711 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2712 struct f2fs_defragment range
;
2715 if (!capable(CAP_SYS_ADMIN
))
2718 if (!S_ISREG(inode
->i_mode
) || f2fs_is_atomic_file(inode
))
2721 if (f2fs_readonly(sbi
->sb
))
2724 if (copy_from_user(&range
, (struct f2fs_defragment __user
*)arg
,
2728 /* verify alignment of offset & size */
2729 if (range
.start
& (F2FS_BLKSIZE
- 1) || range
.len
& (F2FS_BLKSIZE
- 1))
2732 if (unlikely((range
.start
+ range
.len
) >> PAGE_SHIFT
>
2733 sbi
->max_file_blocks
))
2736 err
= mnt_want_write_file(filp
);
2740 err
= f2fs_defragment_range(sbi
, filp
, &range
);
2741 mnt_drop_write_file(filp
);
2743 f2fs_update_time(sbi
, REQ_TIME
);
2747 if (copy_to_user((struct f2fs_defragment __user
*)arg
, &range
,
2754 static int f2fs_move_file_range(struct file
*file_in
, loff_t pos_in
,
2755 struct file
*file_out
, loff_t pos_out
, size_t len
)
2757 struct inode
*src
= file_inode(file_in
);
2758 struct inode
*dst
= file_inode(file_out
);
2759 struct f2fs_sb_info
*sbi
= F2FS_I_SB(src
);
2760 size_t olen
= len
, dst_max_i_size
= 0;
2764 if (file_in
->f_path
.mnt
!= file_out
->f_path
.mnt
||
2765 src
->i_sb
!= dst
->i_sb
)
2768 if (unlikely(f2fs_readonly(src
->i_sb
)))
2771 if (!S_ISREG(src
->i_mode
) || !S_ISREG(dst
->i_mode
))
2774 if (IS_ENCRYPTED(src
) || IS_ENCRYPTED(dst
))
2777 if (pos_out
< 0 || pos_in
< 0)
2781 if (pos_in
== pos_out
)
2783 if (pos_out
> pos_in
&& pos_out
< pos_in
+ len
)
2790 if (!inode_trylock(dst
))
2795 if (pos_in
+ len
> src
->i_size
|| pos_in
+ len
< pos_in
)
2798 olen
= len
= src
->i_size
- pos_in
;
2799 if (pos_in
+ len
== src
->i_size
)
2800 len
= ALIGN(src
->i_size
, F2FS_BLKSIZE
) - pos_in
;
2806 dst_osize
= dst
->i_size
;
2807 if (pos_out
+ olen
> dst
->i_size
)
2808 dst_max_i_size
= pos_out
+ olen
;
2810 /* verify the end result is block aligned */
2811 if (!IS_ALIGNED(pos_in
, F2FS_BLKSIZE
) ||
2812 !IS_ALIGNED(pos_in
+ len
, F2FS_BLKSIZE
) ||
2813 !IS_ALIGNED(pos_out
, F2FS_BLKSIZE
))
2816 ret
= f2fs_convert_inline_inode(src
);
2820 ret
= f2fs_convert_inline_inode(dst
);
2824 /* write out all dirty pages from offset */
2825 ret
= filemap_write_and_wait_range(src
->i_mapping
,
2826 pos_in
, pos_in
+ len
);
2830 ret
= filemap_write_and_wait_range(dst
->i_mapping
,
2831 pos_out
, pos_out
+ len
);
2835 f2fs_balance_fs(sbi
, true);
2837 down_write(&F2FS_I(src
)->i_gc_rwsem
[WRITE
]);
2840 if (!down_write_trylock(&F2FS_I(dst
)->i_gc_rwsem
[WRITE
]))
2845 ret
= __exchange_data_block(src
, dst
, pos_in
>> F2FS_BLKSIZE_BITS
,
2846 pos_out
>> F2FS_BLKSIZE_BITS
,
2847 len
>> F2FS_BLKSIZE_BITS
, false);
2851 f2fs_i_size_write(dst
, dst_max_i_size
);
2852 else if (dst_osize
!= dst
->i_size
)
2853 f2fs_i_size_write(dst
, dst_osize
);
2855 f2fs_unlock_op(sbi
);
2858 up_write(&F2FS_I(dst
)->i_gc_rwsem
[WRITE
]);
2860 up_write(&F2FS_I(src
)->i_gc_rwsem
[WRITE
]);
2869 static int __f2fs_ioc_move_range(struct file
*filp
,
2870 struct f2fs_move_range
*range
)
2875 if (!(filp
->f_mode
& FMODE_READ
) ||
2876 !(filp
->f_mode
& FMODE_WRITE
))
2879 dst
= fdget(range
->dst_fd
);
2883 if (!(dst
.file
->f_mode
& FMODE_WRITE
)) {
2888 err
= mnt_want_write_file(filp
);
2892 err
= f2fs_move_file_range(filp
, range
->pos_in
, dst
.file
,
2893 range
->pos_out
, range
->len
);
2895 mnt_drop_write_file(filp
);
2901 static int f2fs_ioc_move_range(struct file
*filp
, unsigned long arg
)
2903 struct f2fs_move_range range
;
2905 if (copy_from_user(&range
, (struct f2fs_move_range __user
*)arg
,
2908 return __f2fs_ioc_move_range(filp
, &range
);
2911 static int f2fs_ioc_flush_device(struct file
*filp
, unsigned long arg
)
2913 struct inode
*inode
= file_inode(filp
);
2914 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2915 struct sit_info
*sm
= SIT_I(sbi
);
2916 unsigned int start_segno
= 0, end_segno
= 0;
2917 unsigned int dev_start_segno
= 0, dev_end_segno
= 0;
2918 struct f2fs_flush_device range
;
2921 if (!capable(CAP_SYS_ADMIN
))
2924 if (f2fs_readonly(sbi
->sb
))
2927 if (unlikely(is_sbi_flag_set(sbi
, SBI_CP_DISABLED
)))
2930 if (copy_from_user(&range
, (struct f2fs_flush_device __user
*)arg
,
2934 if (!f2fs_is_multi_device(sbi
) || sbi
->s_ndevs
- 1 <= range
.dev_num
||
2935 __is_large_section(sbi
)) {
2936 f2fs_warn(sbi
, "Can't flush %u in %d for segs_per_sec %u != 1",
2937 range
.dev_num
, sbi
->s_ndevs
, sbi
->segs_per_sec
);
2941 ret
= mnt_want_write_file(filp
);
2945 if (range
.dev_num
!= 0)
2946 dev_start_segno
= GET_SEGNO(sbi
, FDEV(range
.dev_num
).start_blk
);
2947 dev_end_segno
= GET_SEGNO(sbi
, FDEV(range
.dev_num
).end_blk
);
2949 start_segno
= sm
->last_victim
[FLUSH_DEVICE
];
2950 if (start_segno
< dev_start_segno
|| start_segno
>= dev_end_segno
)
2951 start_segno
= dev_start_segno
;
2952 end_segno
= min(start_segno
+ range
.segments
, dev_end_segno
);
2954 while (start_segno
< end_segno
) {
2955 if (!down_write_trylock(&sbi
->gc_lock
)) {
2959 sm
->last_victim
[GC_CB
] = end_segno
+ 1;
2960 sm
->last_victim
[GC_GREEDY
] = end_segno
+ 1;
2961 sm
->last_victim
[ALLOC_NEXT
] = end_segno
+ 1;
2962 ret
= f2fs_gc(sbi
, true, true, start_segno
);
2970 mnt_drop_write_file(filp
);
2974 static int f2fs_ioc_get_features(struct file
*filp
, unsigned long arg
)
2976 struct inode
*inode
= file_inode(filp
);
2977 u32 sb_feature
= le32_to_cpu(F2FS_I_SB(inode
)->raw_super
->feature
);
2979 /* Must validate to set it with SQLite behavior in Android. */
2980 sb_feature
|= F2FS_FEATURE_ATOMIC_WRITE
;
2982 return put_user(sb_feature
, (u32 __user
*)arg
);
2986 int f2fs_transfer_project_quota(struct inode
*inode
, kprojid_t kprojid
)
2988 struct dquot
*transfer_to
[MAXQUOTAS
] = {};
2989 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
2990 struct super_block
*sb
= sbi
->sb
;
2993 transfer_to
[PRJQUOTA
] = dqget(sb
, make_kqid_projid(kprojid
));
2994 if (!IS_ERR(transfer_to
[PRJQUOTA
])) {
2995 err
= __dquot_transfer(inode
, transfer_to
);
2997 set_sbi_flag(sbi
, SBI_QUOTA_NEED_REPAIR
);
2998 dqput(transfer_to
[PRJQUOTA
]);
3003 static int f2fs_ioc_setproject(struct file
*filp
, __u32 projid
)
3005 struct inode
*inode
= file_inode(filp
);
3006 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
3007 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3012 if (!f2fs_sb_has_project_quota(sbi
)) {
3013 if (projid
!= F2FS_DEF_PROJID
)
3019 if (!f2fs_has_extra_attr(inode
))
3022 kprojid
= make_kprojid(&init_user_ns
, (projid_t
)projid
);
3024 if (projid_eq(kprojid
, F2FS_I(inode
)->i_projid
))
3028 /* Is it quota file? Do not allow user to mess with it */
3029 if (IS_NOQUOTA(inode
))
3032 ipage
= f2fs_get_node_page(sbi
, inode
->i_ino
);
3034 return PTR_ERR(ipage
);
3036 if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage
), fi
->i_extra_isize
,
3039 f2fs_put_page(ipage
, 1);
3042 f2fs_put_page(ipage
, 1);
3044 err
= dquot_initialize(inode
);
3049 err
= f2fs_transfer_project_quota(inode
, kprojid
);
3053 F2FS_I(inode
)->i_projid
= kprojid
;
3054 inode
->i_ctime
= current_time(inode
);
3055 f2fs_mark_inode_dirty_sync(inode
, true);
3057 f2fs_unlock_op(sbi
);
3061 int f2fs_transfer_project_quota(struct inode
*inode
, kprojid_t kprojid
)
3066 static int f2fs_ioc_setproject(struct file
*filp
, __u32 projid
)
3068 if (projid
!= F2FS_DEF_PROJID
)
3074 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3077 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3078 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3079 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3082 static const struct {
3085 } f2fs_xflags_map
[] = {
3086 { F2FS_SYNC_FL
, FS_XFLAG_SYNC
},
3087 { F2FS_IMMUTABLE_FL
, FS_XFLAG_IMMUTABLE
},
3088 { F2FS_APPEND_FL
, FS_XFLAG_APPEND
},
3089 { F2FS_NODUMP_FL
, FS_XFLAG_NODUMP
},
3090 { F2FS_NOATIME_FL
, FS_XFLAG_NOATIME
},
3091 { F2FS_PROJINHERIT_FL
, FS_XFLAG_PROJINHERIT
},
3094 #define F2FS_SUPPORTED_XFLAGS ( \
3096 FS_XFLAG_IMMUTABLE | \
3099 FS_XFLAG_NOATIME | \
3100 FS_XFLAG_PROJINHERIT)
3102 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3103 static inline u32
f2fs_iflags_to_xflags(u32 iflags
)
3108 for (i
= 0; i
< ARRAY_SIZE(f2fs_xflags_map
); i
++)
3109 if (iflags
& f2fs_xflags_map
[i
].iflag
)
3110 xflags
|= f2fs_xflags_map
[i
].xflag
;
3115 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3116 static inline u32
f2fs_xflags_to_iflags(u32 xflags
)
3121 for (i
= 0; i
< ARRAY_SIZE(f2fs_xflags_map
); i
++)
3122 if (xflags
& f2fs_xflags_map
[i
].xflag
)
3123 iflags
|= f2fs_xflags_map
[i
].iflag
;
3128 static void f2fs_fill_fsxattr(struct inode
*inode
, struct fsxattr
*fa
)
3130 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
3132 simple_fill_fsxattr(fa
, f2fs_iflags_to_xflags(fi
->i_flags
));
3134 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode
)))
3135 fa
->fsx_projid
= from_kprojid(&init_user_ns
, fi
->i_projid
);
3138 static int f2fs_ioc_fsgetxattr(struct file
*filp
, unsigned long arg
)
3140 struct inode
*inode
= file_inode(filp
);
3143 f2fs_fill_fsxattr(inode
, &fa
);
3145 if (copy_to_user((struct fsxattr __user
*)arg
, &fa
, sizeof(fa
)))
3150 static int f2fs_ioc_fssetxattr(struct file
*filp
, unsigned long arg
)
3152 struct inode
*inode
= file_inode(filp
);
3153 struct fsxattr fa
, old_fa
;
3157 if (copy_from_user(&fa
, (struct fsxattr __user
*)arg
, sizeof(fa
)))
3160 /* Make sure caller has proper permission */
3161 if (!inode_owner_or_capable(inode
))
3164 if (fa
.fsx_xflags
& ~F2FS_SUPPORTED_XFLAGS
)
3167 iflags
= f2fs_xflags_to_iflags(fa
.fsx_xflags
);
3168 if (f2fs_mask_flags(inode
->i_mode
, iflags
) != iflags
)
3171 err
= mnt_want_write_file(filp
);
3177 f2fs_fill_fsxattr(inode
, &old_fa
);
3178 err
= vfs_ioc_fssetxattr_check(inode
, &old_fa
, &fa
);
3182 err
= f2fs_setflags_common(inode
, iflags
,
3183 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS
));
3187 err
= f2fs_ioc_setproject(filp
, fa
.fsx_projid
);
3189 inode_unlock(inode
);
3190 mnt_drop_write_file(filp
);
3194 int f2fs_pin_file_control(struct inode
*inode
, bool inc
)
3196 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
3197 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3199 /* Use i_gc_failures for normal file as a risk signal. */
3201 f2fs_i_gc_failures_write(inode
,
3202 fi
->i_gc_failures
[GC_FAILURE_PIN
] + 1);
3204 if (fi
->i_gc_failures
[GC_FAILURE_PIN
] > sbi
->gc_pin_file_threshold
) {
3205 f2fs_warn(sbi
, "%s: Enable GC = ino %lx after %x GC trials",
3206 __func__
, inode
->i_ino
,
3207 fi
->i_gc_failures
[GC_FAILURE_PIN
]);
3208 clear_inode_flag(inode
, FI_PIN_FILE
);
3214 static int f2fs_ioc_set_pin_file(struct file
*filp
, unsigned long arg
)
3216 struct inode
*inode
= file_inode(filp
);
3220 if (get_user(pin
, (__u32 __user
*)arg
))
3223 if (!S_ISREG(inode
->i_mode
))
3226 if (f2fs_readonly(F2FS_I_SB(inode
)->sb
))
3229 ret
= mnt_want_write_file(filp
);
3235 if (f2fs_should_update_outplace(inode
, NULL
)) {
3241 clear_inode_flag(inode
, FI_PIN_FILE
);
3242 f2fs_i_gc_failures_write(inode
, 0);
3246 if (f2fs_pin_file_control(inode
, false)) {
3251 ret
= f2fs_convert_inline_inode(inode
);
3255 if (!f2fs_disable_compressed_file(inode
)) {
3260 set_inode_flag(inode
, FI_PIN_FILE
);
3261 ret
= F2FS_I(inode
)->i_gc_failures
[GC_FAILURE_PIN
];
3263 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
3265 inode_unlock(inode
);
3266 mnt_drop_write_file(filp
);
3270 static int f2fs_ioc_get_pin_file(struct file
*filp
, unsigned long arg
)
3272 struct inode
*inode
= file_inode(filp
);
3275 if (is_inode_flag_set(inode
, FI_PIN_FILE
))
3276 pin
= F2FS_I(inode
)->i_gc_failures
[GC_FAILURE_PIN
];
3277 return put_user(pin
, (u32 __user
*)arg
);
3280 int f2fs_precache_extents(struct inode
*inode
)
3282 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
3283 struct f2fs_map_blocks map
;
3284 pgoff_t m_next_extent
;
3288 if (is_inode_flag_set(inode
, FI_NO_EXTENT
))
3292 map
.m_next_pgofs
= NULL
;
3293 map
.m_next_extent
= &m_next_extent
;
3294 map
.m_seg_type
= NO_CHECK_TYPE
;
3295 map
.m_may_create
= false;
3296 end
= F2FS_I_SB(inode
)->max_file_blocks
;
3298 while (map
.m_lblk
< end
) {
3299 map
.m_len
= end
- map
.m_lblk
;
3301 down_write(&fi
->i_gc_rwsem
[WRITE
]);
3302 err
= f2fs_map_blocks(inode
, &map
, 0, F2FS_GET_BLOCK_PRECACHE
);
3303 up_write(&fi
->i_gc_rwsem
[WRITE
]);
3307 map
.m_lblk
= m_next_extent
;
3313 static int f2fs_ioc_precache_extents(struct file
*filp
, unsigned long arg
)
3315 return f2fs_precache_extents(file_inode(filp
));
3318 static int f2fs_ioc_resize_fs(struct file
*filp
, unsigned long arg
)
3320 struct f2fs_sb_info
*sbi
= F2FS_I_SB(file_inode(filp
));
3323 if (!capable(CAP_SYS_ADMIN
))
3326 if (f2fs_readonly(sbi
->sb
))
3329 if (copy_from_user(&block_count
, (void __user
*)arg
,
3330 sizeof(block_count
)))
3333 return f2fs_resize_fs(sbi
, block_count
);
3336 static int f2fs_ioc_enable_verity(struct file
*filp
, unsigned long arg
)
3338 struct inode
*inode
= file_inode(filp
);
3340 f2fs_update_time(F2FS_I_SB(inode
), REQ_TIME
);
3342 if (!f2fs_sb_has_verity(F2FS_I_SB(inode
))) {
3343 f2fs_warn(F2FS_I_SB(inode
),
3344 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3349 return fsverity_ioctl_enable(filp
, (const void __user
*)arg
);
3352 static int f2fs_ioc_measure_verity(struct file
*filp
, unsigned long arg
)
3354 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp
))))
3357 return fsverity_ioctl_measure(filp
, (void __user
*)arg
);
3360 static int f2fs_ioc_getfslabel(struct file
*filp
, unsigned long arg
)
3362 struct inode
*inode
= file_inode(filp
);
3363 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3368 vbuf
= f2fs_kzalloc(sbi
, MAX_VOLUME_NAME
, GFP_KERNEL
);
3372 down_read(&sbi
->sb_lock
);
3373 count
= utf16s_to_utf8s(sbi
->raw_super
->volume_name
,
3374 ARRAY_SIZE(sbi
->raw_super
->volume_name
),
3375 UTF16_LITTLE_ENDIAN
, vbuf
, MAX_VOLUME_NAME
);
3376 up_read(&sbi
->sb_lock
);
3378 if (copy_to_user((char __user
*)arg
, vbuf
,
3379 min(FSLABEL_MAX
, count
)))
3386 static int f2fs_ioc_setfslabel(struct file
*filp
, unsigned long arg
)
3388 struct inode
*inode
= file_inode(filp
);
3389 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3393 if (!capable(CAP_SYS_ADMIN
))
3396 vbuf
= strndup_user((const char __user
*)arg
, FSLABEL_MAX
);
3398 return PTR_ERR(vbuf
);
3400 err
= mnt_want_write_file(filp
);
3404 down_write(&sbi
->sb_lock
);
3406 memset(sbi
->raw_super
->volume_name
, 0,
3407 sizeof(sbi
->raw_super
->volume_name
));
3408 utf8s_to_utf16s(vbuf
, strlen(vbuf
), UTF16_LITTLE_ENDIAN
,
3409 sbi
->raw_super
->volume_name
,
3410 ARRAY_SIZE(sbi
->raw_super
->volume_name
));
3412 err
= f2fs_commit_super(sbi
, false);
3414 up_write(&sbi
->sb_lock
);
3416 mnt_drop_write_file(filp
);
3422 static int f2fs_get_compress_blocks(struct file
*filp
, unsigned long arg
)
3424 struct inode
*inode
= file_inode(filp
);
3427 if (!f2fs_sb_has_compression(F2FS_I_SB(inode
)))
3430 if (!f2fs_compressed_file(inode
))
3433 blocks
= atomic_read(&F2FS_I(inode
)->i_compr_blocks
);
3434 return put_user(blocks
, (u64 __user
*)arg
);
3437 static int release_compress_blocks(struct dnode_of_data
*dn
, pgoff_t count
)
3439 struct f2fs_sb_info
*sbi
= F2FS_I_SB(dn
->inode
);
3440 unsigned int released_blocks
= 0;
3441 int cluster_size
= F2FS_I(dn
->inode
)->i_cluster_size
;
3445 for (i
= 0; i
< count
; i
++) {
3446 blkaddr
= data_blkaddr(dn
->inode
, dn
->node_page
,
3447 dn
->ofs_in_node
+ i
);
3449 if (!__is_valid_data_blkaddr(blkaddr
))
3451 if (unlikely(!f2fs_is_valid_blkaddr(sbi
, blkaddr
,
3452 DATA_GENERIC_ENHANCE
)))
3453 return -EFSCORRUPTED
;
3457 int compr_blocks
= 0;
3459 for (i
= 0; i
< cluster_size
; i
++, dn
->ofs_in_node
++) {
3460 blkaddr
= f2fs_data_blkaddr(dn
);
3463 if (blkaddr
== COMPRESS_ADDR
)
3465 dn
->ofs_in_node
+= cluster_size
;
3469 if (__is_valid_data_blkaddr(blkaddr
))
3472 if (blkaddr
!= NEW_ADDR
)
3475 dn
->data_blkaddr
= NULL_ADDR
;
3476 f2fs_set_data_blkaddr(dn
);
3479 f2fs_i_compr_blocks_update(dn
->inode
, compr_blocks
, false);
3480 dec_valid_block_count(sbi
, dn
->inode
,
3481 cluster_size
- compr_blocks
);
3483 released_blocks
+= cluster_size
- compr_blocks
;
3485 count
-= cluster_size
;
3488 return released_blocks
;
3491 static int f2fs_release_compress_blocks(struct file
*filp
, unsigned long arg
)
3493 struct inode
*inode
= file_inode(filp
);
3494 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3495 pgoff_t page_idx
= 0, last_idx
;
3496 unsigned int released_blocks
= 0;
3500 if (!f2fs_sb_has_compression(F2FS_I_SB(inode
)))
3503 if (!f2fs_compressed_file(inode
))
3506 if (f2fs_readonly(sbi
->sb
))
3509 ret
= mnt_want_write_file(filp
);
3513 f2fs_balance_fs(F2FS_I_SB(inode
), true);
3517 writecount
= atomic_read(&inode
->i_writecount
);
3518 if ((filp
->f_mode
& FMODE_WRITE
&& writecount
!= 1) ||
3519 (!(filp
->f_mode
& FMODE_WRITE
) && writecount
)) {
3524 if (IS_IMMUTABLE(inode
)) {
3529 ret
= filemap_write_and_wait_range(inode
->i_mapping
, 0, LLONG_MAX
);
3533 F2FS_I(inode
)->i_flags
|= F2FS_IMMUTABLE_FL
;
3534 f2fs_set_inode_flags(inode
);
3535 inode
->i_ctime
= current_time(inode
);
3536 f2fs_mark_inode_dirty_sync(inode
, true);
3538 if (!atomic_read(&F2FS_I(inode
)->i_compr_blocks
))
3541 down_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
3542 down_write(&F2FS_I(inode
)->i_mmap_sem
);
3544 last_idx
= DIV_ROUND_UP(i_size_read(inode
), PAGE_SIZE
);
3546 while (page_idx
< last_idx
) {
3547 struct dnode_of_data dn
;
3548 pgoff_t end_offset
, count
;
3550 set_new_dnode(&dn
, inode
, NULL
, NULL
, 0);
3551 ret
= f2fs_get_dnode_of_data(&dn
, page_idx
, LOOKUP_NODE
);
3553 if (ret
== -ENOENT
) {
3554 page_idx
= f2fs_get_next_page_offset(&dn
,
3562 end_offset
= ADDRS_PER_PAGE(dn
.node_page
, inode
);
3563 count
= min(end_offset
- dn
.ofs_in_node
, last_idx
- page_idx
);
3564 count
= round_up(count
, F2FS_I(inode
)->i_cluster_size
);
3566 ret
= release_compress_blocks(&dn
, count
);
3568 f2fs_put_dnode(&dn
);
3574 released_blocks
+= ret
;
3577 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
3578 up_write(&F2FS_I(inode
)->i_mmap_sem
);
3580 inode_unlock(inode
);
3582 mnt_drop_write_file(filp
);
3585 ret
= put_user(released_blocks
, (u64 __user
*)arg
);
3586 } else if (released_blocks
&&
3587 atomic_read(&F2FS_I(inode
)->i_compr_blocks
)) {
3588 set_sbi_flag(sbi
, SBI_NEED_FSCK
);
3589 f2fs_warn(sbi
, "%s: partial blocks were released i_ino=%lx "
3590 "iblocks=%llu, released=%u, compr_blocks=%u, "
3592 __func__
, inode
->i_ino
, inode
->i_blocks
,
3594 atomic_read(&F2FS_I(inode
)->i_compr_blocks
));
3600 static int reserve_compress_blocks(struct dnode_of_data
*dn
, pgoff_t count
)
3602 struct f2fs_sb_info
*sbi
= F2FS_I_SB(dn
->inode
);
3603 unsigned int reserved_blocks
= 0;
3604 int cluster_size
= F2FS_I(dn
->inode
)->i_cluster_size
;
3608 for (i
= 0; i
< count
; i
++) {
3609 blkaddr
= data_blkaddr(dn
->inode
, dn
->node_page
,
3610 dn
->ofs_in_node
+ i
);
3612 if (!__is_valid_data_blkaddr(blkaddr
))
3614 if (unlikely(!f2fs_is_valid_blkaddr(sbi
, blkaddr
,
3615 DATA_GENERIC_ENHANCE
)))
3616 return -EFSCORRUPTED
;
3620 int compr_blocks
= 0;
3624 for (i
= 0; i
< cluster_size
; i
++, dn
->ofs_in_node
++) {
3625 blkaddr
= f2fs_data_blkaddr(dn
);
3628 if (blkaddr
== COMPRESS_ADDR
)
3630 dn
->ofs_in_node
+= cluster_size
;
3634 if (__is_valid_data_blkaddr(blkaddr
)) {
3639 dn
->data_blkaddr
= NEW_ADDR
;
3640 f2fs_set_data_blkaddr(dn
);
3643 reserved
= cluster_size
- compr_blocks
;
3644 ret
= inc_valid_block_count(sbi
, dn
->inode
, &reserved
);
3648 if (reserved
!= cluster_size
- compr_blocks
)
3651 f2fs_i_compr_blocks_update(dn
->inode
, compr_blocks
, true);
3653 reserved_blocks
+= reserved
;
3655 count
-= cluster_size
;
3658 return reserved_blocks
;
3661 static int f2fs_reserve_compress_blocks(struct file
*filp
, unsigned long arg
)
3663 struct inode
*inode
= file_inode(filp
);
3664 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3665 pgoff_t page_idx
= 0, last_idx
;
3666 unsigned int reserved_blocks
= 0;
3669 if (!f2fs_sb_has_compression(F2FS_I_SB(inode
)))
3672 if (!f2fs_compressed_file(inode
))
3675 if (f2fs_readonly(sbi
->sb
))
3678 ret
= mnt_want_write_file(filp
);
3682 if (atomic_read(&F2FS_I(inode
)->i_compr_blocks
))
3685 f2fs_balance_fs(F2FS_I_SB(inode
), true);
3689 if (!IS_IMMUTABLE(inode
)) {
3694 down_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
3695 down_write(&F2FS_I(inode
)->i_mmap_sem
);
3697 last_idx
= DIV_ROUND_UP(i_size_read(inode
), PAGE_SIZE
);
3699 while (page_idx
< last_idx
) {
3700 struct dnode_of_data dn
;
3701 pgoff_t end_offset
, count
;
3703 set_new_dnode(&dn
, inode
, NULL
, NULL
, 0);
3704 ret
= f2fs_get_dnode_of_data(&dn
, page_idx
, LOOKUP_NODE
);
3706 if (ret
== -ENOENT
) {
3707 page_idx
= f2fs_get_next_page_offset(&dn
,
3715 end_offset
= ADDRS_PER_PAGE(dn
.node_page
, inode
);
3716 count
= min(end_offset
- dn
.ofs_in_node
, last_idx
- page_idx
);
3717 count
= round_up(count
, F2FS_I(inode
)->i_cluster_size
);
3719 ret
= reserve_compress_blocks(&dn
, count
);
3721 f2fs_put_dnode(&dn
);
3727 reserved_blocks
+= ret
;
3730 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
3731 up_write(&F2FS_I(inode
)->i_mmap_sem
);
3734 F2FS_I(inode
)->i_flags
&= ~F2FS_IMMUTABLE_FL
;
3735 f2fs_set_inode_flags(inode
);
3736 inode
->i_ctime
= current_time(inode
);
3737 f2fs_mark_inode_dirty_sync(inode
, true);
3740 inode_unlock(inode
);
3742 mnt_drop_write_file(filp
);
3745 ret
= put_user(reserved_blocks
, (u64 __user
*)arg
);
3746 } else if (reserved_blocks
&&
3747 atomic_read(&F2FS_I(inode
)->i_compr_blocks
)) {
3748 set_sbi_flag(sbi
, SBI_NEED_FSCK
);
3749 f2fs_warn(sbi
, "%s: partial blocks were released i_ino=%lx "
3750 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3752 __func__
, inode
->i_ino
, inode
->i_blocks
,
3754 atomic_read(&F2FS_I(inode
)->i_compr_blocks
));
3760 static int f2fs_secure_erase(struct block_device
*bdev
, struct inode
*inode
,
3761 pgoff_t off
, block_t block
, block_t len
, u32 flags
)
3763 struct request_queue
*q
= bdev_get_queue(bdev
);
3764 sector_t sector
= SECTOR_FROM_BLOCK(block
);
3765 sector_t nr_sects
= SECTOR_FROM_BLOCK(len
);
3771 if (flags
& F2FS_TRIM_FILE_DISCARD
)
3772 ret
= blkdev_issue_discard(bdev
, sector
, nr_sects
, GFP_NOFS
,
3773 blk_queue_secure_erase(q
) ?
3774 BLKDEV_DISCARD_SECURE
: 0);
3776 if (!ret
&& (flags
& F2FS_TRIM_FILE_ZEROOUT
)) {
3777 if (IS_ENCRYPTED(inode
))
3778 ret
= fscrypt_zeroout_range(inode
, off
, block
, len
);
3780 ret
= blkdev_issue_zeroout(bdev
, sector
, nr_sects
,
3787 static int f2fs_sec_trim_file(struct file
*filp
, unsigned long arg
)
3789 struct inode
*inode
= file_inode(filp
);
3790 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3791 struct address_space
*mapping
= inode
->i_mapping
;
3792 struct block_device
*prev_bdev
= NULL
;
3793 struct f2fs_sectrim_range range
;
3794 pgoff_t index
, pg_end
, prev_index
= 0;
3795 block_t prev_block
= 0, len
= 0;
3797 bool to_end
= false;
3800 if (!(filp
->f_mode
& FMODE_WRITE
))
3803 if (copy_from_user(&range
, (struct f2fs_sectrim_range __user
*)arg
,
3807 if (range
.flags
== 0 || (range
.flags
& ~F2FS_TRIM_FILE_MASK
) ||
3808 !S_ISREG(inode
->i_mode
))
3811 if (((range
.flags
& F2FS_TRIM_FILE_DISCARD
) &&
3812 !f2fs_hw_support_discard(sbi
)) ||
3813 ((range
.flags
& F2FS_TRIM_FILE_ZEROOUT
) &&
3814 IS_ENCRYPTED(inode
) && f2fs_is_multi_device(sbi
)))
3817 file_start_write(filp
);
3820 if (f2fs_is_atomic_file(inode
) || f2fs_compressed_file(inode
) ||
3821 range
.start
>= inode
->i_size
) {
3829 if (inode
->i_size
- range
.start
> range
.len
) {
3830 end_addr
= range
.start
+ range
.len
;
3832 end_addr
= range
.len
== (u64
)-1 ?
3833 sbi
->sb
->s_maxbytes
: inode
->i_size
;
3837 if (!IS_ALIGNED(range
.start
, F2FS_BLKSIZE
) ||
3838 (!to_end
&& !IS_ALIGNED(end_addr
, F2FS_BLKSIZE
))) {
3843 index
= F2FS_BYTES_TO_BLK(range
.start
);
3844 pg_end
= DIV_ROUND_UP(end_addr
, F2FS_BLKSIZE
);
3846 ret
= f2fs_convert_inline_inode(inode
);
3850 down_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
3851 down_write(&F2FS_I(inode
)->i_mmap_sem
);
3853 ret
= filemap_write_and_wait_range(mapping
, range
.start
,
3854 to_end
? LLONG_MAX
: end_addr
- 1);
3858 truncate_inode_pages_range(mapping
, range
.start
,
3859 to_end
? -1 : end_addr
- 1);
3861 while (index
< pg_end
) {
3862 struct dnode_of_data dn
;
3863 pgoff_t end_offset
, count
;
3866 set_new_dnode(&dn
, inode
, NULL
, NULL
, 0);
3867 ret
= f2fs_get_dnode_of_data(&dn
, index
, LOOKUP_NODE
);
3869 if (ret
== -ENOENT
) {
3870 index
= f2fs_get_next_page_offset(&dn
, index
);
3876 end_offset
= ADDRS_PER_PAGE(dn
.node_page
, inode
);
3877 count
= min(end_offset
- dn
.ofs_in_node
, pg_end
- index
);
3878 for (i
= 0; i
< count
; i
++, index
++, dn
.ofs_in_node
++) {
3879 struct block_device
*cur_bdev
;
3880 block_t blkaddr
= f2fs_data_blkaddr(&dn
);
3882 if (!__is_valid_data_blkaddr(blkaddr
))
3885 if (!f2fs_is_valid_blkaddr(sbi
, blkaddr
,
3886 DATA_GENERIC_ENHANCE
)) {
3887 ret
= -EFSCORRUPTED
;
3888 f2fs_put_dnode(&dn
);
3892 cur_bdev
= f2fs_target_device(sbi
, blkaddr
, NULL
);
3893 if (f2fs_is_multi_device(sbi
)) {
3894 int di
= f2fs_target_device_index(sbi
, blkaddr
);
3896 blkaddr
-= FDEV(di
).start_blk
;
3900 if (prev_bdev
== cur_bdev
&&
3901 index
== prev_index
+ len
&&
3902 blkaddr
== prev_block
+ len
) {
3905 ret
= f2fs_secure_erase(prev_bdev
,
3906 inode
, prev_index
, prev_block
,
3909 f2fs_put_dnode(&dn
);
3918 prev_bdev
= cur_bdev
;
3920 prev_block
= blkaddr
;
3925 f2fs_put_dnode(&dn
);
3927 if (fatal_signal_pending(current
)) {
3935 ret
= f2fs_secure_erase(prev_bdev
, inode
, prev_index
,
3936 prev_block
, len
, range
.flags
);
3938 up_write(&F2FS_I(inode
)->i_mmap_sem
);
3939 up_write(&F2FS_I(inode
)->i_gc_rwsem
[WRITE
]);
3941 inode_unlock(inode
);
3942 file_end_write(filp
);
3947 static int f2fs_ioc_get_compress_option(struct file
*filp
, unsigned long arg
)
3949 struct inode
*inode
= file_inode(filp
);
3950 struct f2fs_comp_option option
;
3952 if (!f2fs_sb_has_compression(F2FS_I_SB(inode
)))
3955 inode_lock_shared(inode
);
3957 if (!f2fs_compressed_file(inode
)) {
3958 inode_unlock_shared(inode
);
3962 option
.algorithm
= F2FS_I(inode
)->i_compress_algorithm
;
3963 option
.log_cluster_size
= F2FS_I(inode
)->i_log_cluster_size
;
3965 inode_unlock_shared(inode
);
3967 if (copy_to_user((struct f2fs_comp_option __user
*)arg
, &option
,
3974 static int f2fs_ioc_set_compress_option(struct file
*filp
, unsigned long arg
)
3976 struct inode
*inode
= file_inode(filp
);
3977 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
3978 struct f2fs_comp_option option
;
3981 if (!f2fs_sb_has_compression(sbi
))
3984 if (!(filp
->f_mode
& FMODE_WRITE
))
3987 if (copy_from_user(&option
, (struct f2fs_comp_option __user
*)arg
,
3991 if (!f2fs_compressed_file(inode
) ||
3992 option
.log_cluster_size
< MIN_COMPRESS_LOG_SIZE
||
3993 option
.log_cluster_size
> MAX_COMPRESS_LOG_SIZE
||
3994 option
.algorithm
>= COMPRESS_MAX
)
3997 file_start_write(filp
);
4000 if (f2fs_is_mmap_file(inode
) || get_dirty_pages(inode
)) {
4005 if (inode
->i_size
!= 0) {
4010 F2FS_I(inode
)->i_compress_algorithm
= option
.algorithm
;
4011 F2FS_I(inode
)->i_log_cluster_size
= option
.log_cluster_size
;
4012 F2FS_I(inode
)->i_cluster_size
= 1 << option
.log_cluster_size
;
4013 f2fs_mark_inode_dirty_sync(inode
, true);
4015 if (!f2fs_is_compress_backend_ready(inode
))
4016 f2fs_warn(sbi
, "compression algorithm is successfully set, "
4017 "but current kernel doesn't support this algorithm.");
4019 inode_unlock(inode
);
4020 file_end_write(filp
);
4025 static int redirty_blocks(struct inode
*inode
, pgoff_t page_idx
, int len
)
4027 DEFINE_READAHEAD(ractl
, NULL
, inode
->i_mapping
, page_idx
);
4028 struct address_space
*mapping
= inode
->i_mapping
;
4030 pgoff_t redirty_idx
= page_idx
;
4031 int i
, page_len
= 0, ret
= 0;
4033 page_cache_ra_unbounded(&ractl
, len
, 0);
4035 for (i
= 0; i
< len
; i
++, page_idx
++) {
4036 page
= read_cache_page(mapping
, page_idx
, NULL
, NULL
);
4038 ret
= PTR_ERR(page
);
4044 for (i
= 0; i
< page_len
; i
++, redirty_idx
++) {
4045 page
= find_lock_page(mapping
, redirty_idx
);
4048 set_page_dirty(page
);
4049 f2fs_put_page(page
, 1);
4050 f2fs_put_page(page
, 0);
4056 static int f2fs_ioc_decompress_file(struct file
*filp
, unsigned long arg
)
4058 struct inode
*inode
= file_inode(filp
);
4059 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
4060 struct f2fs_inode_info
*fi
= F2FS_I(inode
);
4061 pgoff_t page_idx
= 0, last_idx
;
4062 unsigned int blk_per_seg
= sbi
->blocks_per_seg
;
4063 int cluster_size
= F2FS_I(inode
)->i_cluster_size
;
4066 if (!f2fs_sb_has_compression(sbi
) ||
4067 F2FS_OPTION(sbi
).compress_mode
!= COMPR_MODE_USER
)
4070 if (!(filp
->f_mode
& FMODE_WRITE
))
4073 if (!f2fs_compressed_file(inode
))
4076 f2fs_balance_fs(F2FS_I_SB(inode
), true);
4078 file_start_write(filp
);
4081 if (!f2fs_is_compress_backend_ready(inode
)) {
4086 if (f2fs_is_mmap_file(inode
)) {
4091 ret
= filemap_write_and_wait_range(inode
->i_mapping
, 0, LLONG_MAX
);
4095 if (!atomic_read(&fi
->i_compr_blocks
))
4098 last_idx
= DIV_ROUND_UP(i_size_read(inode
), PAGE_SIZE
);
4100 count
= last_idx
- page_idx
;
4102 int len
= min(cluster_size
, count
);
4104 ret
= redirty_blocks(inode
, page_idx
, len
);
4108 if (get_dirty_pages(inode
) >= blk_per_seg
)
4109 filemap_fdatawrite(inode
->i_mapping
);
4116 ret
= filemap_write_and_wait_range(inode
->i_mapping
, 0,
4120 f2fs_warn(sbi
, "%s: The file might be partially decompressed "
4121 "(errno=%d). Please delete the file.\n",
4124 inode_unlock(inode
);
4125 file_end_write(filp
);
4130 static int f2fs_ioc_compress_file(struct file
*filp
, unsigned long arg
)
4132 struct inode
*inode
= file_inode(filp
);
4133 struct f2fs_sb_info
*sbi
= F2FS_I_SB(inode
);
4134 pgoff_t page_idx
= 0, last_idx
;
4135 unsigned int blk_per_seg
= sbi
->blocks_per_seg
;
4136 int cluster_size
= F2FS_I(inode
)->i_cluster_size
;
4139 if (!f2fs_sb_has_compression(sbi
) ||
4140 F2FS_OPTION(sbi
).compress_mode
!= COMPR_MODE_USER
)
4143 if (!(filp
->f_mode
& FMODE_WRITE
))
4146 if (!f2fs_compressed_file(inode
))
4149 f2fs_balance_fs(F2FS_I_SB(inode
), true);
4151 file_start_write(filp
);
4154 if (!f2fs_is_compress_backend_ready(inode
)) {
4159 if (f2fs_is_mmap_file(inode
)) {
4164 ret
= filemap_write_and_wait_range(inode
->i_mapping
, 0, LLONG_MAX
);
4168 set_inode_flag(inode
, FI_ENABLE_COMPRESS
);
4170 last_idx
= DIV_ROUND_UP(i_size_read(inode
), PAGE_SIZE
);
4172 count
= last_idx
- page_idx
;
4174 int len
= min(cluster_size
, count
);
4176 ret
= redirty_blocks(inode
, page_idx
, len
);
4180 if (get_dirty_pages(inode
) >= blk_per_seg
)
4181 filemap_fdatawrite(inode
->i_mapping
);
4188 ret
= filemap_write_and_wait_range(inode
->i_mapping
, 0,
4191 clear_inode_flag(inode
, FI_ENABLE_COMPRESS
);
4194 f2fs_warn(sbi
, "%s: The file might be partially compressed "
4195 "(errno=%d). Please delete the file.\n",
4198 inode_unlock(inode
);
4199 file_end_write(filp
);
4204 static long __f2fs_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
4207 case FS_IOC_GETFLAGS
:
4208 return f2fs_ioc_getflags(filp
, arg
);
4209 case FS_IOC_SETFLAGS
:
4210 return f2fs_ioc_setflags(filp
, arg
);
4211 case FS_IOC_GETVERSION
:
4212 return f2fs_ioc_getversion(filp
, arg
);
4213 case F2FS_IOC_START_ATOMIC_WRITE
:
4214 return f2fs_ioc_start_atomic_write(filp
);
4215 case F2FS_IOC_COMMIT_ATOMIC_WRITE
:
4216 return f2fs_ioc_commit_atomic_write(filp
);
4217 case F2FS_IOC_START_VOLATILE_WRITE
:
4218 return f2fs_ioc_start_volatile_write(filp
);
4219 case F2FS_IOC_RELEASE_VOLATILE_WRITE
:
4220 return f2fs_ioc_release_volatile_write(filp
);
4221 case F2FS_IOC_ABORT_VOLATILE_WRITE
:
4222 return f2fs_ioc_abort_volatile_write(filp
);
4223 case F2FS_IOC_SHUTDOWN
:
4224 return f2fs_ioc_shutdown(filp
, arg
);
4226 return f2fs_ioc_fitrim(filp
, arg
);
4227 case FS_IOC_SET_ENCRYPTION_POLICY
:
4228 return f2fs_ioc_set_encryption_policy(filp
, arg
);
4229 case FS_IOC_GET_ENCRYPTION_POLICY
:
4230 return f2fs_ioc_get_encryption_policy(filp
, arg
);
4231 case FS_IOC_GET_ENCRYPTION_PWSALT
:
4232 return f2fs_ioc_get_encryption_pwsalt(filp
, arg
);
4233 case FS_IOC_GET_ENCRYPTION_POLICY_EX
:
4234 return f2fs_ioc_get_encryption_policy_ex(filp
, arg
);
4235 case FS_IOC_ADD_ENCRYPTION_KEY
:
4236 return f2fs_ioc_add_encryption_key(filp
, arg
);
4237 case FS_IOC_REMOVE_ENCRYPTION_KEY
:
4238 return f2fs_ioc_remove_encryption_key(filp
, arg
);
4239 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS
:
4240 return f2fs_ioc_remove_encryption_key_all_users(filp
, arg
);
4241 case FS_IOC_GET_ENCRYPTION_KEY_STATUS
:
4242 return f2fs_ioc_get_encryption_key_status(filp
, arg
);
4243 case FS_IOC_GET_ENCRYPTION_NONCE
:
4244 return f2fs_ioc_get_encryption_nonce(filp
, arg
);
4245 case F2FS_IOC_GARBAGE_COLLECT
:
4246 return f2fs_ioc_gc(filp
, arg
);
4247 case F2FS_IOC_GARBAGE_COLLECT_RANGE
:
4248 return f2fs_ioc_gc_range(filp
, arg
);
4249 case F2FS_IOC_WRITE_CHECKPOINT
:
4250 return f2fs_ioc_write_checkpoint(filp
, arg
);
4251 case F2FS_IOC_DEFRAGMENT
:
4252 return f2fs_ioc_defragment(filp
, arg
);
4253 case F2FS_IOC_MOVE_RANGE
:
4254 return f2fs_ioc_move_range(filp
, arg
);
4255 case F2FS_IOC_FLUSH_DEVICE
:
4256 return f2fs_ioc_flush_device(filp
, arg
);
4257 case F2FS_IOC_GET_FEATURES
:
4258 return f2fs_ioc_get_features(filp
, arg
);
4259 case FS_IOC_FSGETXATTR
:
4260 return f2fs_ioc_fsgetxattr(filp
, arg
);
4261 case FS_IOC_FSSETXATTR
:
4262 return f2fs_ioc_fssetxattr(filp
, arg
);
4263 case F2FS_IOC_GET_PIN_FILE
:
4264 return f2fs_ioc_get_pin_file(filp
, arg
);
4265 case F2FS_IOC_SET_PIN_FILE
:
4266 return f2fs_ioc_set_pin_file(filp
, arg
);
4267 case F2FS_IOC_PRECACHE_EXTENTS
:
4268 return f2fs_ioc_precache_extents(filp
, arg
);
4269 case F2FS_IOC_RESIZE_FS
:
4270 return f2fs_ioc_resize_fs(filp
, arg
);
4271 case FS_IOC_ENABLE_VERITY
:
4272 return f2fs_ioc_enable_verity(filp
, arg
);
4273 case FS_IOC_MEASURE_VERITY
:
4274 return f2fs_ioc_measure_verity(filp
, arg
);
4275 case FS_IOC_GETFSLABEL
:
4276 return f2fs_ioc_getfslabel(filp
, arg
);
4277 case FS_IOC_SETFSLABEL
:
4278 return f2fs_ioc_setfslabel(filp
, arg
);
4279 case F2FS_IOC_GET_COMPRESS_BLOCKS
:
4280 return f2fs_get_compress_blocks(filp
, arg
);
4281 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS
:
4282 return f2fs_release_compress_blocks(filp
, arg
);
4283 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS
:
4284 return f2fs_reserve_compress_blocks(filp
, arg
);
4285 case F2FS_IOC_SEC_TRIM_FILE
:
4286 return f2fs_sec_trim_file(filp
, arg
);
4287 case F2FS_IOC_GET_COMPRESS_OPTION
:
4288 return f2fs_ioc_get_compress_option(filp
, arg
);
4289 case F2FS_IOC_SET_COMPRESS_OPTION
:
4290 return f2fs_ioc_set_compress_option(filp
, arg
);
4291 case F2FS_IOC_DECOMPRESS_FILE
:
4292 return f2fs_ioc_decompress_file(filp
, arg
);
4293 case F2FS_IOC_COMPRESS_FILE
:
4294 return f2fs_ioc_compress_file(filp
, arg
);
4300 long f2fs_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
4302 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp
)))))
4304 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp
))))
4307 return __f2fs_ioctl(filp
, cmd
, arg
);
4310 static ssize_t
f2fs_file_read_iter(struct kiocb
*iocb
, struct iov_iter
*iter
)
4312 struct file
*file
= iocb
->ki_filp
;
4313 struct inode
*inode
= file_inode(file
);
4316 if (!f2fs_is_compress_backend_ready(inode
))
4319 ret
= generic_file_read_iter(iocb
, iter
);
4322 f2fs_update_iostat(F2FS_I_SB(inode
), APP_READ_IO
, ret
);
4327 static ssize_t
f2fs_file_write_iter(struct kiocb
*iocb
, struct iov_iter
*from
)
4329 struct file
*file
= iocb
->ki_filp
;
4330 struct inode
*inode
= file_inode(file
);
4333 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode
)))) {
4338 if (!f2fs_is_compress_backend_ready(inode
)) {
4343 if (iocb
->ki_flags
& IOCB_NOWAIT
) {
4344 if (!inode_trylock(inode
)) {
4352 ret
= generic_write_checks(iocb
, from
);
4354 bool preallocated
= false;
4355 size_t target_size
= 0;
4358 if (iov_iter_fault_in_readable(from
, iov_iter_count(from
)))
4359 set_inode_flag(inode
, FI_NO_PREALLOC
);
4361 if ((iocb
->ki_flags
& IOCB_NOWAIT
)) {
4362 if (!f2fs_overwrite_io(inode
, iocb
->ki_pos
,
4363 iov_iter_count(from
)) ||
4364 f2fs_has_inline_data(inode
) ||
4365 f2fs_force_buffered_io(inode
, iocb
, from
)) {
4366 clear_inode_flag(inode
, FI_NO_PREALLOC
);
4367 inode_unlock(inode
);
4374 if (is_inode_flag_set(inode
, FI_NO_PREALLOC
))
4377 if (iocb
->ki_flags
& IOCB_DIRECT
) {
4379 * Convert inline data for Direct I/O before entering
4382 err
= f2fs_convert_inline_inode(inode
);
4386 * If force_buffere_io() is true, we have to allocate
4387 * blocks all the time, since f2fs_direct_IO will fall
4388 * back to buffered IO.
4390 if (!f2fs_force_buffered_io(inode
, iocb
, from
) &&
4391 allow_outplace_dio(inode
, iocb
, from
))
4394 preallocated
= true;
4395 target_size
= iocb
->ki_pos
+ iov_iter_count(from
);
4397 err
= f2fs_preallocate_blocks(iocb
, from
);
4400 clear_inode_flag(inode
, FI_NO_PREALLOC
);
4401 inode_unlock(inode
);
4406 ret
= __generic_file_write_iter(iocb
, from
);
4407 clear_inode_flag(inode
, FI_NO_PREALLOC
);
4409 /* if we couldn't write data, we should deallocate blocks. */
4410 if (preallocated
&& i_size_read(inode
) < target_size
)
4411 f2fs_truncate(inode
);
4414 f2fs_update_iostat(F2FS_I_SB(inode
), APP_WRITE_IO
, ret
);
4416 inode_unlock(inode
);
4418 trace_f2fs_file_write_iter(inode
, iocb
->ki_pos
,
4419 iov_iter_count(from
), ret
);
4421 ret
= generic_write_sync(iocb
, ret
);
4425 #ifdef CONFIG_COMPAT
4426 struct compat_f2fs_gc_range
{
4431 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4432 struct compat_f2fs_gc_range)
4434 static int f2fs_compat_ioc_gc_range(struct file
*file
, unsigned long arg
)
4436 struct compat_f2fs_gc_range __user
*urange
;
4437 struct f2fs_gc_range range
;
4440 urange
= compat_ptr(arg
);
4441 err
= get_user(range
.sync
, &urange
->sync
);
4442 err
|= get_user(range
.start
, &urange
->start
);
4443 err
|= get_user(range
.len
, &urange
->len
);
4447 return __f2fs_ioc_gc_range(file
, &range
);
4450 struct compat_f2fs_move_range
{
4456 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4457 struct compat_f2fs_move_range)
4459 static int f2fs_compat_ioc_move_range(struct file
*file
, unsigned long arg
)
4461 struct compat_f2fs_move_range __user
*urange
;
4462 struct f2fs_move_range range
;
4465 urange
= compat_ptr(arg
);
4466 err
= get_user(range
.dst_fd
, &urange
->dst_fd
);
4467 err
|= get_user(range
.pos_in
, &urange
->pos_in
);
4468 err
|= get_user(range
.pos_out
, &urange
->pos_out
);
4469 err
|= get_user(range
.len
, &urange
->len
);
4473 return __f2fs_ioc_move_range(file
, &range
);
4476 long f2fs_compat_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
4478 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file
)))))
4480 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file
))))
4484 case FS_IOC32_GETFLAGS
:
4485 cmd
= FS_IOC_GETFLAGS
;
4487 case FS_IOC32_SETFLAGS
:
4488 cmd
= FS_IOC_SETFLAGS
;
4490 case FS_IOC32_GETVERSION
:
4491 cmd
= FS_IOC_GETVERSION
;
4493 case F2FS_IOC32_GARBAGE_COLLECT_RANGE
:
4494 return f2fs_compat_ioc_gc_range(file
, arg
);
4495 case F2FS_IOC32_MOVE_RANGE
:
4496 return f2fs_compat_ioc_move_range(file
, arg
);
4497 case F2FS_IOC_START_ATOMIC_WRITE
:
4498 case F2FS_IOC_COMMIT_ATOMIC_WRITE
:
4499 case F2FS_IOC_START_VOLATILE_WRITE
:
4500 case F2FS_IOC_RELEASE_VOLATILE_WRITE
:
4501 case F2FS_IOC_ABORT_VOLATILE_WRITE
:
4502 case F2FS_IOC_SHUTDOWN
:
4504 case FS_IOC_SET_ENCRYPTION_POLICY
:
4505 case FS_IOC_GET_ENCRYPTION_PWSALT
:
4506 case FS_IOC_GET_ENCRYPTION_POLICY
:
4507 case FS_IOC_GET_ENCRYPTION_POLICY_EX
:
4508 case FS_IOC_ADD_ENCRYPTION_KEY
:
4509 case FS_IOC_REMOVE_ENCRYPTION_KEY
:
4510 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS
:
4511 case FS_IOC_GET_ENCRYPTION_KEY_STATUS
:
4512 case FS_IOC_GET_ENCRYPTION_NONCE
:
4513 case F2FS_IOC_GARBAGE_COLLECT
:
4514 case F2FS_IOC_WRITE_CHECKPOINT
:
4515 case F2FS_IOC_DEFRAGMENT
:
4516 case F2FS_IOC_FLUSH_DEVICE
:
4517 case F2FS_IOC_GET_FEATURES
:
4518 case FS_IOC_FSGETXATTR
:
4519 case FS_IOC_FSSETXATTR
:
4520 case F2FS_IOC_GET_PIN_FILE
:
4521 case F2FS_IOC_SET_PIN_FILE
:
4522 case F2FS_IOC_PRECACHE_EXTENTS
:
4523 case F2FS_IOC_RESIZE_FS
:
4524 case FS_IOC_ENABLE_VERITY
:
4525 case FS_IOC_MEASURE_VERITY
:
4526 case FS_IOC_GETFSLABEL
:
4527 case FS_IOC_SETFSLABEL
:
4528 case F2FS_IOC_GET_COMPRESS_BLOCKS
:
4529 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS
:
4530 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS
:
4531 case F2FS_IOC_SEC_TRIM_FILE
:
4532 case F2FS_IOC_GET_COMPRESS_OPTION
:
4533 case F2FS_IOC_SET_COMPRESS_OPTION
:
4534 case F2FS_IOC_DECOMPRESS_FILE
:
4535 case F2FS_IOC_COMPRESS_FILE
:
4538 return -ENOIOCTLCMD
;
4540 return __f2fs_ioctl(file
, cmd
, (unsigned long) compat_ptr(arg
));
4544 const struct file_operations f2fs_file_operations
= {
4545 .llseek
= f2fs_llseek
,
4546 .read_iter
= f2fs_file_read_iter
,
4547 .write_iter
= f2fs_file_write_iter
,
4548 .open
= f2fs_file_open
,
4549 .release
= f2fs_release_file
,
4550 .mmap
= f2fs_file_mmap
,
4551 .flush
= f2fs_file_flush
,
4552 .fsync
= f2fs_sync_file
,
4553 .fallocate
= f2fs_fallocate
,
4554 .unlocked_ioctl
= f2fs_ioctl
,
4555 #ifdef CONFIG_COMPAT
4556 .compat_ioctl
= f2fs_compat_ioctl
,
4558 .splice_read
= generic_file_splice_read
,
4559 .splice_write
= iter_file_splice_write
,